/*
 * LUT updates
 * [platform/upstream/libdrm.git] / linux-core / drm_irq.c
 */
1 /**
2  * \file drm_irq.c
3  * IRQ support
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8
9 /*
10  * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
11  *
12  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35
36 #include "drmP.h"
37
38 #include <linux/interrupt.h>    /* For task queue support */
39
40 /**
41  * Get interrupt from bus id.
42  *
43  * \param inode device inode.
44  * \param file_priv DRM file private.
45  * \param cmd command.
46  * \param arg user argument, pointing to a drm_irq_busid structure.
47  * \return zero on success or a negative number on failure.
48  *
49  * Finds the PCI device with the specified bus id and gets its IRQ number.
50  * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51  * to that of the device that this DRM instance attached to.
52  */
53 int drm_irq_by_busid(struct drm_device *dev, void *data,
54                      struct drm_file *file_priv)
55 {
56         struct drm_irq_busid *p = data;
57
58         if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
59                 return -EINVAL;
60
61         if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62             (p->busnum & 0xff) != dev->pdev->bus->number ||
63             p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
64                 return -EINVAL;
65
66         p->irq = dev->pdev->irq;
67
68         DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
69                   p->irq);
70
71         return 0;
72 }
73
74 static void vblank_disable_fn(unsigned long arg)
75 {
76         struct drm_device *dev = (struct drm_device *)arg;
77         unsigned long irqflags;
78         int i;
79
80         for (i = 0; i < dev->num_crtcs; i++) {
81                 spin_lock_irqsave(&dev->vbl_lock, irqflags);
82                 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
83                     dev->vblank_enabled[i]) {
84                         dev->driver->disable_vblank(dev, i);
85                         dev->vblank_enabled[i] = 0;
86                 }
87                 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
88         }
89 }
90
91 static void drm_vblank_cleanup(struct drm_device *dev)
92 {
93         del_timer(&dev->vblank_disable_timer);
94
95         vblank_disable_fn((unsigned long)dev);
96
97         if (dev->vbl_queue)
98             drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
99                  DRM_MEM_DRIVER);
100
101         if (dev->vbl_sigs)
102             drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
103                  DRM_MEM_DRIVER);
104
105         if (dev->_vblank_count)
106             drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
107                  dev->num_crtcs, DRM_MEM_DRIVER);
108
109         if (dev->vblank_refcount)
110             drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
111                  dev->num_crtcs, DRM_MEM_DRIVER);
112
113         if (dev->vblank_enabled)
114             drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
115                  dev->num_crtcs, DRM_MEM_DRIVER);
116
117         if (dev->last_vblank)
118             drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
119                  DRM_MEM_DRIVER);
120
121         if (dev->vblank_premodeset)
122             drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
123                  dev->num_crtcs, DRM_MEM_DRIVER);
124
125         if (dev->vblank_offset)
126             drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
127                  DRM_MEM_DRIVER);
128 }
129
130 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
131 {
132         int i, ret = -ENOMEM;
133
134         setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
135                     (unsigned long)dev);
136         init_timer_deferrable(&dev->vblank_disable_timer);
137         spin_lock_init(&dev->vbl_lock);
138         atomic_set(&dev->vbl_signal_pending, 0);
139         dev->num_crtcs = num_crtcs;
140
141         dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
142                                    DRM_MEM_DRIVER);
143         if (!dev->vbl_queue)
144                 goto err;
145
146         dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
147                                   DRM_MEM_DRIVER);
148         if (!dev->vbl_sigs)
149                 goto err;
150
151         dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
152                                       DRM_MEM_DRIVER);
153         if (!dev->_vblank_count)
154                 goto err;
155
156         dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
157                                          DRM_MEM_DRIVER);
158         if (!dev->vblank_refcount)
159                 goto err;
160
161         dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
162                                          DRM_MEM_DRIVER);
163         if (!dev->vblank_enabled)
164                 goto err;
165
166         dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
167         if (!dev->last_vblank)
168                 goto err;
169
170         dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
171                                             DRM_MEM_DRIVER);
172         if (!dev->vblank_premodeset)
173                 goto err;
174
175         dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
176         if (!dev->vblank_offset)
177                 goto err;
178
179         /* Zero per-crtc vblank stuff */
180         for (i = 0; i < num_crtcs; i++) {
181                 init_waitqueue_head(&dev->vbl_queue[i]);
182                 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
183                 atomic_set(&dev->_vblank_count[i], 0);
184                 atomic_set(&dev->vblank_refcount[i], 0);
185         }
186
187         return 0;
188
189 err:
190         drm_vblank_cleanup(dev);
191         return ret;
192 }
193 EXPORT_SYMBOL(drm_vblank_init);
194
/* Wait for (or register a signal for) a hotplug event.
 * Requires an installed and enabled IRQ; otherwise returns -EINVAL. */
int drm_wait_hotplug(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	union drm_wait_hotplug *hotplugwait = data;
	struct timeval now;
	int ret = 0;
	unsigned int flags;

	if ((!dev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	flags = hotplugwait->request.type;

	if (flags & _DRM_HOTPLUG_SIGNAL) {
		/* Asynchronous path: queue an entry so the requested signal
		 * is sent to this task on the next hotplug event
		 * (delivered by drm_hotplug_send_signals()). */
		unsigned long irqflags;
		struct list_head *hotplug_sigs = dev->hotplug_sigs;
		struct drm_hotplug_sig *hotplug_sig;

		hotplug_sig = drm_calloc(1, sizeof(struct drm_hotplug_sig),
				     DRM_MEM_DRIVER);
		if (!hotplug_sig)
			return -ENOMEM;

		/* Dropped again in drm_hotplug_send_signals() on delivery. */
		atomic_inc(&dev->hotplug_signal_pending);

		hotplug_sig->info.si_signo = hotplugwait->request.signal;
		hotplug_sig->task = current;
		/* Snapshot the current counter into both the queue entry and
		 * the reply so userspace can correlate the later signal. */
		hotplug_sig->counter =
			hotplugwait->reply.counter =
					dev->mode_config.hotplug_counter;

		spin_lock_irqsave(&dev->hotplug_lock, irqflags);

		list_add_tail(&hotplug_sig->head, hotplug_sigs);

		spin_unlock_irqrestore(&dev->hotplug_lock, irqflags);
	} else {
		/* Synchronous path: block (up to 3 seconds) until the
		 * hotplug counter advances past its current value. */
		int cur_hotplug = dev->mode_config.hotplug_counter;

		DRM_WAIT_ON(ret, dev->hotplug_queue, 3 * DRM_HZ,
				dev->mode_config.hotplug_counter > cur_hotplug);

		do_gettimeofday(&now);

		hotplugwait->reply.tval_sec = now.tv_sec;
		hotplugwait->reply.tval_usec = now.tv_usec;
		hotplugwait->reply.counter = dev->mode_config.hotplug_counter;
	}

	return ret;
}
246
247 static void drm_hotplug_cleanup(struct drm_device *dev)
248 {
249         if (dev->hotplug_sigs)
250             drm_free(dev->hotplug_sigs, sizeof(*dev->hotplug_sigs),
251                  DRM_MEM_DRIVER);
252 }
253 EXPORT_SYMBOL(drm_hotplug_cleanup);
254
255 int drm_hotplug_init(struct drm_device *dev)
256 {
257         spin_lock_init(&dev->hotplug_lock);
258         atomic_set(&dev->hotplug_signal_pending, 0);
259
260         dev->hotplug_sigs = drm_alloc(sizeof(struct list_head), DRM_MEM_DRIVER);
261         if (!dev->hotplug_sigs)
262                 return -ENOMEM;
263
264         INIT_LIST_HEAD(dev->hotplug_sigs);
265         init_waitqueue_head(&dev->hotplug_queue);
266
267         return 0;
268 }
269 EXPORT_SYMBOL(drm_hotplug_init);
270
271 /**
272  * Install IRQ handler.
273  *
274  * \param dev DRM device.
275  *
276  * Initializes the IRQ related data. Installs the handler, calling the driver
277  * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
278  * before and after the installation.
279  */
280 int drm_irq_install(struct drm_device * dev)
281 {
282         int ret = 0;
283         unsigned long sh_flags = 0;
284
285         if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
286                 return -EINVAL;
287
288         if (dev->pdev->irq == 0)
289                 return -EINVAL;
290
291         mutex_lock(&dev->struct_mutex);
292
293         /* Driver must have been initialized */
294         if (!dev->dev_private) {
295                 mutex_unlock(&dev->struct_mutex);
296                 return -EINVAL;
297         }
298
299         if (dev->irq_enabled) {
300                 mutex_unlock(&dev->struct_mutex);
301                 return 0;
302         }
303         dev->irq_enabled = 1;
304         mutex_unlock(&dev->struct_mutex);
305
306         DRM_DEBUG("irq=%d\n", dev->pdev->irq);
307
308         /* Before installing handler */
309         dev->driver->irq_preinstall(dev);
310
311         /* Install handler */
312         if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
313                 sh_flags = IRQF_SHARED;
314
315         ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
316                           sh_flags, dev->devname, dev);
317         if (ret < 0) {
318                 mutex_lock(&dev->struct_mutex);
319                 dev->irq_enabled = 0;
320                 mutex_unlock(&dev->struct_mutex);
321                 return ret;
322         }
323         /* Expose the device irq to device drivers that want to export it for
324          * whatever reason.
325          */
326         dev->irq = dev->pdev->irq;
327
328         /* After installing handler */
329         ret = dev->driver->irq_postinstall(dev);
330         if (ret < 0) {
331                 mutex_lock(&dev->struct_mutex);
332                 dev->irq_enabled = 0;
333                 mutex_unlock(&dev->struct_mutex);
334         }
335
336         return ret;
337 }
338 EXPORT_SYMBOL(drm_irq_install);
339
340 /**
341  * Uninstall the IRQ handler.
342  *
343  * \param dev DRM device.
344  *
345  * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
346  */
347 int drm_irq_uninstall(struct drm_device * dev)
348 {
349         int irq_enabled;
350
351         if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
352                 return -EINVAL;
353
354         mutex_lock(&dev->struct_mutex);
355         irq_enabled = dev->irq_enabled;
356         dev->irq_enabled = 0;
357         mutex_unlock(&dev->struct_mutex);
358
359         if (!irq_enabled)
360                 return -EINVAL;
361
362         DRM_DEBUG("irq=%d\n", dev->pdev->irq);
363
364         dev->driver->irq_uninstall(dev);
365
366         free_irq(dev->pdev->irq, dev);
367
368         drm_vblank_cleanup(dev);
369
370         drm_hotplug_cleanup(dev);
371
372         dev->locked_tasklet_func = NULL;
373
374         return 0;
375 }
376 EXPORT_SYMBOL(drm_irq_uninstall);
377
378 /**
379  * IRQ control ioctl.
380  *
381  * \param inode device inode.
382  * \param file_priv DRM file private.
383  * \param cmd command.
384  * \param arg user argument, pointing to a drm_control structure.
385  * \return zero on success or a negative number on failure.
386  *
387  * Calls irq_install() or irq_uninstall() according to \p arg.
388  */
389 int drm_control(struct drm_device *dev, void *data,
390                 struct drm_file *file_priv)
391 {
392         struct drm_control *ctl = data;
393
394         /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
395
396
397         switch (ctl->func) {
398         case DRM_INST_HANDLER:
399                 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
400                         return 0;
401                 if (drm_core_check_feature(dev, DRIVER_MODESET))
402                         return 0;
403                 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
404                     ctl->irq != dev->pdev->irq)
405                         return -EINVAL;
406                 return drm_irq_install(dev);
407         case DRM_UNINST_HANDLER:
408                 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
409                         return 0;
410                 if (drm_core_check_feature(dev, DRIVER_MODESET))
411                         return 0;
412                 return drm_irq_uninstall(dev);
413         default:
414                 return -EINVAL;
415         }
416 }
417
418 /**
419  * drm_vblank_count - retrieve "cooked" vblank counter value
420  * @dev: DRM device
421  * @crtc: which counter to retrieve
422  *
423  * Fetches the "cooked" vblank count value that represents the number of
424  * vblank events since the system was booted, including lost events due to
425  * modesetting activity.
426  */
427 u32 drm_vblank_count(struct drm_device *dev, int crtc)
428 {
429         return atomic_read(&dev->_vblank_count[crtc]) +
430                 dev->vblank_offset[crtc];
431 }
432 EXPORT_SYMBOL(drm_vblank_count);
433
434 /**
435  * drm_update_vblank_count - update the master vblank counter
436  * @dev: DRM device
437  * @crtc: counter to update
438  *
439  * Call back into the driver to update the appropriate vblank counter
440  * (specified by @crtc).  Deal with wraparound, if it occurred, and
441  * update the last read value so we can deal with wraparound on the next
442  * call if necessary.
443  */
void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
	unsigned long irqflags;
	u32 cur_vblank, diff;

	/*
	 * Interrupts were disabled prior to this call, so deal with counter
	 * wrap if needed.
	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
	 * here if the register is small or we had vblank interrupts off for
	 * a long time.
	 */
	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	if (cur_vblank < dev->last_vblank[crtc]) {
		/* Hardware counter wrapped since the last read.
		 * NOTE(review): this assumes max_vblank_count is the
		 * counter's modulus; if it is the maximum *value* instead,
		 * this is off by one — confirm the driver contract. */
		diff = dev->max_vblank_count -
			dev->last_vblank[crtc];
		diff += cur_vblank;
	} else {
		diff = cur_vblank - dev->last_vblank[crtc];
	}
	/* Remember the raw hardware reading for the next wrap check. */
	dev->last_vblank[crtc] = cur_vblank;
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

	/* Fold the delta into the 32-bit software count (lock not needed:
	 * the add is atomic). */
	atomic_add(diff, &dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_update_vblank_count);
471
472 /**
473  * drm_vblank_get - get a reference count on vblank events
474  * @dev: DRM device
475  * @crtc: which CRTC to own
476  *
477  * Acquire a reference count on vblank events to avoid having them disabled
478  * while in use.  Note callers will probably want to update the master counter
479  * using drm_update_vblank_count() above before calling this routine so that
480  * wakeups occur on the right vblank event.
481  *
482  * RETURNS
483  * Zero on success, nonzero on failure.
484  */
485 int drm_vblank_get(struct drm_device *dev, int crtc)
486 {
487         unsigned long irqflags;
488         int ret = 0;
489
490         spin_lock_irqsave(&dev->vbl_lock, irqflags);    
491         /* Going from 0->1 means we have to enable interrupts again */
492         if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
493             !dev->vblank_enabled[crtc]) {
494                 ret = dev->driver->enable_vblank(dev, crtc);
495                 if (ret)
496                         atomic_dec(&dev->vblank_refcount[crtc]);
497                 else
498                         dev->vblank_enabled[crtc] = 1;
499         }
500         spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
501
502         return ret;
503 }
504 EXPORT_SYMBOL(drm_vblank_get);
505
506 /**
507  * drm_vblank_put - give up ownership of vblank events
508  * @dev: DRM device
509  * @crtc: which counter to give up
510  *
511  * Release ownership of a given vblank counter, turning off interrupts
512  * if possible.
513  */
514 void drm_vblank_put(struct drm_device *dev, int crtc)
515 {
516         /* Last user schedules interrupt disable */
517         if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
518             mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
519 }
520 EXPORT_SYMBOL(drm_vblank_put);
521
522 /**
523  * drm_modeset_ctl - handle vblank event counter changes across mode switch
524  * @DRM_IOCTL_ARGS: standard ioctl arguments
525  *
526  * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
527  * ioctls around modesetting so that any lost vblank events are accounted for.
528  */
529 int drm_modeset_ctl(struct drm_device *dev, void *data,
530                     struct drm_file *file_priv)
531 {
532         struct drm_modeset_ctl *modeset = data;
533         int crtc, ret = 0;
534         u32 new;
535
536         crtc = modeset->crtc;
537         if (crtc >= dev->num_crtcs) {
538                 ret = -EINVAL;
539                 goto out;
540         }
541
542         switch (modeset->cmd) {
543         case _DRM_PRE_MODESET:
544                 dev->vblank_premodeset[crtc] =
545                         dev->driver->get_vblank_counter(dev, crtc);
546                 break;
547         case _DRM_POST_MODESET:
548                 new = dev->driver->get_vblank_counter(dev, crtc);
549                 dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
550                 break;
551         default:
552                 ret = -EINVAL;
553                 break;
554         }
555
556 out:
557         return ret;
558 }
559
560 /**
561  * Wait for VBLANK.
562  *
563  * \param inode device inode.
564  * \param file_priv DRM file private.
565  * \param cmd command.
566  * \param data user argument, pointing to a drm_wait_vblank structure.
567  * \return zero on success or a negative number on failure.
568  *
569  * Verifies the IRQ is installed.
570  *
571  * If a signal is requested checks if this task has already scheduled the same signal
572  * for the same vblank sequence number - nothing to be done in
573  * that case. If the number of tasks waiting for the interrupt exceeds 100 the
574  * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
575  * task.
576  *
577  * If a signal is not requested, then calls vblank_wait().
578  */
int drm_wait_vblank(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	union drm_wait_vblank *vblwait = data;
	struct timeval now;
	int ret = 0;
	unsigned int flags, seq, crtc;

	/* Vblank waits require a working, currently-enabled interrupt. */
	if ((!dev->pdev->irq) || (!dev->irq_enabled))
		return -EINVAL;

	/* Reject request bits outside the known type/flag masks. */
	if (vblwait->request.type &
	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
			  vblwait->request.type,
			  (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
		return -EINVAL;
	}

	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
	/* Only CRTC 0/1 are addressable via the SECONDARY flag here. */
	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;

	if (crtc >= dev->num_crtcs)
		return -EINVAL;

	/* Refresh the cooked counter before interpreting the request. */
	drm_update_vblank_count(dev, crtc);
	seq = drm_vblank_count(dev, crtc);

	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
	case _DRM_VBLANK_RELATIVE:
		/* Convert a relative request into an absolute sequence. */
		vblwait->request.sequence += seq;
		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fallthrough — the request is now absolute */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	/*
	 * (seq - requested) <= 2^23 means the requested sequence is treated
	 * as already passed (half the 24-bit wrap window); with NEXTONMISS
	 * the wait is retargeted at the next vblank instead of returning
	 * immediately.
	 */
	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
	    (seq - vblwait->request.sequence) <= (1<<23)) {
		vblwait->request.sequence = seq + 1;
	}

	if (flags & _DRM_VBLANK_SIGNAL) {
		/* Asynchronous path: queue a signal to be delivered by
		 * drm_vbl_send_signals() when the sequence is reached. */
		unsigned long irqflags;
		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
		struct drm_vbl_sig *vbl_sig, *tmp;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
			if (vbl_sig->sequence == vblwait->request.sequence
			    && vbl_sig->info.si_signo ==
			    vblwait->request.signal
			    && vbl_sig->task == current) {
				spin_unlock_irqrestore(&dev->vbl_lock,
						       irqflags);
				vblwait->reply.sequence = seq;
				goto done;
			}
		}

		/* Cap outstanding signal requests device-wide at 100. */
		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
			return -EBUSY;
		}

		/* Drop the lock for the allocation; it may sleep. */
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
				     DRM_MEM_DRIVER);
		if (!vbl_sig)
			return -ENOMEM;

		/* Hold a vblank reference so interrupts stay enabled until
		 * delivery; drm_vbl_send_signals() drops it. */
		ret = drm_vblank_get(dev, crtc);
		if (ret) {
			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
				 DRM_MEM_DRIVER);
			return ret;
		}

		atomic_inc(&dev->vbl_signal_pending);

		vbl_sig->sequence = vblwait->request.sequence;
		vbl_sig->info.si_signo = vblwait->request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave(&dev->vbl_lock, irqflags);

		list_add_tail(&vbl_sig->head, vbl_sigs);

		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

		vblwait->reply.sequence = seq;
	} else {
		/* Synchronous path: block until the target sequence passes
		 * (same 2^23 wrap-tolerant comparison), up to 3 seconds. */
		unsigned long cur_vblank;

		ret = drm_vblank_get(dev, crtc);
		if (ret)
			return ret;
		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
			    (((cur_vblank = drm_vblank_count(dev, crtc))
			      - vblwait->request.sequence) <= (1 << 23)));
		drm_vblank_put(dev, crtc);
		do_gettimeofday(&now);

		vblwait->reply.tval_sec = now.tv_sec;
		vblwait->reply.tval_usec = now.tv_usec;
		vblwait->reply.sequence = cur_vblank;
	}

      done:
	return ret;
}
697
698 /**
699  * Send the VBLANK signals.
700  *
701  * \param dev DRM device.
702  * \param crtc CRTC where the vblank event occurred
703  *
704  * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
705  *
706  * If a signal is not requested, then calls vblank_wait().
707  */
static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
{
	struct drm_vbl_sig *vbl_sig, *tmp;
	struct list_head *vbl_sigs;
	unsigned int vbl_seq;
	unsigned long flags;

	spin_lock_irqsave(&dev->vbl_lock, flags);

	vbl_sigs = &dev->vbl_sigs[crtc];
	vbl_seq = drm_vblank_count(dev, crtc);

	/* Walk with the _safe variant: delivered entries are unlinked and
	 * freed inside the loop. */
	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
	    /* Deliver only entries whose sequence has been reached; the
	     * (vbl_seq - sequence) <= 2^23 comparison tolerates 32-bit
	     * counter wraparound. */
	    if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
		vbl_sig->info.si_code = vbl_seq;
		send_sig_info(vbl_sig->info.si_signo,
			      &vbl_sig->info, vbl_sig->task);

		list_del(&vbl_sig->head);

		drm_free(vbl_sig, sizeof(*vbl_sig),
			 DRM_MEM_DRIVER);
		/* Drop the bookkeeping taken in drm_wait_vblank(). */
		atomic_dec(&dev->vbl_signal_pending);
		drm_vblank_put(dev, crtc);
	    }
	}

	spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
737
738 /**
739  * drm_handle_vblank - handle a vblank event
740  * @dev: DRM device
741  * @crtc: where this event occurred
742  *
743  * Drivers should call this routine in their vblank interrupt handlers to
744  * update the vblank counter and send any signals that may be pending.
745  */
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
	/* Bring the software count up to date, wake blocking waiters on
	 * this CRTC's queue, then deliver any queued signals. */
	drm_update_vblank_count(dev, crtc);
	DRM_WAKEUP(&dev->vbl_queue[crtc]);
	drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
753
754 /**
755  * Send the HOTPLUG signals.
756  *
757  * \param dev DRM device.
758  *
759  * Sends a signal for each task in drm_device::hotplug_sigs and empties the list.
760  */
static void drm_hotplug_send_signals(struct drm_device * dev)
{
	struct drm_hotplug_sig *hotplug_sig, *tmp;
	struct list_head *hotplug_sigs;
	unsigned long flags;

	spin_lock_irqsave(&dev->hotplug_lock, flags);

	hotplug_sigs = dev->hotplug_sigs;

	/* Deliver and drain every queued entry; _safe walk because entries
	 * are unlinked and freed inside the loop. */
	list_for_each_entry_safe(hotplug_sig, tmp, hotplug_sigs, head) {
	    /* Report the counter value captured at request time. */
	    hotplug_sig->info.si_code = hotplug_sig->counter;

	    send_sig_info(hotplug_sig->info.si_signo,
			      &hotplug_sig->info, hotplug_sig->task);

	    list_del(&hotplug_sig->head);

	    drm_free(hotplug_sig, sizeof(*hotplug_sig),
			 DRM_MEM_DRIVER);
	    /* Balance the increment made in drm_wait_hotplug(). */
	    atomic_dec(&dev->hotplug_signal_pending);
	}

	spin_unlock_irqrestore(&dev->hotplug_lock, flags);
}
786
787 /**
788  * drm_handle_hotplug - handle a hotplug event
789  * @dev: DRM device
790  * @crtc: where this event occurred
791  *
792  * Drivers should call this routine in their hotplug interrupt handlers.
793  */
void drm_handle_hotplug(struct drm_device *dev)
{
	/* Wake ioctl waiters blocked in drm_wait_hotplug(), then deliver
	 * any queued hotplug signals. */
	DRM_WAKEUP(&dev->hotplug_queue);
	drm_hotplug_send_signals(dev);
}
EXPORT_SYMBOL(drm_handle_hotplug);
800
801 /**
802  * Tasklet wrapper function.
803  *
804  * \param data DRM device in disguise.
805  *
806  * Attempts to grab the HW lock and calls the driver callback on success. On
807  * failure, leave the lock marked as contended so the callback can be called
808  * from drm_unlock().
809  */
static void drm_locked_tasklet_func(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* Bail if the callback was cancelled, or if the HW lock can't be
	 * taken — in the latter case drm_lock_take() leaves the lock marked
	 * contended so drm_unlock() will invoke the callback instead. */
	if (!dev->locked_tasklet_func ||
	    !drm_lock_take(&dev->primary->master->lock,
			   DRM_KERNEL_CONTEXT)) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	/* HW lock acquired: account for it like any other lock grab. */
	dev->primary->master->lock.lock_time = jiffies;
	atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);

	/* Run the driver callback with the HW lock held and the tasklet
	 * spinlock still taken. */
	dev->locked_tasklet_func(dev);

	drm_lock_free(&dev->primary->master->lock,
		      DRM_KERNEL_CONTEXT);

	/* One-shot: clear so drm_locked_tasklet() can queue a new one. */
	dev->locked_tasklet_func = NULL;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
}
836
837 /**
838  * Schedule a tasklet to call back a driver hook with the HW lock held.
839  *
840  * \param dev DRM device.
841  * \param func Driver callback.
842  *
843  * This is intended for triggering actions that require the HW lock from an
844  * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
845  * completes. Note that the callback may be called from interrupt or process
846  * context, it must not make any assumptions about this. Also, the HW lock will
847  * be held with the kernel context or any client context.
848  */
void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
	unsigned long irqflags;
	/* One tasklet shared by all devices; only one callback can be
	 * pending at a time across the whole module. */
	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);

	/* Drop the request if the driver has no IRQ support or the tasklet
	 * is already scheduled (possibly for another device). */
	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
	    test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
		return;

	spin_lock_irqsave(&dev->tasklet_lock, irqflags);

	/* A callback is still pending for this device; don't overwrite it. */
	if (dev->locked_tasklet_func) {
		spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
		return;
	}

	dev->locked_tasklet_func = func;

	spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);

	/* NOTE(review): data is set after the SCHED-bit check above; a
	 * concurrent caller on another device could race here — confirm
	 * callers serialize, as the original code relies on it. */
	drm_tasklet.data = (unsigned long)dev;

	tasklet_hi_schedule(&drm_tasklet);
}
EXPORT_SYMBOL(drm_locked_tasklet);