/**
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/interrupt.h>	/* For task queue support */
41 * Get interrupt from bus id.
43 * \param inode device inode.
44 * \param file_priv DRM file private.
46 * \param arg user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
53 int drm_irq_by_busid(struct drm_device *dev, void *data,
54 struct drm_file *file_priv)
56 struct drm_irq_busid *p = data;
58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62 (p->busnum & 0xff) != dev->pdev->bus->number ||
63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
66 p->irq = dev->pdev->irq;
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
74 static void vblank_disable_fn(unsigned long arg)
76 struct drm_device *dev = (struct drm_device *)arg;
77 unsigned long irqflags;
80 for (i = 0; i < dev->num_crtcs; i++) {
81 spin_lock_irqsave(&dev->vbl_lock, irqflags);
82 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
83 dev->vblank_enabled[i]) {
84 dev->driver->disable_vblank(dev, i);
85 dev->vblank_enabled[i] = 0;
87 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
91 static void drm_vblank_cleanup(struct drm_device *dev)
93 del_timer(&dev->vblank_disable_timer);
95 vblank_disable_fn((unsigned long)dev);
98 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
102 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
105 if (dev->_vblank_count)
106 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
107 dev->num_crtcs, DRM_MEM_DRIVER);
109 if (dev->vblank_refcount)
110 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
111 dev->num_crtcs, DRM_MEM_DRIVER);
113 if (dev->vblank_enabled)
114 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
115 dev->num_crtcs, DRM_MEM_DRIVER);
117 if (dev->last_vblank)
118 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
121 if (dev->vblank_premodeset)
122 drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
123 dev->num_crtcs, DRM_MEM_DRIVER);
125 if (dev->vblank_offset)
126 drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
130 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
132 int i, ret = -ENOMEM;
134 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
136 init_timer_deferrable(&dev->vblank_disable_timer);
137 spin_lock_init(&dev->vbl_lock);
138 atomic_set(&dev->vbl_signal_pending, 0);
139 dev->num_crtcs = num_crtcs;
141 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
146 dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
151 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
153 if (!dev->_vblank_count)
156 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
158 if (!dev->vblank_refcount)
161 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
163 if (!dev->vblank_enabled)
166 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
167 if (!dev->last_vblank)
170 dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
172 if (!dev->vblank_premodeset)
175 dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
176 if (!dev->vblank_offset)
179 /* Zero per-crtc vblank stuff */
180 for (i = 0; i < num_crtcs; i++) {
181 init_waitqueue_head(&dev->vbl_queue[i]);
182 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
183 atomic_set(&dev->_vblank_count[i], 0);
184 atomic_set(&dev->vblank_refcount[i], 0);
190 drm_vblank_cleanup(dev);
193 EXPORT_SYMBOL(drm_vblank_init);
195 int drm_wait_hotplug(struct drm_device *dev, void *data,
196 struct drm_file *file_priv)
198 union drm_wait_hotplug *hotplugwait = data;
203 if ((!dev->irq) || (!dev->irq_enabled))
206 flags = hotplugwait->request.type;
208 if (flags & _DRM_HOTPLUG_SIGNAL) {
209 unsigned long irqflags;
210 struct list_head *hotplug_sigs = dev->hotplug_sigs;
211 struct drm_hotplug_sig *hotplug_sig;
213 hotplug_sig = drm_calloc(1, sizeof(struct drm_hotplug_sig),
218 atomic_inc(&dev->hotplug_signal_pending);
220 hotplug_sig->info.si_signo = hotplugwait->request.signal;
221 hotplug_sig->task = current;
222 hotplug_sig->counter =
223 hotplugwait->reply.counter =
224 dev->mode_config.hotplug_counter;
226 spin_lock_irqsave(&dev->hotplug_lock, irqflags);
228 list_add_tail(&hotplug_sig->head, hotplug_sigs);
230 spin_unlock_irqrestore(&dev->hotplug_lock, irqflags);
232 int cur_hotplug = dev->mode_config.hotplug_counter;
234 DRM_WAIT_ON(ret, dev->hotplug_queue, 3 * DRM_HZ,
235 dev->mode_config.hotplug_counter > cur_hotplug);
237 do_gettimeofday(&now);
239 hotplugwait->reply.tval_sec = now.tv_sec;
240 hotplugwait->reply.tval_usec = now.tv_usec;
241 hotplugwait->reply.counter = dev->mode_config.hotplug_counter;
247 static void drm_hotplug_cleanup(struct drm_device *dev)
249 if (dev->hotplug_sigs)
250 drm_free(dev->hotplug_sigs, sizeof(*dev->hotplug_sigs),
253 EXPORT_SYMBOL(drm_hotplug_cleanup);
255 int drm_hotplug_init(struct drm_device *dev)
257 spin_lock_init(&dev->hotplug_lock);
258 atomic_set(&dev->hotplug_signal_pending, 0);
260 dev->hotplug_sigs = drm_alloc(sizeof(struct list_head), DRM_MEM_DRIVER);
261 if (!dev->hotplug_sigs)
264 INIT_LIST_HEAD(dev->hotplug_sigs);
265 init_waitqueue_head(&dev->hotplug_queue);
269 EXPORT_SYMBOL(drm_hotplug_init);
272 * Install IRQ handler.
274 * \param dev DRM device.
276 * Initializes the IRQ related data. Installs the handler, calling the driver
277 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
278 * before and after the installation.
280 int drm_irq_install(struct drm_device * dev)
283 unsigned long sh_flags = 0;
285 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
288 if (dev->pdev->irq == 0)
291 mutex_lock(&dev->struct_mutex);
293 /* Driver must have been initialized */
294 if (!dev->dev_private) {
295 mutex_unlock(&dev->struct_mutex);
299 if (dev->irq_enabled) {
300 mutex_unlock(&dev->struct_mutex);
303 dev->irq_enabled = 1;
304 mutex_unlock(&dev->struct_mutex);
306 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
308 /* Before installing handler */
309 dev->driver->irq_preinstall(dev);
311 /* Install handler */
312 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
313 sh_flags = IRQF_SHARED;
315 ret = request_irq(dev->pdev->irq, dev->driver->irq_handler,
316 sh_flags, dev->devname, dev);
318 mutex_lock(&dev->struct_mutex);
319 dev->irq_enabled = 0;
320 mutex_unlock(&dev->struct_mutex);
323 /* Expose the device irq to device drivers that want to export it for
326 dev->irq = dev->pdev->irq;
328 /* After installing handler */
329 ret = dev->driver->irq_postinstall(dev);
331 mutex_lock(&dev->struct_mutex);
332 dev->irq_enabled = 0;
333 mutex_unlock(&dev->struct_mutex);
338 EXPORT_SYMBOL(drm_irq_install);
341 * Uninstall the IRQ handler.
343 * \param dev DRM device.
345 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
347 int drm_irq_uninstall(struct drm_device * dev)
351 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
354 mutex_lock(&dev->struct_mutex);
355 irq_enabled = dev->irq_enabled;
356 dev->irq_enabled = 0;
357 mutex_unlock(&dev->struct_mutex);
362 DRM_DEBUG("irq=%d\n", dev->pdev->irq);
364 dev->driver->irq_uninstall(dev);
366 free_irq(dev->pdev->irq, dev);
368 drm_vblank_cleanup(dev);
370 drm_hotplug_cleanup(dev);
372 dev->locked_tasklet_func = NULL;
376 EXPORT_SYMBOL(drm_irq_uninstall);
381 * \param inode device inode.
382 * \param file_priv DRM file private.
383 * \param cmd command.
384 * \param arg user argument, pointing to a drm_control structure.
385 * \return zero on success or a negative number on failure.
387 * Calls irq_install() or irq_uninstall() according to \p arg.
389 int drm_control(struct drm_device *dev, void *data,
390 struct drm_file *file_priv)
392 struct drm_control *ctl = data;
394 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
398 case DRM_INST_HANDLER:
399 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
401 if (drm_core_check_feature(dev, DRIVER_MODESET))
403 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
404 ctl->irq != dev->pdev->irq)
406 return drm_irq_install(dev);
407 case DRM_UNINST_HANDLER:
408 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
410 if (drm_core_check_feature(dev, DRIVER_MODESET))
412 return drm_irq_uninstall(dev);
419 * drm_vblank_count - retrieve "cooked" vblank counter value
421 * @crtc: which counter to retrieve
423 * Fetches the "cooked" vblank count value that represents the number of
424 * vblank events since the system was booted, including lost events due to
425 * modesetting activity.
427 u32 drm_vblank_count(struct drm_device *dev, int crtc)
429 return atomic_read(&dev->_vblank_count[crtc]) +
430 dev->vblank_offset[crtc];
432 EXPORT_SYMBOL(drm_vblank_count);
435 * drm_update_vblank_count - update the master vblank counter
437 * @crtc: counter to update
439 * Call back into the driver to update the appropriate vblank counter
440 * (specified by @crtc). Deal with wraparound, if it occurred, and
441 * update the last read value so we can deal with wraparound on the next
444 void drm_update_vblank_count(struct drm_device *dev, int crtc)
446 unsigned long irqflags;
447 u32 cur_vblank, diff;
450 * Interrupts were disabled prior to this call, so deal with counter
452 * NOTE! It's possible we lost a full dev->max_vblank_count events
453 * here if the register is small or we had vblank interrupts off for
456 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
457 spin_lock_irqsave(&dev->vbl_lock, irqflags);
458 if (cur_vblank < dev->last_vblank[crtc]) {
459 diff = dev->max_vblank_count -
460 dev->last_vblank[crtc];
463 diff = cur_vblank - dev->last_vblank[crtc];
465 dev->last_vblank[crtc] = cur_vblank;
466 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
468 atomic_add(diff, &dev->_vblank_count[crtc]);
470 EXPORT_SYMBOL(drm_update_vblank_count);
473 * drm_vblank_get - get a reference count on vblank events
475 * @crtc: which CRTC to own
477 * Acquire a reference count on vblank events to avoid having them disabled
478 * while in use. Note callers will probably want to update the master counter
479 * using drm_update_vblank_count() above before calling this routine so that
480 * wakeups occur on the right vblank event.
483 * Zero on success, nonzero on failure.
485 int drm_vblank_get(struct drm_device *dev, int crtc)
487 unsigned long irqflags;
490 spin_lock_irqsave(&dev->vbl_lock, irqflags);
491 /* Going from 0->1 means we have to enable interrupts again */
492 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
493 !dev->vblank_enabled[crtc]) {
494 ret = dev->driver->enable_vblank(dev, crtc);
496 atomic_dec(&dev->vblank_refcount[crtc]);
498 dev->vblank_enabled[crtc] = 1;
500 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
504 EXPORT_SYMBOL(drm_vblank_get);
507 * drm_vblank_put - give up ownership of vblank events
509 * @crtc: which counter to give up
511 * Release ownership of a given vblank counter, turning off interrupts
514 void drm_vblank_put(struct drm_device *dev, int crtc)
516 /* Last user schedules interrupt disable */
517 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
518 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
520 EXPORT_SYMBOL(drm_vblank_put);
523 * drm_modeset_ctl - handle vblank event counter changes across mode switch
524 * @DRM_IOCTL_ARGS: standard ioctl arguments
526 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
527 * ioctls around modesetting so that any lost vblank events are accounted for.
529 int drm_modeset_ctl(struct drm_device *dev, void *data,
530 struct drm_file *file_priv)
532 struct drm_modeset_ctl *modeset = data;
536 crtc = modeset->crtc;
537 if (crtc >= dev->num_crtcs) {
542 switch (modeset->cmd) {
543 case _DRM_PRE_MODESET:
544 dev->vblank_premodeset[crtc] =
545 dev->driver->get_vblank_counter(dev, crtc);
547 case _DRM_POST_MODESET:
548 new = dev->driver->get_vblank_counter(dev, crtc);
549 dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
563 * \param inode device inode.
564 * \param file_priv DRM file private.
565 * \param cmd command.
566 * \param data user argument, pointing to a drm_wait_vblank structure.
567 * \return zero on success or a negative number on failure.
569 * Verifies the IRQ is installed.
571 * If a signal is requested checks if this task has already scheduled the same signal
572 * for the same vblank sequence number - nothing to be done in
573 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
574 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
577 * If a signal is not requested, then calls vblank_wait().
579 int drm_wait_vblank(struct drm_device *dev, void *data,
580 struct drm_file *file_priv)
582 union drm_wait_vblank *vblwait = data;
585 unsigned int flags, seq, crtc;
587 if ((!dev->pdev->irq) || (!dev->irq_enabled))
590 if (vblwait->request.type &
591 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
592 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
593 vblwait->request.type,
594 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
598 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
599 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
601 if (crtc >= dev->num_crtcs)
604 drm_update_vblank_count(dev, crtc);
605 seq = drm_vblank_count(dev, crtc);
607 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
608 case _DRM_VBLANK_RELATIVE:
609 vblwait->request.sequence += seq;
610 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
611 case _DRM_VBLANK_ABSOLUTE:
617 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
618 (seq - vblwait->request.sequence) <= (1<<23)) {
619 vblwait->request.sequence = seq + 1;
622 if (flags & _DRM_VBLANK_SIGNAL) {
623 unsigned long irqflags;
624 struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
625 struct drm_vbl_sig *vbl_sig, *tmp;
627 spin_lock_irqsave(&dev->vbl_lock, irqflags);
629 /* Check if this task has already scheduled the same signal
630 * for the same vblank sequence number; nothing to be done in
633 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
634 if (vbl_sig->sequence == vblwait->request.sequence
635 && vbl_sig->info.si_signo ==
636 vblwait->request.signal
637 && vbl_sig->task == current) {
638 spin_unlock_irqrestore(&dev->vbl_lock,
640 vblwait->reply.sequence = seq;
645 if (atomic_read(&dev->vbl_signal_pending) >= 100) {
646 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
650 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
652 vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
657 ret = drm_vblank_get(dev, crtc);
659 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
664 atomic_inc(&dev->vbl_signal_pending);
666 vbl_sig->sequence = vblwait->request.sequence;
667 vbl_sig->info.si_signo = vblwait->request.signal;
668 vbl_sig->task = current;
670 spin_lock_irqsave(&dev->vbl_lock, irqflags);
672 list_add_tail(&vbl_sig->head, vbl_sigs);
674 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
676 vblwait->reply.sequence = seq;
678 unsigned long cur_vblank;
680 ret = drm_vblank_get(dev, crtc);
683 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
684 (((cur_vblank = drm_vblank_count(dev, crtc))
685 - vblwait->request.sequence) <= (1 << 23)));
686 drm_vblank_put(dev, crtc);
687 do_gettimeofday(&now);
689 vblwait->reply.tval_sec = now.tv_sec;
690 vblwait->reply.tval_usec = now.tv_usec;
691 vblwait->reply.sequence = cur_vblank;
699 * Send the VBLANK signals.
701 * \param dev DRM device.
702 * \param crtc CRTC where the vblank event occurred
704 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
706 * If a signal is not requested, then calls vblank_wait().
708 static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
710 struct drm_vbl_sig *vbl_sig, *tmp;
711 struct list_head *vbl_sigs;
712 unsigned int vbl_seq;
715 spin_lock_irqsave(&dev->vbl_lock, flags);
717 vbl_sigs = &dev->vbl_sigs[crtc];
718 vbl_seq = drm_vblank_count(dev, crtc);
720 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
721 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
722 vbl_sig->info.si_code = vbl_seq;
723 send_sig_info(vbl_sig->info.si_signo,
724 &vbl_sig->info, vbl_sig->task);
726 list_del(&vbl_sig->head);
728 drm_free(vbl_sig, sizeof(*vbl_sig),
730 atomic_dec(&dev->vbl_signal_pending);
731 drm_vblank_put(dev, crtc);
735 spin_unlock_irqrestore(&dev->vbl_lock, flags);
739 * drm_handle_vblank - handle a vblank event
741 * @crtc: where this event occurred
743 * Drivers should call this routine in their vblank interrupt handlers to
744 * update the vblank counter and send any signals that may be pending.
746 void drm_handle_vblank(struct drm_device *dev, int crtc)
748 drm_update_vblank_count(dev, crtc);
749 DRM_WAKEUP(&dev->vbl_queue[crtc]);
750 drm_vbl_send_signals(dev, crtc);
752 EXPORT_SYMBOL(drm_handle_vblank);
755 * Send the HOTPLUG signals.
757 * \param dev DRM device.
759 * Sends a signal for each task in drm_device::hotplug_sigs and empties the list.
761 static void drm_hotplug_send_signals(struct drm_device * dev)
763 struct drm_hotplug_sig *hotplug_sig, *tmp;
764 struct list_head *hotplug_sigs;
767 spin_lock_irqsave(&dev->hotplug_lock, flags);
769 hotplug_sigs = dev->hotplug_sigs;
771 list_for_each_entry_safe(hotplug_sig, tmp, hotplug_sigs, head) {
772 hotplug_sig->info.si_code = hotplug_sig->counter;
774 send_sig_info(hotplug_sig->info.si_signo,
775 &hotplug_sig->info, hotplug_sig->task);
777 list_del(&hotplug_sig->head);
779 drm_free(hotplug_sig, sizeof(*hotplug_sig),
781 atomic_dec(&dev->hotplug_signal_pending);
784 spin_unlock_irqrestore(&dev->hotplug_lock, flags);
788 * drm_handle_hotplug - handle a hotplug event
790 * @crtc: where this event occurred
792 * Drivers should call this routine in their hotplug interrupt handlers.
794 void drm_handle_hotplug(struct drm_device *dev)
796 DRM_WAKEUP(&dev->hotplug_queue);
797 drm_hotplug_send_signals(dev);
799 EXPORT_SYMBOL(drm_handle_hotplug);
802 * Tasklet wrapper function.
804 * \param data DRM device in disguise.
806 * Attempts to grab the HW lock and calls the driver callback on success. On
807 * failure, leave the lock marked as contended so the callback can be called
810 static void drm_locked_tasklet_func(unsigned long data)
812 struct drm_device *dev = (struct drm_device *)data;
813 unsigned long irqflags;
815 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
817 if (!dev->locked_tasklet_func ||
818 !drm_lock_take(&dev->primary->master->lock,
819 DRM_KERNEL_CONTEXT)) {
820 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
824 dev->primary->master->lock.lock_time = jiffies;
825 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
827 dev->locked_tasklet_func(dev);
829 drm_lock_free(&dev->primary->master->lock,
832 dev->locked_tasklet_func = NULL;
834 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
838 * Schedule a tasklet to call back a driver hook with the HW lock held.
840 * \param dev DRM device.
841 * \param func Driver callback.
843 * This is intended for triggering actions that require the HW lock from an
844 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
845 * completes. Note that the callback may be called from interrupt or process
846 * context, it must not make any assumptions about this. Also, the HW lock will
847 * be held with the kernel context or any client context.
849 void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
851 unsigned long irqflags;
852 static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
854 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
855 test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
858 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
860 if (dev->locked_tasklet_func) {
861 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
865 dev->locked_tasklet_func = func;
867 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
869 drm_tasklet.data = (unsigned long)dev;
871 tasklet_hi_schedule(&drm_tasklet);
873 EXPORT_SYMBOL(drm_locked_tasklet);