5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
38 #include <linux/interrupt.h> /* For task queue support */
41 * Get interrupt from bus id.
43 * \param dev DRM device.
44 * \param file_priv DRM file private.
46 * \param data user argument, pointing to a drm_irq_busid structure.
47 * \return zero on success or a negative number on failure.
49 * Finds the PCI device with the specified bus id and gets its IRQ number.
50 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
51 * to that of the device that this DRM instance attached to.
/* IRQ_BUSID ioctl handler: report the IRQ number of the PCI device named
 * by the user-supplied busid.  Deprecated (see comment above): only the
 * exact busid of the device this DRM instance is attached to is accepted.
 * NOTE(review): this extract is missing interior lines (braces, return
 * statements, error codes) — verify error paths against the full file.
 */
53 int drm_irq_by_busid(struct drm_device *dev, void *data,
54 struct drm_file *file_priv)
56 struct drm_irq_busid *p = data;
/* Drivers without IRQ support cannot service this ioctl. */
58 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
/* Reject any busid that is not exactly this device's
 * domain:bus:slot.function (busnum packs domain in the high bits,
 * bus number in the low byte). */
61 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
62 (p->busnum & 0xff) != dev->pdev->bus->number ||
63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
/* Timer callback for dev->vblank_disable_timer (armed by drm_vblank_put()).
 * For each CRTC whose vblank refcount has dropped to zero but whose
 * interrupt is still enabled, call the driver's disable_vblank() hook and
 * mark it disabled.  All state checks happen under dev->vbl_lock. */
74 static void vblank_disable_fn(unsigned long arg)
76 struct drm_device *dev = (struct drm_device *)arg;
77 unsigned long irqflags;
/* Lock is taken and released per CRTC iteration, not across the loop. */
80 for (i = 0; i < dev->num_crtcs; i++) {
81 spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Only disable when no user holds a reference AND it is currently on. */
82 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
83 dev->vblank_enabled[i]) {
84 dev->driver->disable_vblank(dev, i);
85 dev->vblank_enabled[i] = 0;
87 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
/* Tear down the per-CRTC vblank state allocated by drm_vblank_init():
 * stop the disable timer, force any still-enabled vblank interrupts off,
 * then free every per-CRTC array.  Safe on partially-initialized state
 * (drm_vblank_init's error path calls this) because each free below is
 * guarded by a NULL check.
 * NOTE(review): the guards for vbl_queue/vbl_sigs and the trailing
 * DRM_MEM_DRIVER arguments fall on lines missing from this extract. */
91 static void drm_vblank_cleanup(struct drm_device *dev)
/* Bail on any delayed disable, then disable synchronously now. */
93 del_timer(&dev->vblank_disable_timer);
95 vblank_disable_fn((unsigned long)dev);
98 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
102 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
105 if (dev->_vblank_count)
106 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
107 dev->num_crtcs, DRM_MEM_DRIVER);
109 if (dev->vblank_refcount)
110 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
111 dev->num_crtcs, DRM_MEM_DRIVER);
113 if (dev->vblank_enabled)
114 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
115 dev->num_crtcs, DRM_MEM_DRIVER);
117 if (dev->last_vblank)
118 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
121 if (dev->vblank_premodeset)
122 drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
123 dev->num_crtcs, DRM_MEM_DRIVER);
125 if (dev->vblank_offset)
126 drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
/* Allocate and initialize per-CRTC vblank bookkeeping for @num_crtcs CRTCs:
 * wait queues, pending-signal lists, cooked counters, refcounts, enable
 * flags, last-read hardware counts, and pre/post-modeset snapshots.
 * ret starts at -ENOMEM so any allocation failure can jump to the common
 * error path, which calls drm_vblank_cleanup() (visible at the bottom).
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the goto labels and allocation-failure jumps sit on lines
 * missing from this extract. */
130 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
132 int i, ret = -ENOMEM;
/* Timer that lazily disables vblank irqs once refcounts reach zero. */
134 setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
136 spin_lock_init(&dev->vbl_lock);
137 atomic_set(&dev->vbl_signal_pending, 0);
138 dev->num_crtcs = num_crtcs;
140 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
145 dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
150 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
152 if (!dev->_vblank_count)
155 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
157 if (!dev->vblank_refcount)
/* drm_calloc'd arrays below come back zeroed; drm_alloc'd ones are
 * explicitly reset in the loop at the bottom. */
160 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
162 if (!dev->vblank_enabled)
165 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
166 if (!dev->last_vblank)
169 dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
171 if (!dev->vblank_premodeset)
174 dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
175 if (!dev->vblank_offset)
178 /* Zero per-crtc vblank stuff */
179 for (i = 0; i < num_crtcs; i++) {
180 init_waitqueue_head(&dev->vbl_queue[i]);
181 INIT_LIST_HEAD(&dev->vbl_sigs[i]);
182 atomic_set(&dev->_vblank_count[i], 0);
183 atomic_set(&dev->vblank_refcount[i], 0);
/* Error path: release whatever was allocated above. */
189 drm_vblank_cleanup(dev);
192 EXPORT_SYMBOL(drm_vblank_init);
/* WAIT_HOTPLUG ioctl: either (a) with _DRM_HOTPLUG_SIGNAL set, register a
 * signal to be delivered on the next hotplug event and return immediately,
 * or (b) block (up to 3 seconds) until mode_config.hotplug_counter
 * advances, then report a timestamp and the current counter.
 * NOTE(review): allocation-failure handling after drm_calloc and the
 * return statements fall on lines missing from this extract. */
194 int drm_wait_hotplug(struct drm_device *dev, void *data,
195 struct drm_file *file_priv)
197 union drm_wait_hotplug *hotplugwait = data;
/* No IRQ installed means hotplug events can never fire. */
202 if ((!dev->irq) || (!dev->irq_enabled))
205 flags = hotplugwait->request.type;
207 if (flags & _DRM_HOTPLUG_SIGNAL) {
208 unsigned long irqflags;
209 struct list_head *hotplug_sigs = dev->hotplug_sigs;
210 struct drm_hotplug_sig *hotplug_sig;
212 hotplug_sig = drm_calloc(1, sizeof(struct drm_hotplug_sig),
217 atomic_inc(&dev->hotplug_signal_pending);
/* Record which signal to send, to whom, and the counter value at
 * registration time (also echoed back to userspace). */
219 hotplug_sig->info.si_signo = hotplugwait->request.signal;
220 hotplug_sig->task = current;
221 hotplug_sig->counter =
222 hotplugwait->reply.counter =
223 dev->mode_config.hotplug_counter;
225 spin_lock_irqsave(&dev->hotplug_lock, irqflags);
227 list_add_tail(&hotplug_sig->head, hotplug_sigs);
229 spin_unlock_irqrestore(&dev->hotplug_lock, irqflags);
/* Blocking path: wait for the counter to move past its current value. */
231 int cur_hotplug = dev->mode_config.hotplug_counter;
233 DRM_WAIT_ON(ret, dev->hotplug_queue, 3 * DRM_HZ,
234 dev->mode_config.hotplug_counter > cur_hotplug);
236 do_gettimeofday(&now);
238 hotplugwait->reply.tval_sec = now.tv_sec;
239 hotplugwait->reply.tval_usec = now.tv_usec;
240 hotplugwait->reply.counter = dev->mode_config.hotplug_counter;
/* Free the hotplug signal list head allocated by drm_hotplug_init().
 * NOTE(review): pending entries on the list are presumably drained
 * elsewhere before this is called — confirm against the full file. */
246 static void drm_hotplug_cleanup(struct drm_device *dev)
248 if (dev->hotplug_sigs)
249 drm_free(dev->hotplug_sigs, sizeof(*dev->hotplug_sigs),
252 EXPORT_SYMBOL(drm_hotplug_cleanup);
/* Initialize hotplug state: lock, pending-signal counter, the (single,
 * device-wide) signal list, and the wait queue used by drm_wait_hotplug().
 * Returns 0 on success; the allocation-failure return falls on a line
 * missing from this extract (presumably -ENOMEM). */
254 int drm_hotplug_init(struct drm_device *dev)
256 spin_lock_init(&dev->hotplug_lock);
257 atomic_set(&dev->hotplug_signal_pending, 0);
259 dev->hotplug_sigs = drm_alloc(sizeof(struct list_head), DRM_MEM_DRIVER);
260 if (!dev->hotplug_sigs)
263 INIT_LIST_HEAD(dev->hotplug_sigs);
264 init_waitqueue_head(&dev->hotplug_queue);
268 EXPORT_SYMBOL(drm_hotplug_init);
271 * Install IRQ handler.
273 * \param dev DRM device.
275 * Initializes the IRQ related data. Installs the handler, calling the driver
276 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
277 * before and after the installation.
/* Install the device interrupt handler: driver irq_preinstall() hook,
 * request_irq() (shared if the driver advertises DRIVER_IRQ_SHARED),
 * then driver irq_postinstall().  dev->irq_enabled is flipped to 1 up
 * front under struct_mutex and rolled back to 0 on either failure path.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): the error-return statements themselves (-EINVAL/-EBUSY,
 * request_irq's ret check) fall on lines missing from this extract. */
279 int drm_irq_install(struct drm_device * dev)
282 unsigned long sh_flags = 0;
284 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
290 mutex_lock(&dev->struct_mutex);
292 /* Driver must have been initialized */
293 if (!dev->dev_private) {
294 mutex_unlock(&dev->struct_mutex);
/* Double-install guard. */
298 if (dev->irq_enabled) {
299 mutex_unlock(&dev->struct_mutex);
302 dev->irq_enabled = 1;
303 mutex_unlock(&dev->struct_mutex);
305 DRM_DEBUG("irq=%d\n", dev->irq);
307 /* Before installing handler */
308 dev->driver->irq_preinstall(dev);
310 /* Install handler */
311 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
312 sh_flags = IRQF_SHARED;
314 ret = request_irq(dev->irq, dev->driver->irq_handler,
315 sh_flags, dev->devname, dev);
/* request_irq failed: roll back the enabled flag. */
317 mutex_lock(&dev->struct_mutex);
318 dev->irq_enabled = 0;
319 mutex_unlock(&dev->struct_mutex);
323 /* After installing handler */
324 ret = dev->driver->irq_postinstall(dev);
/* postinstall failed: roll back the enabled flag. */
326 mutex_lock(&dev->struct_mutex);
327 dev->irq_enabled = 0;
328 mutex_unlock(&dev->struct_mutex);
333 EXPORT_SYMBOL(drm_irq_install);
336 * Uninstall the IRQ handler.
338 * \param dev DRM device.
340 * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
/* Uninstall the interrupt handler: atomically (under struct_mutex) read
 * and clear dev->irq_enabled, then call the driver's irq_uninstall() hook,
 * free the IRQ, and tear down vblank/hotplug state.
 * NOTE(review): the early return when irq_enabled was already 0, and the
 * final return value, fall on lines missing from this extract. */
342 int drm_irq_uninstall(struct drm_device * dev)
346 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
349 mutex_lock(&dev->struct_mutex);
350 irq_enabled = dev->irq_enabled;
351 dev->irq_enabled = 0;
352 mutex_unlock(&dev->struct_mutex);
357 DRM_DEBUG("irq=%d\n", dev->irq);
/* Driver quiesces its interrupt sources before we release the line. */
359 dev->driver->irq_uninstall(dev);
361 free_irq(dev->irq, dev);
363 drm_vblank_cleanup(dev);
365 drm_hotplug_cleanup(dev);
/* Cancel any pending locked-tasklet callback. */
367 dev->locked_tasklet_func = NULL;
371 EXPORT_SYMBOL(drm_irq_uninstall);
376 * \param inode device inode.
377 * \param file_priv DRM file private.
378 * \param cmd command.
379 * \param arg user argument, pointing to a drm_control structure.
380 * \return zero on success or a negative number on failure.
382 * Calls irq_install() or irq_uninstall() according to \p arg.
/* CONTROL ioctl: dispatch on ctl->func to install or uninstall the IRQ
 * handler.  Both paths are no-ops (the missing lines presumably return 0)
 * for drivers without DRIVER_HAVE_IRQ or with DRIVER_MODESET, where the
 * kernel manages interrupts itself.
 * NOTE(review): the switch statement head, default case, and the return
 * statements inside the guards fall on lines missing from this extract. */
384 int drm_control(struct drm_device *dev, void *data,
385 struct drm_file *file_priv)
387 struct drm_control *ctl = data;
389 /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
393 case DRM_INST_HANDLER:
394 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
396 if (drm_core_check_feature(dev, DRIVER_MODESET))
/* Pre-1.2 interface clients passed the IRQ number explicitly;
 * it must match the device's actual IRQ. */
398 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
399 ctl->irq != dev->irq)
401 return drm_irq_install(dev);
402 case DRM_UNINST_HANDLER:
403 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
405 if (drm_core_check_feature(dev, DRIVER_MODESET))
407 return drm_irq_uninstall(dev);
414 * drm_vblank_count - retrieve "cooked" vblank counter value
416 * @crtc: which counter to retrieve
418 * Fetches the "cooked" vblank count value that represents the number of
419 * vblank events since the system was booted, including lost events due to
420 * modesetting activity.
/* Cooked vblank count for @crtc: the raw accumulated count plus the
 * modeset-loss offset maintained by drm_modeset_ctl().  Wraps naturally
 * in u32 arithmetic. */
422 u32 drm_vblank_count(struct drm_device *dev, int crtc)
424 return atomic_read(&dev->_vblank_count[crtc]) +
425 dev->vblank_offset[crtc];
427 EXPORT_SYMBOL(drm_vblank_count);
430 * drm_update_vblank_count - update the master vblank counter
432 * @crtc: counter to update
434 * Call back into the driver to update the appropriate vblank counter
435 * (specified by @crtc). Deal with wraparound, if it occurred, and
436 * update the last read value so we can deal with wraparound on the next
/* Fold the hardware vblank counter for @crtc into the software counter,
 * handling one wraparound of the (possibly narrow) hardware register.
 * last_vblank is updated under vbl_lock; the accumulated count itself is
 * atomic so readers need no lock.
 * NOTE(review): the wrap branch as extracted reads
 *   diff = max_vblank_count - last_vblank;
 * the full file presumably also adds cur_vblank on a missing line —
 * confirm before relying on the exact wrap arithmetic. */
439 void drm_update_vblank_count(struct drm_device *dev, int crtc)
441 unsigned long irqflags;
442 u32 cur_vblank, diff;
445 * Interrupts were disabled prior to this call, so deal with counter
447 * NOTE! It's possible we lost a full dev->max_vblank_count events
448 * here if the register is small or we had vblank interrupts off for
451 cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
452 spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Hardware counter went backwards => it wrapped since the last read. */
453 if (cur_vblank < dev->last_vblank[crtc]) {
454 diff = dev->max_vblank_count -
455 dev->last_vblank[crtc];
458 diff = cur_vblank - dev->last_vblank[crtc];
460 dev->last_vblank[crtc] = cur_vblank;
461 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
463 atomic_add(diff, &dev->_vblank_count[crtc]);
465 EXPORT_SYMBOL(drm_update_vblank_count);
468 * drm_vblank_get - get a reference count on vblank events
470 * @crtc: which CRTC to own
472 * Acquire a reference count on vblank events to avoid having them disabled
473 * while in use. Note callers will probably want to update the master counter
474 * using drm_update_vblank_count() above before calling this routine so that
475 * wakeups occur on the right vblank event.
478 * Zero on success, nonzero on failure.
/* Take a reference on vblank interrupts for @crtc.  On the 0->1
 * transition (while not already enabled) the driver's enable_vblank()
 * hook is called under vbl_lock; if that fails the refcount is rolled
 * back.  Returns 0 on success, the driver's error code otherwise.
 * NOTE(review): ret's declaration/initialization and the final return
 * fall on lines missing from this extract. */
480 int drm_vblank_get(struct drm_device *dev, int crtc)
482 unsigned long irqflags;
485 spin_lock_irqsave(&dev->vbl_lock, irqflags);
486 /* Going from 0->1 means we have to enable interrupts again */
487 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
488 !dev->vblank_enabled[crtc]) {
489 ret = dev->driver->enable_vblank(dev, crtc);
/* enable failed: drop the reference we just took. */
491 atomic_dec(&dev->vblank_refcount[crtc]);
493 dev->vblank_enabled[crtc] = 1;
495 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
499 EXPORT_SYMBOL(drm_vblank_get);
502 * drm_vblank_put - give up ownership of vblank events
504 * @crtc: which counter to give up
506 * Release ownership of a given vblank counter, turning off interrupts
/* Drop a reference taken by drm_vblank_get().  The last holder does not
 * disable interrupts directly; it arms vblank_disable_timer for 5 seconds
 * in the future so short get/put cycles keep the interrupt running. */
509 void drm_vblank_put(struct drm_device *dev, int crtc)
511 /* Last user schedules interrupt disable */
512 if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
513 mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
515 EXPORT_SYMBOL(drm_vblank_put);
518 * drm_modeset_ctl - handle vblank event counter changes across mode switch
519 * @DRM_IOCTL_ARGS: standard ioctl arguments
521 * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
522 * ioctls around modesetting so that any lost vblank events are accounted for.
/* MODESET_CTL ioctl: bracket a modeset so vblank events lost while the
 * CRTC was off are accounted for.  PRE snapshots the hardware counter;
 * POST computes the (wrapping u32) delta into vblank_offset[crtc], which
 * drm_vblank_count() adds back into the cooked count.
 * NOTE(review): the crtc-range error return, break statements, default
 * case, and final return fall on lines missing from this extract. */
524 int drm_modeset_ctl(struct drm_device *dev, void *data,
525 struct drm_file *file_priv)
527 struct drm_modeset_ctl *modeset = data;
531 crtc = modeset->crtc;
532 if (crtc >= dev->num_crtcs) {
537 switch (modeset->cmd) {
538 case _DRM_PRE_MODESET:
539 dev->vblank_premodeset[crtc] =
540 dev->driver->get_vblank_counter(dev, crtc);
542 case _DRM_POST_MODESET:
543 new = dev->driver->get_vblank_counter(dev, crtc);
/* Difference between pre- and post-modeset hardware counts becomes
 * the standing correction applied by drm_vblank_count(). */
544 dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
558 * \param inode device inode.
559 * \param file_priv DRM file private.
560 * \param cmd command.
561 * \param data user argument, pointing to a drm_wait_vblank structure.
562 * \return zero on success or a negative number on failure.
564 * Verifies the IRQ is installed.
566 * If a signal is requested checks if this task has already scheduled the same signal
567 * for the same vblank sequence number - nothing to be done in
568 * that case. If the number of tasks waiting for the interrupt exceeds 100 the
569 * function fails. Otherwise adds a new entry to drm_device::vbl_sigs for this
572 * If a signal is not requested, then calls vblank_wait().
/* WAIT_VBLANK ioctl.  Validates the request type, resolves RELATIVE
 * sequences to ABSOLUTE, then either queues a signal (_DRM_VBLANK_SIGNAL)
 * to be sent by drm_vbl_send_signals(), or blocks (up to 3 s) on the
 * per-CRTC wait queue until the cooked count reaches the requested
 * sequence.  The (1 << 23) comparisons treat the u32 sequence space as
 * signed distance, so "already passed" is detected despite wraparound.
 * NOTE(review): several returns, the relative->absolute fallthrough
 * bodies, and some error codes fall on lines missing from this extract. */
574 int drm_wait_vblank(struct drm_device *dev, void *data,
575 struct drm_file *file_priv)
577 union drm_wait_vblank *vblwait = data;
580 unsigned int flags, seq, crtc;
/* Vblank waits are meaningless without an installed, enabled IRQ. */
582 if ((!dev->irq) || (!dev->irq_enabled))
585 if (vblwait->request.type &
586 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
587 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
588 vblwait->request.type,
589 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
593 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
/* Only two CRTCs addressable via this legacy interface: 0 or 1. */
594 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
596 if (crtc >= dev->num_crtcs)
/* Refresh the cooked counter before interpreting the request. */
599 drm_update_vblank_count(dev, crtc);
600 seq = drm_vblank_count(dev, crtc);
602 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
603 case _DRM_VBLANK_RELATIVE:
/* Convert to absolute; falls through to the ABSOLUTE case. */
604 vblwait->request.sequence += seq;
605 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
606 case _DRM_VBLANK_ABSOLUTE:
/* NEXTONMISS: if the requested sequence already passed, retarget
 * to the next vblank instead of returning immediately. */
612 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
613 (seq - vblwait->request.sequence) <= (1<<23)) {
614 vblwait->request.sequence = seq + 1;
617 if (flags & _DRM_VBLANK_SIGNAL) {
618 unsigned long irqflags;
619 struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
620 struct drm_vbl_sig *vbl_sig, *tmp;
622 spin_lock_irqsave(&dev->vbl_lock, irqflags);
624 /* Check if this task has already scheduled the same signal
625 * for the same vblank sequence number; nothing to be done in
628 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
629 if (vbl_sig->sequence == vblwait->request.sequence
630 && vbl_sig->info.si_signo ==
631 vblwait->request.signal
632 && vbl_sig->task == current) {
633 spin_unlock_irqrestore(&dev->vbl_lock,
635 vblwait->reply.sequence = seq;
/* Hard cap on outstanding signal registrations (DoS guard). */
640 if (atomic_read(&dev->vbl_signal_pending) >= 100) {
641 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
/* Drop the lock across the (possibly sleeping) allocation. */
645 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
647 vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
/* Hold a vblank reference for as long as the signal is queued;
 * released by drm_vbl_send_signals() when it fires. */
652 ret = drm_vblank_get(dev, crtc);
654 drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
659 atomic_inc(&dev->vbl_signal_pending);
661 vbl_sig->sequence = vblwait->request.sequence;
662 vbl_sig->info.si_signo = vblwait->request.signal;
663 vbl_sig->task = current;
665 spin_lock_irqsave(&dev->vbl_lock, irqflags);
667 list_add_tail(&vbl_sig->head, vbl_sigs);
669 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
671 vblwait->reply.sequence = seq;
/* Blocking path: hold a vblank reference for the duration of the wait. */
673 unsigned long cur_vblank;
675 ret = drm_vblank_get(dev, crtc);
678 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
679 (((cur_vblank = drm_vblank_count(dev, crtc))
680 - vblwait->request.sequence) <= (1 << 23)));
681 drm_vblank_put(dev, crtc);
682 do_gettimeofday(&now);
684 vblwait->reply.tval_sec = now.tv_sec;
685 vblwait->reply.tval_usec = now.tv_usec;
686 vblwait->reply.sequence = cur_vblank;
694 * Send the VBLANK signals.
696 * \param dev DRM device.
697 * \param crtc CRTC where the vblank event occurred
699 * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
701 * Called from drm_handle_vblank() after the cooked counter is updated.
/* Deliver every queued vblank signal for @crtc whose sequence has been
 * reached (signed-distance test against the current cooked count), then
 * remove and free the entry, decrement the pending count, and release the
 * vblank reference taken when it was queued.  Runs under vbl_lock; called
 * from drm_handle_vblank() in interrupt context. */
703 static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
705 struct drm_vbl_sig *vbl_sig, *tmp;
706 struct list_head *vbl_sigs;
707 unsigned int vbl_seq;
710 spin_lock_irqsave(&dev->vbl_lock, flags);
712 vbl_sigs = &dev->vbl_sigs[crtc];
713 vbl_seq = drm_vblank_count(dev, crtc);
714 
715 list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
/* Fire only entries whose target sequence is now <= current,
 * allowing for u32 wraparound. */
716 if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
717 vbl_sig->info.si_code = vbl_seq;
718 send_sig_info(vbl_sig->info.si_signo,
719 &vbl_sig->info, vbl_sig->task);
721 list_del(&vbl_sig->head);
723 drm_free(vbl_sig, sizeof(*vbl_sig),
725 atomic_dec(&dev->vbl_signal_pending);
/* Balances the drm_vblank_get() done at registration. */
726 drm_vblank_put(dev, crtc);
730 spin_unlock_irqrestore(&dev->vbl_lock, flags);
734 * drm_handle_vblank - handle a vblank event
736 * @crtc: where this event occurred
738 * Drivers should call this routine in their vblank interrupt handlers to
739 * update the vblank counter and send any signals that may be pending.
/* Driver entry point from the vblank interrupt: refresh the cooked
 * counter, wake blocked drm_wait_vblank() callers on this CRTC's queue,
 * then deliver any queued signals. */
741 void drm_handle_vblank(struct drm_device *dev, int crtc)
743 drm_update_vblank_count(dev, crtc);
744 DRM_WAKEUP(&dev->vbl_queue[crtc]);
745 drm_vbl_send_signals(dev, crtc);
747 EXPORT_SYMBOL(drm_handle_vblank);
750 * Send the HOTPLUG signals.
752 * \param dev DRM device.
754 * Sends a signal for each task in drm_device::hotplug_sigs and empties the list.
/* Deliver every queued hotplug signal (unconditionally — unlike the
 * vblank variant there is no sequence check), remove and free each entry,
 * and decrement the pending count.  Runs under hotplug_lock; called from
 * drm_handle_hotplug(). */
756 static void drm_hotplug_send_signals(struct drm_device * dev)
758 struct drm_hotplug_sig *hotplug_sig, *tmp;
759 struct list_head *hotplug_sigs;
762 spin_lock_irqsave(&dev->hotplug_lock, flags);
764 hotplug_sigs = dev->hotplug_sigs;
766 list_for_each_entry_safe(hotplug_sig, tmp, hotplug_sigs, head) {
/* si_code carries the counter snapshot taken at registration. */
767 hotplug_sig->info.si_code = hotplug_sig->counter;
769 send_sig_info(hotplug_sig->info.si_signo,
770 &hotplug_sig->info, hotplug_sig->task);
772 list_del(&hotplug_sig->head);
774 drm_free(hotplug_sig, sizeof(*hotplug_sig),
776 atomic_dec(&dev->hotplug_signal_pending);
779 spin_unlock_irqrestore(&dev->hotplug_lock, flags);
783 * drm_handle_hotplug - handle a hotplug event
785 * @dev: DRM device the hotplug event occurred on
787 * Drivers should call this routine in their hotplug interrupt handlers.
/* Driver entry point for a hotplug event: wake blocked drm_wait_hotplug()
 * callers, then deliver queued hotplug signals. */
789 void drm_handle_hotplug(struct drm_device *dev)
791 DRM_WAKEUP(&dev->hotplug_queue);
792 drm_hotplug_send_signals(dev);
794 EXPORT_SYMBOL(drm_handle_hotplug);
797 * Tasklet wrapper function.
799 * \param data DRM device in disguise.
801 * Attempts to grab the HW lock and calls the driver callback on success. On
802 * failure, leave the lock marked as contended so the callback can be called
/* Tasklet body scheduled by drm_locked_tasklet(): try to take the HW lock
 * with the kernel context; on success run the stored callback, free the
 * lock, and clear the pending-callback pointer.  If the lock cannot be
 * taken (or no callback is pending) it returns with the lock left marked
 * contended so the callback runs later (see comment above).
 * NOTE(review): the early return after the failed lock attempt and the
 * drm_lock_free() context argument fall on lines missing from this
 * extract. */
805 static void drm_locked_tasklet_func(unsigned long data)
807 struct drm_device *dev = (struct drm_device *)data;
808 unsigned long irqflags;
810 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
812 if (!dev->locked_tasklet_func ||
813 !drm_lock_take(&dev->primary->master->lock,
814 DRM_KERNEL_CONTEXT)) {
815 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
/* Lock acquired: account it and run the deferred callback. */
819 dev->primary->master->lock.lock_time = jiffies;
820 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
822 dev->locked_tasklet_func(dev);
824 drm_lock_free(&dev->primary->master->lock,
/* One-shot: clear so a stale pointer is never re-run. */
827 dev->locked_tasklet_func = NULL;
829 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
833 * Schedule a tasklet to call back a driver hook with the HW lock held.
835 * \param dev DRM device.
836 * \param func Driver callback.
838 * This is intended for triggering actions that require the HW lock from an
839 * interrupt handler. The lock will be grabbed ASAP after the interrupt handler
840 * completes. Note that the callback may be called from interrupt or process
841 * context, it must not make any assumptions about this. Also, the HW lock will
842 * be held with the kernel context or any client context.
/* Schedule @func to run with the HW lock held (see comment above for the
 * contract).  Uses one static tasklet shared by all devices; bails out if
 * the device lacks IRQ support, the tasklet is already scheduled, or a
 * callback is already pending (under tasklet_lock).
 * NOTE(review): the return statements inside the two guards fall on lines
 * missing from this extract. */
844 void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
846 unsigned long irqflags;
/* Single shared tasklet; .data is repointed to this dev below. */
847 static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
849 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ) ||
850 test_bit(TASKLET_STATE_SCHED, &drm_tasklet.state))
853 spin_lock_irqsave(&dev->tasklet_lock, irqflags);
/* A callback is already pending for this device: do not overwrite it. */
855 if (dev->locked_tasklet_func) {
856 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
860 dev->locked_tasklet_func = func;
862 spin_unlock_irqrestore(&dev->tasklet_lock, irqflags);
864 drm_tasklet.data = (unsigned long)dev;
/* hi-priority scheduling so the lock is grabbed ASAP after the IRQ. */
866 tasklet_hi_schedule(&drm_tasklet);
868 EXPORT_SYMBOL(drm_locked_tasklet);