2 * Copyright 2003 Eric Anholt
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * ERIC ANHOLT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
20 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Eric Anholt <anholt@FreeBSD.org>
29 * Support code for handling setup/teardown of interrupt handlers and
30 * handing interrupt handlers off to the drivers.
36 static void drm_locked_task(void *context, int pending __unused);
38 int drm_irq_by_busid(struct drm_device *dev, void *data,
39 struct drm_file *file_priv)
41 drm_irq_busid_t *irq = data;
43 if ((irq->busnum >> 8) != dev->pci_domain ||
44 (irq->busnum & 0xff) != dev->pci_bus ||
45 irq->devnum != dev->pci_slot ||
46 irq->funcnum != dev->pci_func)
51 DRM_DEBUG("%d:%d:%d => IRQ %d\n",
52 irq->busnum, irq->devnum, irq->funcnum, irq->irq);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
/*
 * Interrupt dispatch wrapper: takes dev->irq_lock around the driver's
 * irq_handler so driver interrupt handling is serialized against other
 * holders of that spinlock.
 * NOTE(review): the "static void" return-type line and the closing
 * #endif were dropped by the listing; reconstructed here.
 */
static void
drm_irq_handler_wrap(DRM_IRQ_ARGS)
{
	struct drm_device *dev = arg;

	DRM_SPINLOCK(&dev->irq_lock);
	dev->driver.irq_handler(arg);
	DRM_SPINUNLOCK(&dev->irq_lock);
}
#endif
69 int drm_irq_install(struct drm_device *dev)
76 if (dev->irq == 0 || dev->dev_private == NULL)
79 DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
82 if (dev->irq_enabled) {
88 dev->context_flag = 0;
90 DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock");
92 /* Before installing handler */
93 dev->driver.irq_preinstall(dev);
99 dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
100 &dev->irqrid, RF_SHAREABLE);
105 #if __FreeBSD_version >= 700031
106 retcode = bus_setup_intr(dev->device, dev->irqr,
107 INTR_TYPE_TTY | INTR_MPSAFE,
108 NULL, drm_irq_handler_wrap, dev, &dev->irqh);
110 retcode = bus_setup_intr(dev->device, dev->irqr,
111 INTR_TYPE_TTY | INTR_MPSAFE,
112 drm_irq_handler_wrap, dev, &dev->irqh);
116 #elif defined(__NetBSD__) || defined(__OpenBSD__)
117 if (pci_intr_map(&dev->pa, &ih) != 0) {
121 dev->irqh = pci_intr_establish(&dev->pa.pa_pc, ih, IPL_TTY,
122 (irqreturn_t (*)(void *))dev->irq_handler, dev);
129 /* After installing handler */
131 dev->driver.irq_postinstall(dev);
134 TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
138 dev->irq_enabled = 0;
140 if (dev->irqrid != 0) {
141 bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
146 DRM_SPINUNINIT(&dev->irq_lock);
151 int drm_irq_uninstall(struct drm_device *dev)
157 if (!dev->irq_enabled)
160 dev->irq_enabled = 0;
162 irqrid = dev->irqrid;
166 DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
168 dev->driver.irq_uninstall(dev);
172 bus_teardown_intr(dev->device, dev->irqr, dev->irqh);
173 bus_release_resource(dev->device, SYS_RES_IRQ, irqrid, dev->irqr);
175 #elif defined(__NetBSD__) || defined(__OpenBSD__)
176 pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
178 DRM_SPINUNINIT(&dev->irq_lock);
183 int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv)
185 drm_control_t *ctl = data;
188 switch ( ctl->func ) {
189 case DRM_INST_HANDLER:
190 /* Handle drivers whose DRM used to require IRQ setup but the
193 if (!dev->driver.use_irq)
195 if (dev->if_version < DRM_IF_VERSION(1, 2) &&
196 ctl->irq != dev->irq)
198 return drm_irq_install(dev);
199 case DRM_UNINST_HANDLER:
200 if (!dev->driver.use_irq)
203 err = drm_irq_uninstall(dev);
211 static void vblank_disable_fn(void *arg)
213 struct drm_device *dev = (struct drm_device *)arg;
214 unsigned long irqflags;
217 for (i = 0; i < dev->num_crtcs; i++) {
218 DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
219 if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
220 dev->vblank_enabled[i]) {
221 dev->driver.disable_vblank(dev, i);
222 dev->vblank_enabled[i] = 0;
224 DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
228 u32 drm_vblank_count(struct drm_device *dev, int crtc)
230 return atomic_read(&dev->_vblank_count[crtc]) +
231 dev->vblank_offset[crtc];
234 int drm_vblank_get(struct drm_device *dev, int crtc)
236 unsigned long irqflags;
239 DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
240 /* Going from 0->1 means we have to enable interrupts again */
241 atomic_add_acq_int(&dev->vblank_refcount[crtc], 1);
242 if (dev->vblank_refcount[crtc] == 1 &&
243 !dev->vblank_enabled[crtc]) {
244 ret = dev->driver.enable_vblank(dev, crtc);
246 atomic_dec(&dev->vblank_refcount[crtc]);
248 dev->vblank_enabled[crtc] = 1;
250 DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
255 void drm_vblank_put(struct drm_device *dev, int crtc)
257 /* Last user schedules interrupt disable */
258 atomic_subtract_acq_int(&dev->vblank_refcount[crtc], 1);
259 if (dev->vblank_refcount[crtc] == 0)
260 callout_reset(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ,
261 (timeout_t *)vblank_disable_fn, (void *)dev);
264 void drm_handle_vblank(struct drm_device *dev, int crtc)
266 drm_update_vblank_count(dev, crtc);
267 DRM_WAKEUP(&dev->vbl_queue[crtc]);
268 drm_vbl_send_signals(dev, crtc);
271 void drm_update_vblank_count(struct drm_device *dev, int crtc)
273 unsigned long irqflags;
274 u32 cur_vblank, diff;
277 * Interrupts were disabled prior to this call, so deal with counter
279 * NOTE! It's possible we lost a full dev->max_vblank_count events
280 * here if the register is small or we had vblank interrupts off for
283 cur_vblank = dev->driver.get_vblank_counter(dev, crtc);
284 DRM_SPINLOCK_IRQSAVE(&dev->vbl_lock, irqflags);
285 if (cur_vblank < dev->last_vblank[crtc]) {
286 diff = dev->max_vblank_count -
287 dev->last_vblank[crtc];
290 diff = cur_vblank - dev->last_vblank[crtc];
292 dev->last_vblank[crtc] = cur_vblank;
293 DRM_SPINUNLOCK_IRQRESTORE(&dev->vbl_lock, irqflags);
295 atomic_add(diff, &dev->_vblank_count[crtc]);
298 int drm_modeset_ctl(struct drm_device *dev, void *data,
299 struct drm_file *file_priv)
301 struct drm_modeset_ctl *modeset = data;
305 crtc = modeset->crtc;
306 if (crtc >= dev->num_crtcs) {
311 switch (modeset->cmd) {
312 case _DRM_PRE_MODESET:
313 dev->vblank_premodeset[crtc] =
314 dev->driver.get_vblank_counter(dev, crtc);
316 case _DRM_POST_MODESET:
317 new = dev->driver.get_vblank_counter(dev, crtc);
318 dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
329 int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
331 drm_wait_vblank_t *vblwait = data;
334 int flags, seq, crtc;
336 if (!dev->irq_enabled)
339 if (vblwait->request.type &
340 ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
341 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
342 vblwait->request.type,
343 (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
347 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
348 crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
350 if (crtc >= dev->num_crtcs)
353 drm_update_vblank_count(dev, crtc);
354 seq = drm_vblank_count(dev, crtc);
356 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
357 case _DRM_VBLANK_RELATIVE:
358 vblwait->request.sequence += seq;
359 vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
360 case _DRM_VBLANK_ABSOLUTE:
366 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
367 (seq - vblwait->request.sequence) <= (1<<23)) {
368 vblwait->request.sequence = seq + 1;
371 if (flags & _DRM_VBLANK_SIGNAL) {
373 drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM,
378 vbl_sig->sequence = vblwait->request.sequence;
379 vbl_sig->signo = vblwait->request.signal;
380 vbl_sig->pid = DRM_CURRENTPID;
382 vblwait->reply.sequence = atomic_read(&dev->vbl_received);
384 DRM_SPINLOCK(&dev->irq_lock);
385 TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
386 DRM_SPINUNLOCK(&dev->irq_lock);
391 unsigned long cur_vblank;
394 /* shared code returns -errno */
396 ret = drm_vblank_get(dev, crtc);
399 DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
400 (((cur_vblank = drm_vblank_count(dev, crtc))
401 - vblwait->request.sequence) <= (1 << 23)));
402 drm_vblank_put(dev, crtc);
406 vblwait->reply.tval_sec = now.tv_sec;
407 vblwait->reply.tval_usec = now.tv_usec;
413 static void drm_vblank_cleanup(struct drm_device *dev)
415 /* Bail if the driver didn't call drm_vblank_init() */
416 if (dev->num_crtcs == 0)
419 callout_stop(&dev->vblank_disable_timer);
421 vblank_disable_fn((void *)dev);
423 drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
425 drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
427 drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
428 dev->num_crtcs, DRM_MEM_DRIVER);
429 drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
430 dev->num_crtcs, DRM_MEM_DRIVER);
431 drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
432 dev->num_crtcs, DRM_MEM_DRIVER);
433 drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
435 drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
436 dev->num_crtcs, DRM_MEM_DRIVER);
437 drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs,
443 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
445 int i, ret = -ENOMEM;
447 callout_init(&dev->vblank_disable_timer, 0);
448 DRM_SPININIT(&dev->vbl_lock, "drm_vblk");
449 atomic_set(&dev->vbl_signal_pending, 0);
450 dev->num_crtcs = num_crtcs;
452 dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
457 dev->vbl_sigs = drm_alloc(sizeof(struct drm_vbl_sig) * num_crtcs,
462 dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
464 if (!dev->_vblank_count)
467 dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
469 if (!dev->vblank_refcount)
472 dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
474 if (!dev->vblank_enabled)
477 dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
478 if (!dev->last_vblank)
481 dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
483 if (!dev->vblank_premodeset)
486 dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
487 if (!dev->vblank_offset)
490 /* Zero per-crtc vblank stuff */
491 for (i = 0; i < num_crtcs; i++) {
492 DRM_INIT_WAITQUEUE(&dev->vbl_queue[i]);
493 TAILQ_INIT(&dev->vbl_sigs[i]);
494 atomic_set(&dev->_vblank_count[i], 0);
495 atomic_set(&dev->vblank_refcount[i], 0);
501 drm_vblank_cleanup(dev);
/*
 * Deliver queued vblank signals to processes whose requested sequence
 * has been reached.
 * NOTE(review): two signature lines at original lines 505 and 510
 * indicate the upstream layout of this era — an empty live stub plus a
 * disabled (#if 0) legacy implementation; reconstructed accordingly.
 * Verify against drm git history.
 */
void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
}

#if 0 /* disabled pending vblank-rework */
void drm_vbl_send_signals(struct drm_device *dev, int crtc )
{
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
	struct proc *p;

	vbl_sig = TAILQ_FIRST(&dev->vbl_sig_list);
	while (vbl_sig != NULL) {
		drm_vbl_sig_t *next = TAILQ_NEXT(vbl_sig, link);

		/* Wrap-safe "sequence reached" test, same window as
		 * drm_wait_vblank. */
		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
			p = pfind(vbl_sig->pid);
			if (p != NULL)
				psignal(p, vbl_sig->signo);

			TAILQ_REMOVE(&dev->vbl_sig_list, vbl_sig, link);
			DRM_FREE(vbl_sig,sizeof(*vbl_sig));
		}
		vbl_sig = next;
	}
}
#endif
533 static void drm_locked_task(void *context, int pending __unused)
535 struct drm_device *dev = context;
541 if (drm_lock_take(&dev->lock.hw_lock->lock,
544 dev->lock.file_priv = NULL; /* kernel owned */
545 dev->lock.lock_time = jiffies;
546 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
547 break; /* Got lock */
551 #if defined(__FreeBSD__) && __FreeBSD_version > 500000
552 ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
553 PZERO | PCATCH, "drmlk2", 0);
555 ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
563 dev->locked_task_call(dev);
565 drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
569 drm_locked_tasklet(struct drm_device *dev,
570 void (*tasklet)(struct drm_device *dev))
572 dev->locked_task_call = tasklet;
573 taskqueue_enqueue(taskqueue_swi, &dev->locked_task);