/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 * Revised: Sun Feb 13 23:19:45 2000 by kevin@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.5 2000/02/23 04:47:26 martin Exp $
 *
 */
#define __NO_VERSION__
#include "drmP.h"

#include <linux/interrupt.h>	/* For task queue support */

void drm_dma_setup(drm_device_t *dev)
{
	int i;

	dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
	memset(dev->dma, 0, sizeof(*dev->dma));
	for (i = 0; i <= DRM_MAX_ORDER; i++)
		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}

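/* drm_dma_takedown releases everything drm_dma_setup and the buffer
 * allocation paths built up: the page segments backing each order's
 * buffers, the per-order buffer lists and freelists, and finally the
 * device-wide buffer and page lists and the drm_device_dma_t itself. */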
void drm_dma_takedown(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i, j;

	if (!dma) return;

				/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				drm_free_pages(dma->bufs[i].seglist[j],
					       dma->bufs[i].page_order,
					       DRM_MEM_DMA);
			}
			drm_free(dma->bufs[i].seglist,
				 dma->bufs[i].seg_count
				 * sizeof(*dma->bufs[0].seglist),
				 DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					drm_free(dma->bufs[i].buflist[j].dev_private,
						 dma->bufs[i].buflist[j].dev_priv_size,
						 DRM_MEM_BUFS);
				}
			}
			drm_free(dma->bufs[i].buflist,
				 dma->bufs[i].buf_count *
				 sizeof(*dma->bufs[0].buflist),
				 DRM_MEM_BUFS);
			drm_freelist_destroy(&dma->bufs[i].freelist);
		}
	}

	if (dma->buflist) {
		drm_free(dma->buflist,
			 dma->buf_count * sizeof(*dma->buflist),
			 DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}

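/* The DRM_DMA_HISTOGRAM code below instruments the buffer lifecycle.
 * Cycle-counter deltas are binned into DRM_DMA_HISTOGRAM_SLOTS buckets
 * whose bounds start at DRM_DMA_HISTOGRAM_INITIAL and advance through
 * DRM_DMA_HISTOGRAM_NEXT().  Assuming, for illustration only, that
 * INITIAL = 10 and NEXT(v) = v * 10 (see drmP.h for the real values),
 * the bucket bounds run 10, 100, 1000, ..., so a delta of 50 cycles
 * would land in slot 1. */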
#if DRM_DMA_HISTOGRAM
				/* This is slow, but is useful for debugging. */
int drm_histogram_slot(unsigned long count)
{
	int value = DRM_DMA_HISTOGRAM_INITIAL;
	int slot;

	for (slot = 0;
	     slot < DRM_DMA_HISTOGRAM_SLOTS;
	     ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
		if (count < value) return slot;
	}
	return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

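/* drm_histogram_compute records one buffer's trip through the pipeline.
 * Each buffer carries four timestamps -- queued, dispatched, completed,
 * freed -- and the three successive deltas (plus the two cumulative
 * spans queued-to-completed and queued-to-freed) are each binned with
 * drm_histogram_slot() and counted atomically.  The timestamps are then
 * reset so a recycled buffer starts clean. */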
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
	cycles_t queued_to_dispatched;
	cycles_t dispatched_to_completed;
	cycles_t completed_to_freed;
	int	 q2d, d2c, c2f, q2c, q2f;

	if (buf->time_queued) {
		queued_to_dispatched	= (buf->time_dispatched
					   - buf->time_queued);
		dispatched_to_completed = (buf->time_completed
					   - buf->time_dispatched);
		completed_to_freed	= (buf->time_freed
					   - buf->time_completed);

		q2d = drm_histogram_slot(queued_to_dispatched);
		d2c = drm_histogram_slot(dispatched_to_completed);
		c2f = drm_histogram_slot(completed_to_freed);

		q2c = drm_histogram_slot(queued_to_dispatched
					 + dispatched_to_completed);
		q2f = drm_histogram_slot(queued_to_dispatched
					 + dispatched_to_completed
					 + completed_to_freed);

		atomic_inc(&dev->histo.total);
		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
		atomic_inc(&dev->histo.completed_to_freed[c2f]);

		atomic_inc(&dev->histo.queued_to_completed[q2c]);
		atomic_inc(&dev->histo.queued_to_freed[q2f]);
	}
	buf->time_queued     = 0;
	buf->time_dispatched = 0;
	buf->time_completed  = 0;
	buf->time_freed	     = 0;
}
#endif

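/* drm_free_buffer resets a buffer's ownership and usage fields for
 * reuse, then either wakes sleepers on buf->dma_wait (the last waiter
 * recycles the buffer) or returns it directly to its order's freelist. */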
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
	drm_device_dma_t *dma = dev->dma;

	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->pid      = 0;
	buf->used     = 0;
#if DRM_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
#endif
	if (waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	} else {
				/* If processes are waiting, the last one
				   to wake will put the buffer on the free
				   list.  If no processes are waiting, we
				   put the buffer on the freelist here. */
		drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
	}
}

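/* drm_reclaim_buffers takes back every buffer owned by the given pid,
 * typically when that client goes away: idle buffers are freed at once,
 * buffers still on a wait list are marked DRM_LIST_RECLAIM for the
 * dispatcher to free, and anything already on the hardware is left to
 * complete. */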
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i;

	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->pid == pid) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				drm_free_buffer(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}

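/* drm_context_switch initiates a switch from context `old` to `new`.
 * Bit 0 of dev->context_flag serializes switches.  Unless the
 * DRM_FLAG_NOCTX flag short-circuits the handshake, the swap itself is
 * delegated to user space by writing "C old new" to the device,
 * presumably for the server to read and act on; the switch is then
 * finished by drm_context_switch_complete(). */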
int drm_context_switch(drm_device_t *dev, int old, int new)
{
	char	    buf[64];
	drm_queue_t *q;

	atomic_inc(&dev->total_ctx);

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

#if DRM_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (drm_flags & DRM_FLAG_NOCTX) {
		drm_context_switch_complete(dev, new);
	} else {
		sprintf(buf, "C %d %d\n", old, new);
		drm_write_string(dev, buf);
	}

	atomic_dec(&q->use_count);

	return 0;
}

int drm_context_switch_complete(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
						      - dev->ctx_start)]);
#endif
	clear_bit(0, &dev->context_flag);
	wake_up_interruptible(&dev->context_wait);

	return 0;
}

void drm_clear_next_buffer(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	dma->next_buffer = NULL;
	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
		wake_up_interruptible(&dma->next_queue->flush_queue);
	}
	dma->next_queue = NULL;
}

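/* drm_select_queue picks the next context queue to service:
 *   1. "while locked" DMA on the kernel-context queue always wins;
 *   2. the current context keeps running while it still has buffers
 *	queued and its DRM_TIME_SLICE (in jiffies) has not expired;
 *   3. otherwise the queues are scanned round-robin from last_checked.
 * If a candidate is found while the current context's slice is still
 * running, `wrapper` is armed as a timer callback to retry when the
 * slice expires.  Returns the chosen queue index, or -1 if there is
 * nothing to do. */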
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int i;
	int candidate = -1;
	int j	      = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
				/* This only happens between the time the
				   interrupt is initialized and the time
				   the queues are initialized. */
		return -1;
	}

				/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

				/* If there are buffers on the last_context
				   queue, and we have not been executing
				   this context very long, continue to
				   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

				/* Otherwise, find a candidate */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch + DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
	}

	return candidate;
}

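/* drm_dma_enqueue queues the buffers named in d->send_indices on the
 * context's queue (or on the kernel-context queue for a validated
 * "while locked" request).  The caller may sleep if writes to the
 * queue are blocked; each buffer is then checked for ownership by the
 * calling process and for sane list state before being put on the
 * waitlist. */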
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
	int		 i;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 idx;
	int		 while_locked = 0;
	drm_device_dma_t *dma = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

	DRM_DEBUG("%d\n", d->send_count);

	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
		int context = dev->lock.hw_lock->lock;

		if (!_DRM_LOCK_IS_HELD(context)) {
			DRM_ERROR("No lock held during \"while locked\""
				  " request\n");
			return -EINVAL;
		}
		if (d->context != _DRM_LOCKING_CONTEXT(context)
		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
			DRM_ERROR("Lock held by %d while %d makes"
				  " \"while locked\" request\n",
				  _DRM_LOCKING_CONTEXT(context),
				  d->context);
			return -EINVAL;
		}
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		while_locked = 1;
	} else {
		q = dev->queuelist[d->context];
	}

	atomic_inc(&q->use_count);
	if (atomic_read(&q->block_write)) {
		current->state = TASK_INTERRUPTIBLE;
		add_wait_queue(&q->write_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			if (!atomic_read(&q->block_write)) break;
			schedule();
			if (signal_pending(current)) {
				atomic_dec(&q->use_count);
				remove_wait_queue(&q->write_queue, &entry);
				return -EINTR;
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->write_queue, &entry);
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->pid != current->pid) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		if (buf->list != DRM_LIST_NONE) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  current->pid, buf->idx, buf->list);
			return -EINVAL;
		}
		buf->used	  = d->send_sizes[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		if (buf->pending) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		if (buf->waiting) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		buf->waiting = 1;
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			drm_free_buffer(dev, buf);
		} else {
			drm_waitlist_put(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	atomic_dec(&q->use_count);

	return 0;
}

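/* drm_dma_get_buffers_of_order pulls free buffers of one size class
 * (indexed by order, the log2 of the buffer size) off the freelist and
 * hands their indices and sizes back to user space.  copy_to_user_ret
 * is the old kernel helper macro that returns from the enclosing
 * function (here with -EFAULT) when the user-space copy fails. */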
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
					int order)
{
	int		 i;
	drm_buf_t	 *buf;
	drm_device_dma_t *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = drm_freelist_get(&dma->bufs[order].freelist,
				       d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx, buf->pid,
				  buf->waiting, buf->pending);
		}
		buf->pid = current->pid;
		copy_to_user_ret(&d->request_indices[i], &buf->idx,
				 sizeof(buf->idx), -EFAULT);
		copy_to_user_ret(&d->request_sizes[i], &buf->total,
				 sizeof(buf->total), -EFAULT);
		++d->granted_count;
	}
	return 0;
}

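/* drm_dma_get_buffers satisfies a user request for DMA buffers.  It
 * first tries the exact size class for dma->request_size, then, if the
 * request allows it, falls back to smaller (_DRM_DMA_SMALLER_OK) and
 * larger (_DRM_DMA_LARGER_OK) orders until request_count buffers have
 * been granted or the orders are exhausted. */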
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
	int order;
	int retcode = 0;
	int tmp_order;

	order = drm_order(dma->request_size);

	dma->granted_count = 0;
	retcode		   = drm_dma_get_buffers_of_order(dev, dma, order);

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {
			retcode = drm_dma_get_buffers_of_order(dev, dma,
							       tmp_order);
		}
	}

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {
			retcode = drm_dma_get_buffers_of_order(dev, dma,
							       tmp_order);
		}
	}
	return 0;
}