Merged mga branch with trunk
[platform/upstream/libdrm.git] / linux / dma.c
/* dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 * Revised: Sun Feb 13 23:19:45 2000 by kevin@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/dma.c,v 1.5 2000/02/23 04:47:26 martin Exp $
 *
 */

#define __NO_VERSION__
#include "drmP.h"

#include <linux/interrupt.h>    /* For task queue support */

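/* Allocate and zero the per-device DMA bookkeeping structure (dev->dma),
 * including one bucket per buffer order.  Called at device initialization. */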
void drm_dma_setup(drm_device_t *dev)
{
        int i;

        dev->dma = drm_alloc(sizeof(*dev->dma), DRM_MEM_DRIVER);
        if (!dev->dma) return; /* Allocation can fail; avoid a NULL dereference */
        memset(dev->dma, 0, sizeof(*dev->dma));
        for (i = 0; i <= DRM_MAX_ORDER; i++)
                memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));
}

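/* Free all DMA buffers, segment lists, and per-order bookkeeping, then
 * release dev->dma itself.  Safe to call when no DMA state was allocated. */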
void drm_dma_takedown(drm_device_t *dev)
{
        drm_device_dma_t  *dma = dev->dma;
        int               i, j;

        if (!dma) return;

                                /* Clear dma buffers */
        for (i = 0; i <= DRM_MAX_ORDER; i++) {
                if (dma->bufs[i].seg_count) {
                        DRM_DEBUG("order %d: buf_count = %d,"
                                  " seg_count = %d\n",
                                  i,
                                  dma->bufs[i].buf_count,
                                  dma->bufs[i].seg_count);
                        for (j = 0; j < dma->bufs[i].seg_count; j++) {
                                drm_free_pages(dma->bufs[i].seglist[j],
                                               dma->bufs[i].page_order,
                                               DRM_MEM_DMA);
                        }
                        drm_free(dma->bufs[i].seglist,
                                 dma->bufs[i].seg_count
                                 * sizeof(*dma->bufs[0].seglist),
                                 DRM_MEM_SEGS);
                }
                if (dma->bufs[i].buf_count) {
                        for (j = 0; j < dma->bufs[i].buf_count; j++) {
                                if (dma->bufs[i].buflist[j].dev_private) {
                                        drm_free(dma->bufs[i].buflist[j].dev_private,
                                                 dma->bufs[i].buflist[j].dev_priv_size,
                                                 DRM_MEM_BUFS);
                                }
                        }
                        drm_free(dma->bufs[i].buflist,
                                 dma->bufs[i].buf_count *
                                 sizeof(*dma->bufs[0].buflist),
                                 DRM_MEM_BUFS);
                        drm_freelist_destroy(&dma->bufs[i].freelist);
                }
        }

        if (dma->buflist) {
                drm_free(dma->buflist,
                         dma->buf_count * sizeof(*dma->buflist),
                         DRM_MEM_BUFS);
        }

        if (dma->pagelist) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        drm_free(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
        dev->dma = NULL;
}

#if DRM_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int drm_histogram_slot(unsigned long count)
{
        int value = DRM_DMA_HISTOGRAM_INITIAL;
        int slot;

        for (slot = 0;
             slot < DRM_DMA_HISTOGRAM_SLOTS;
             ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
                if (count < value) return slot;
        }
        return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

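/* Update the latency histograms for a buffer: bucket the queued->dispatched,
 * dispatched->completed, and completed->freed intervals (and their sums),
 * then reset the buffer's timestamps. */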
void drm_histogram_compute(drm_device_t *dev, drm_buf_t *buf)
{
        cycles_t queued_to_dispatched;
        cycles_t dispatched_to_completed;
        cycles_t completed_to_freed;
        int      q2d, d2c, c2f, q2c, q2f;

        if (buf->time_queued) {
                queued_to_dispatched    = (buf->time_dispatched
                                           - buf->time_queued);
                dispatched_to_completed = (buf->time_completed
                                           - buf->time_dispatched);
                completed_to_freed      = (buf->time_freed
                                           - buf->time_completed);

                q2d = drm_histogram_slot(queued_to_dispatched);
                d2c = drm_histogram_slot(dispatched_to_completed);
                c2f = drm_histogram_slot(completed_to_freed);

                q2c = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed);
                q2f = drm_histogram_slot(queued_to_dispatched
                                         + dispatched_to_completed
                                         + completed_to_freed);

                atomic_inc(&dev->histo.total);
                atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
                atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
                atomic_inc(&dev->histo.completed_to_freed[c2f]);

                atomic_inc(&dev->histo.queued_to_completed[q2c]);
                atomic_inc(&dev->histo.queued_to_freed[q2f]);
        }
        buf->time_queued     = 0;
        buf->time_dispatched = 0;
        buf->time_completed  = 0;
        buf->time_freed      = 0;
}
#endif

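/* Mark a buffer unused and either wake a waiting process (which will return
 * it to the free list) or put it on the free list directly. */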
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
{
        drm_device_dma_t *dma = dev->dma;

        if (!buf) return;

        buf->waiting  = 0;
        buf->pending  = 0;
        buf->pid      = 0;
        buf->used     = 0;
#if DRM_DMA_HISTOGRAM
        buf->time_completed = get_cycles();
#endif
        if (waitqueue_active(&buf->dma_wait)) {
                wake_up_interruptible(&buf->dma_wait);
        } else {
                                /* If processes are waiting, the last one
                                   to wake will put the buffer on the free
                                   list.  If no processes are waiting, we
                                   put the buffer on the freelist here. */
                drm_freelist_put(dev, &dma->bufs[buf->order].freelist, buf);
        }
}

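/* Reclaim every buffer owned by the given process: idle buffers are freed
 * immediately, waiting buffers are flagged for reclaim, and buffers already
 * on the hardware are left to complete. */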
void drm_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
        drm_device_dma_t *dma = dev->dma;
        int              i;

        if (!dma) return;
        for (i = 0; i < dma->buf_count; i++) {
                if (dma->buflist[i]->pid == pid) {
                        switch (dma->buflist[i]->list) {
                        case DRM_LIST_NONE:
                                drm_free_buffer(dev, dma->buflist[i]);
                                break;
                        case DRM_LIST_WAIT:
                                dma->buflist[i]->list = DRM_LIST_RECLAIM;
                                break;
                        default:
                                /* Buffer already on hardware. */
                                break;
                        }
                }
        }
}

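/* Begin a context switch from context 'old' to context 'new'.  Returns
 * -EBUSY if a switch is already in progress and 0 if no switch is needed;
 * otherwise the switch is either completed immediately (DRM_FLAG_NOCTX)
 * or announced to user space via drm_write_string. */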
int drm_context_switch(drm_device_t *dev, int old, int new)
{
        char        buf[64];
        drm_queue_t *q;

        atomic_inc(&dev->total_ctx);

        if (test_and_set_bit(0, &dev->context_flag)) {
                DRM_ERROR("Reentering -- FIXME\n");
                return -EBUSY;
        }

#if DRM_DMA_HISTOGRAM
        dev->ctx_start = get_cycles();
#endif

        DRM_DEBUG("Context switch from %d to %d\n", old, new);

        if (new >= dev->queue_count) {
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (new == dev->last_context) {
                clear_bit(0, &dev->context_flag);
                return 0;
        }

        q = dev->queuelist[new];
        atomic_inc(&q->use_count);
        if (atomic_read(&q->use_count) == 1) {
                atomic_dec(&q->use_count);
                clear_bit(0, &dev->context_flag);
                return -EINVAL;
        }

        if (drm_flags & DRM_FLAG_NOCTX) {
                drm_context_switch_complete(dev, new);
        } else {
                sprintf(buf, "C %d %d\n", old, new);
                drm_write_string(dev, buf);
        }

        atomic_dec(&q->use_count);

        return 0;
}

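/* Finish a context switch: record the new context and switch time, drop the
 * hardware lock unless the next buffer must be dispatched while locked, and
 * wake anyone waiting on the context. */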
int drm_context_switch_complete(drm_device_t *dev, int new)
{
        drm_device_dma_t *dma = dev->dma;

        dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
        dev->last_switch  = jiffies;

        if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
                DRM_ERROR("Lock isn't held after context switch\n");
        }

        if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
                if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                  DRM_KERNEL_CONTEXT)) {
                        DRM_ERROR("Cannot free lock\n");
                }
        }

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.ctx[drm_histogram_slot(get_cycles()
                                                      - dev->ctx_start)]);
#endif
        clear_bit(0, &dev->context_flag);
        wake_up_interruptible(&dev->context_wait);

        return 0;
}

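/* Forget the pending next buffer and queue, waking any flush waiters if the
 * queue's waitlist has drained. */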
void drm_clear_next_buffer(drm_device_t *dev)
{
        drm_device_dma_t *dma = dev->dma;

        dma->next_buffer = NULL;
        if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
                wake_up_interruptible(&dma->next_queue->flush_queue);
        }
        dma->next_queue  = NULL;
}

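/* Pick the next queue to service: prefer "while locked" kernel-context DMA,
 * then the current context while its time slice lasts, then scan round-robin
 * for a queue with waiting buffers.  If a better candidate exists but the
 * current context's slice has not expired, arm the timer with the wrapper
 * and return -1 so the caller retries later. */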
int drm_select_queue(drm_device_t *dev, void (*wrapper)(unsigned long))
{
        int        i;
        int        candidate = -1;
        int        j         = jiffies;

        if (!dev) {
                DRM_ERROR("No device\n");
                return -1;
        }
        if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
                                /* This only happens between the time the
                                   interrupt is initialized and the time
                                   the queues are initialized. */
                return -1;
        }

                                /* Doing "while locked" DMA? */
        if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
                return DRM_KERNEL_CONTEXT;
        }

                                /* If there are buffers on the last_context
                                   queue, and we have not been executing
                                   this context very long, continue to
                                   execute this context. */
        if (dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j
            && DRM_WAITCOUNT(dev, dev->last_context)) {
                return dev->last_context;
        }

                                /* Otherwise, find a candidate */
        for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
                if (DRM_WAITCOUNT(dev, i)) {
                        candidate = dev->last_checked = i;
                        break;
                }
        }

        if (candidate < 0) {
                for (i = 0; i < dev->queue_count; i++) {
                        if (DRM_WAITCOUNT(dev, i)) {
                                candidate = dev->last_checked = i;
                                break;
                        }
                }
        }

        if (wrapper
            && candidate >= 0
            && candidate != dev->last_context
            && dev->last_switch <= j
            && dev->last_switch + DRM_TIME_SLICE > j) {
                if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
                        del_timer(&dev->timer);
                        dev->timer.function = wrapper;
                        dev->timer.data     = (unsigned long)dev;
                        dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
                        add_timer(&dev->timer);
                }
                return -1;
        }

        return candidate;
}

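/* Queue the buffers described by a drm_dma_t request on the appropriate
 * context queue ("while locked" requests go on the kernel-context queue),
 * blocking if writes to the queue are currently blocked.  Each buffer is
 * validated (index range, ownership, not already queued or pending) before
 * it is placed on the waitlist. */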
int drm_dma_enqueue(drm_device_t *dev, drm_dma_t *d)
{
        int               i;
        drm_queue_t       *q;
        drm_buf_t         *buf;
        int               idx;
        int               while_locked = 0;
        drm_device_dma_t  *dma = dev->dma;
        DECLARE_WAITQUEUE(entry, current);

        DRM_DEBUG("%d\n", d->send_count);

        if (d->flags & _DRM_DMA_WHILE_LOCKED) {
                int context = dev->lock.hw_lock->lock;

                if (!_DRM_LOCK_IS_HELD(context)) {
                        DRM_ERROR("No lock held during \"while locked\""
                                  " request\n");
                        return -EINVAL;
                }
                if (d->context != _DRM_LOCKING_CONTEXT(context)
                    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
                        DRM_ERROR("Lock held by %d while %d makes"
                                  " \"while locked\" request\n",
                                  _DRM_LOCKING_CONTEXT(context),
                                  d->context);
                        return -EINVAL;
                }
                q = dev->queuelist[DRM_KERNEL_CONTEXT];
                while_locked = 1;
        } else {
                q = dev->queuelist[d->context];
        }

        atomic_inc(&q->use_count);
        if (atomic_read(&q->block_write)) {
                current->state = TASK_INTERRUPTIBLE;
                add_wait_queue(&q->write_queue, &entry);
                atomic_inc(&q->block_count);
                for (;;) {
                        if (!atomic_read(&q->block_write)) break;
                        schedule();
                        if (signal_pending(current)) {
                                /* Undo the wait-queue state before bailing out */
                                atomic_dec(&q->block_count);
                                current->state = TASK_RUNNING;
                                remove_wait_queue(&q->write_queue, &entry);
                                atomic_dec(&q->use_count);
                                return -EINTR;
                        }
                }
                atomic_dec(&q->block_count);
                current->state = TASK_RUNNING;
                remove_wait_queue(&q->write_queue, &entry);
        }

        for (i = 0; i < d->send_count; i++) {
                idx = d->send_indices[i];
                if (idx < 0 || idx >= dma->buf_count) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Index %d (of %d max)\n",
                                  d->send_indices[i], dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->pid != current->pid) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer owned by %d\n",
                                  current->pid, buf->pid);
                        return -EINVAL;
                }
                if (buf->list != DRM_LIST_NONE) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Process %d using buffer %d on list %d\n",
                                  current->pid, buf->idx, buf->list);
                        return -EINVAL; /* Reject, as the other checks above do */
                }
                buf->used         = d->send_sizes[i];
                buf->while_locked = while_locked;
                buf->context      = d->context;
                if (!buf->used) {
                        DRM_ERROR("Queueing 0 length buffer\n");
                }
                if (buf->pending) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing pending buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                if (buf->waiting) {
                        atomic_dec(&q->use_count);
                        DRM_ERROR("Queueing waiting buffer:"
                                  " buffer %d, offset %d\n",
                                  d->send_indices[i], i);
                        return -EINVAL;
                }
                buf->waiting = 1;
                if (atomic_read(&q->use_count) == 1
                    || atomic_read(&q->finalization)) {
                        drm_free_buffer(dev, buf);
                } else {
                        drm_waitlist_put(&q->waitlist, buf);
                        atomic_inc(&q->total_queued);
                }
        }
        atomic_dec(&q->use_count);

        return 0;
}

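/* Grab free buffers of the given order from the freelist until the request
 * is satisfied or the freelist is exhausted, copying each granted buffer's
 * index and size back to user space. */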
static int drm_dma_get_buffers_of_order(drm_device_t *dev, drm_dma_t *d,
                                        int order)
{
        int               i;
        drm_buf_t         *buf;
        drm_device_dma_t  *dma = dev->dma;

        for (i = d->granted_count; i < d->request_count; i++) {
                buf = drm_freelist_get(&dma->bufs[order].freelist,
                                       d->flags & _DRM_DMA_WAIT);
                if (!buf) break;
                if (buf->pending || buf->waiting) {
                        DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
                                  buf->idx,
                                  buf->pid,
                                  buf->waiting,
                                  buf->pending);
                }
                buf->pid     = current->pid;
                copy_to_user_ret(&d->request_indices[i],
                                 &buf->idx,
                                 sizeof(buf->idx),
                                 -EFAULT);
                copy_to_user_ret(&d->request_sizes[i],
                                 &buf->total,
                                 sizeof(buf->total),
                                 -EFAULT);
                ++d->granted_count;
        }
        return 0;
}

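/* Satisfy a buffer request: try the requested size order first, then fall
 * back to smaller or larger orders if the request allows it
 * (_DRM_DMA_SMALLER_OK / _DRM_DMA_LARGER_OK). */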
int drm_dma_get_buffers(drm_device_t *dev, drm_dma_t *dma)
{
        int               order;
        int               retcode = 0;
        int               tmp_order;

        order = drm_order(dma->request_size);

        dma->granted_count = 0;
        retcode            = drm_dma_get_buffers_of_order(dev, dma, order);

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_SMALLER_OK)) {
                for (tmp_order = order - 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order >= DRM_MIN_ORDER;
                     --tmp_order) {

                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }

        if (dma->granted_count < dma->request_count
            && (dma->flags & _DRM_DMA_LARGER_OK)) {
                for (tmp_order = order + 1;
                     !retcode
                             && dma->granted_count < dma->request_count
                             && tmp_order <= DRM_MAX_ORDER;
                     ++tmp_order) {

                        retcode = drm_dma_get_buffers_of_order(dev, dma,
                                                               tmp_order);
                }
        }
        return retcode; /* Propagate any error from the helper calls above */
}