Cleanup and fix support for pinned buffers.
[platform/upstream/libdrm.git] / linux-core / i915_buffer.c
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"

drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
{
        return drm_agp_init_ttm(dev, NULL);
}

int i915_fence_types(uint32_t buffer_flags, uint32_t * class, uint32_t * type)
{
        *class = 0;
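        /*
         * Readable or writable buffers need both an EXE and an RW fence
         * signal (type 3 == DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW);
         * all other buffers only need the EXE fence (type 1).
         */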
        if (buffer_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
        else
                *type = 1;
        return 0;
}

int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
{
        /*
         * FIXME: Only emit once per batchbuffer submission.
         */

        uint32_t flush_cmd = MI_NO_WRITE_FLUSH;

        if (flags & DRM_BO_FLAG_READ)
                flush_cmd |= MI_READ_FLUSH;
        if (flags & DRM_BO_FLAG_EXE)
                flush_cmd |= MI_EXE_FLUSH;

        return i915_emit_mi_flush(dev, flush_cmd);
}

int i915_init_mem_type(drm_device_t *dev, uint32_t type,
                       drm_mem_type_manager_t *man)
{
        switch (type) {
        case DRM_BO_MEM_LOCAL:
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                        _DRM_FLAG_MEMTYPE_CACHED;
                break;
        case DRM_BO_MEM_TT:
                if (!(drm_core_has_AGP(dev) && dev->agp)) {
                        DRM_ERROR("AGP is not enabled for memory type %u\n",
                                  (unsigned) type);
                        return -EINVAL;
                }
                man->io_offset = dev->agp->agp_info.aper_base;
                man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
                man->io_addr = NULL;
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                        _DRM_FLAG_MEMTYPE_CSELECT |
                        _DRM_FLAG_NEEDS_IOREMAP;
                break;
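        /*
         * DRM_BO_MEM_PRIV0 is set up like TT (mappable, ioremapped AGP
         * aperture) but marked _DRM_FLAG_MEMTYPE_FIXED, so the memory
         * manager never binds or unbinds it; presumably this is the region
         * used for pinned buffers such as the ring.
         */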
        case DRM_BO_MEM_PRIV0:
                if (!(drm_core_has_AGP(dev) && dev->agp)) {
                        DRM_ERROR("AGP is not enabled for memory type %u\n",
                                  (unsigned) type);
                        return -EINVAL;
                }
                man->io_offset = dev->agp->agp_info.aper_base;
                man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
                man->io_addr = NULL;
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                        _DRM_FLAG_MEMTYPE_FIXED |
                        _DRM_FLAG_NEEDS_IOREMAP;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
                return -EINVAL;
        }
        return 0;
}

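/*
 * Local and TT buffers are evicted to local memory; any other (fixed)
 * memory type is evicted through TT.
 */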
uint32_t i915_evict_flags(drm_device_t *dev, uint32_t type)
{
        switch (type) {
        case DRM_BO_MEM_LOCAL:
        case DRM_BO_MEM_TT:
                return DRM_BO_FLAG_MEM_LOCAL;
        default:
                return DRM_BO_FLAG_MEM_TT;
        }
}

static void i915_emit_copy_blit(drm_device_t *dev,
                                uint32_t src_offset,
                                uint32_t dst_offset,
                                uint32_t pages,
                                int direction)
{
        uint32_t cur_pages;
        uint32_t stride = PAGE_SIZE;
        drm_i915_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;

        if (!dev_priv)
                return;

        i915_kernel_lost_context(dev);
        while (pages > 0) {
                cur_pages = pages;
                if (cur_pages > 2048)
                        cur_pages = 2048;
                pages -= cur_pages;

                BEGIN_LP_RING(6);
                OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
                         XY_SRC_COPY_BLT_WRITE_RGB);
                OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
                         (1 << 25) | (direction ? (1 << 30) : 0));
                OUT_RING((cur_pages << 16) | PAGE_SIZE);
                OUT_RING(dst_offset);
                OUT_RING(stride & 0xffff);
                OUT_RING(src_offset);
                ADVANCE_LP_RING();

                /*
                 * Advance the offsets so that copies larger than 2048 pages
                 * don't blit the first chunk over and over again.
                 */
                src_offset += cur_pages << PAGE_SHIFT;
                dst_offset += cur_pages << PAGE_SHIFT;
        }
        return;
}

static int i915_move_blit(drm_buffer_object_t *bo,
                          int evict,
                          int no_wait,
                          drm_bo_mem_reg_t *new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;
        int dir = 0;

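        /*
         * If source and destination are in the same memory type and the
         * destination starts below the end of the source, the ranges may
         * overlap, so blit backwards.
         */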
        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = 1;
        }

        i915_emit_copy_blit(bo->dev,
                            old_mem->mm_node->start << PAGE_SHIFT,
                            new_mem->mm_node->start << PAGE_SHIFT,
                            new_mem->num_pages,
                            dir);

        i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);

        return drm_bo_move_accel_cleanup(bo, evict, no_wait,
                                         DRM_FENCE_TYPE_EXE |
                                         DRM_I915_FENCE_TYPE_RW,
                                         DRM_I915_FENCE_FLAG_FLUSHED,
                                         new_mem);
}

/*
 * Flip the destination ttm into cached-coherent AGP, then blit into it
 * and subsequently move it out again.
 */

static int i915_move_flip(drm_buffer_object_t *bo,
                          int evict,
                          int no_wait,
                          drm_bo_mem_reg_t *new_mem)
{
        drm_device_t *dev = bo->dev;
        drm_bo_mem_reg_t tmp_mem;
        int ret;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
                DRM_BO_FLAG_CACHED |
                DRM_BO_FLAG_FORCE_CACHING;

        ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
        if (ret)
                return ret;

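        /* Bind the ttm into the temporary cached TT placement found above. */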
        ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
        if (ret)
                goto out_cleanup;

        ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
        if (ret)
                goto out_cleanup;

        ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
        if (tmp_mem.mm_node) {
                mutex_lock(&dev->struct_mutex);
                drm_mm_put_block(tmp_mem.mm_node);
                tmp_mem.mm_node = NULL;
                mutex_unlock(&dev->struct_mutex);
        }
        return ret;
}
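/*
 * Top-level move hook: moves out of local memory use a memcpy, moves to
 * local memory go through the cached-TT flip above, and moves within the
 * aperture are blitted directly. If an accelerated path fails, fall back
 * to drm_bo_move_memcpy().
 */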
int i915_move(drm_buffer_object_t *bo,
              int evict,
              int no_wait,
              drm_bo_mem_reg_t *new_mem)
{
        drm_bo_mem_reg_t *old_mem = &bo->mem;

        if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
                return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
                if (i915_move_flip(bo, evict, no_wait, new_mem))
                        return drm_bo_move_memcpy(bo, evict, no_wait,
                                                  new_mem);
        } else {
                if (i915_move_blit(bo, evict, no_wait, new_mem))
                        return drm_bo_move_memcpy(bo, evict, no_wait,
                                                  new_mem);
        }
        return 0;
}