/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"
#include "i915_drm.h"
#include "i915_drv.h"

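/*
 * TTM backend creation: i915 binds buffers through the AGP aperture, so the
 * generic AGP TTM backend does all the real work here.
 */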
struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
{
	return drm_agp_init_ttm(dev);
}

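/*
 * Report the fence type mask a buffer needs.  The magic numbers below appear
 * to be DRM_FENCE_TYPE_EXE (1), OR'ed with DRM_I915_FENCE_TYPE_RW (2) for
 * buffers that are read or written, matching the combination that
 * i915_move_blit() passes to drm_bo_move_accel_cleanup() further down.
 */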
int i915_fence_type(struct drm_buffer_object *bo,
		    uint32_t *fclass,
		    uint32_t *type)
{
	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;
	else
		*type = 1;
	return 0;
}

int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
{
	/*
	 * FIXME: Only emit once per batchbuffer submission.
	 */

	uint32_t flush_cmd = MI_NO_WRITE_FLUSH;

	if (flags & DRM_BO_FLAG_READ)
		flush_cmd |= MI_READ_FLUSH;
	if (flags & DRM_BO_FLAG_EXE)
		flush_cmd |= MI_EXE_FLUSH;

	return i915_emit_mi_flush(dev, flush_cmd);
}

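/*
 * Describe each memory pool to the buffer manager.  LOCAL is cached system
 * memory; TT is memory dynamically bound through the AGP aperture; PRIV0
 * appears to be a fixed, pre-bound slice of the same aperture (hence
 * _DRM_FLAG_MEMTYPE_FIXED), e.g. for pinned scanout buffers.
 */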
int i915_init_mem_type(struct drm_device *dev, uint32_t type,
		       struct drm_mem_type_manager *man)
{
	switch (type) {
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
		    _DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		man->gpu_offset = 0;
		break;
	case DRM_BO_MEM_TT:
		if (!(drm_core_has_AGP(dev) && dev->agp)) {
			DRM_ERROR("AGP is not enabled for memory type %u\n",
				  (unsigned)type);
			return -EINVAL;
		}
		man->io_offset = dev->agp->agp_info.aper_base;
		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
		man->io_addr = NULL;
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
		    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
		man->drm_bus_maptype = _DRM_AGP;
		man->gpu_offset = 0;
		break;
	case DRM_BO_MEM_PRIV0:
		if (!(drm_core_has_AGP(dev) && dev->agp)) {
			DRM_ERROR("AGP is not enabled for memory type %u\n",
				  (unsigned)type);
			return -EINVAL;
		}
		man->io_offset = dev->agp->agp_info.aper_base;
		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
		man->io_addr = NULL;
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
		man->drm_bus_maptype = _DRM_AGP;
		man->gpu_offset = 0;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

/*
 * i915_evict_flags:
 *
 * @bo: the buffer object to be evicted
 *
 * Return the bo flags for a buffer which is not mapped to the hardware.
 * These will be placed in proposed_flags so that when the move is
 * finished, they'll end up in bo->mem.flags
 */
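/*
 * LOCAL and TT buffers are evicted to plain system memory; anything else
 * (only PRIV0 here) is evicted to cached TT space instead.
 */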
uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}
}

#if 0 /* See comment below */

static void i915_emit_copy_blit(struct drm_device * dev,
				uint32_t src_offset,
				uint32_t dst_offset,
				uint32_t pages, int direction)
{
	uint32_t cur_pages;
	uint32_t stride = PAGE_SIZE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (!dev_priv)
		return;

	i915_kernel_lost_context(dev);
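	/*
	 * Copy in chunks: each page becomes one PAGE_SIZE-wide row of a 2D
	 * blit with a PAGE_SIZE pitch, so the page count is the blit height,
	 * capped at 2048 rows per command.  Note that src_offset/dst_offset
	 * are never advanced between chunks; this compiled-out code would
	 * need that fixed before being re-enabled.
	 */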
	while (pages > 0) {
		cur_pages = pages;
		if (cur_pages > 2048)
			cur_pages = 2048;
		pages -= cur_pages;

		BEGIN_LP_RING(6);
		OUT_RING(SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
			 XY_SRC_COPY_BLT_WRITE_RGB);
		OUT_RING((stride & 0xffff) | (0xcc << 16) | (1 << 24) |
			 (1 << 25) | (direction ? (1 << 30) : 0));
		OUT_RING((cur_pages << 16) | PAGE_SIZE);
		OUT_RING(dst_offset);
		OUT_RING(stride & 0xffff);
		OUT_RING(src_offset);
		ADVANCE_LP_RING();
	}
	return;
}

static int i915_move_blit(struct drm_buffer_object * bo,
			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	int dir = 0;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = 1;
	}

	i915_emit_copy_blit(bo->dev,
			    old_mem->mm_node->start << PAGE_SHIFT,
			    new_mem->mm_node->start << PAGE_SHIFT,
			    new_mem->num_pages, dir);

	i915_emit_mi_flush(bo->dev, MI_READ_FLUSH | MI_EXE_FLUSH);

	return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
					 DRM_FENCE_TYPE_EXE |
					 DRM_I915_FENCE_TYPE_RW,
					 DRM_I915_FENCE_FLAG_FLUSHED, new_mem);
}

/*
 * Flip destination ttm into cached-coherent AGP,
 * then blit and subsequently move out again.
 */

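/*
 * That is: reserve space in the cached TT pool (drm_bo_mem_space), bind the
 * buffer's backing pages there (drm_bind_ttm), blit the old contents into
 * them with the GPU, and finally let drm_bo_move_ttm() finish the move to
 * the real destination.
 */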
static int i915_move_flip(struct drm_buffer_object * bo,
			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	tmp_mem.proposed_flags = DRM_BO_FLAG_MEM_TT |
	    DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
	if (ret)
		return ret;

	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = i915_move_blit(bo, 1, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (tmp_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}

#endif

/*
 * Disable i915_move_flip for now, since we can't guarantee that the hardware
 * lock is held here. To re-enable we need to make sure either
 * a) The X server is using DRM to submit commands to the ring, or
 * b) DRM can use the HP ring for these blits. This means i915 needs to
 *    implement a new ring submission mechanism and fence class.
 */
int i915_move(struct drm_buffer_object *bo,
	      int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (1) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (1) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	return 0;
}
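/*
 * Note the intended pattern above: the accelerated move helpers return
 * non-zero on failure, at which point we fall back to a CPU memcpy move.
 * While they remain disabled, the fallback is taken unconditionally.
 */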

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
static inline void clflush(volatile void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
#endif

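/*
 * Flush one page out of the CPU caches, one cache line at a time;
 * boot_cpu_data.x86_clflush_size is the granularity clflush operates on.
 */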
static inline void drm_cache_flush_addr(void *virt)
{
	int i;

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(virt + i);
}

static inline void drm_cache_flush_page(struct page *p)
{
	drm_cache_flush_addr(page_address(p));
}

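/*
 * Flush a ttm's backing pages out of the CPU caches, e.g. before they are
 * bound into the aperture and accessed by the GPU.  Old x86-32 parts may
 * lack clflush entirely, in which case we fall back to a full wbinvd.
 */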
void i915_flush_ttm(struct drm_ttm *ttm)
{
	int i;

	if (!ttm)
		return;

	DRM_MEMORYBARRIER();

#ifdef CONFIG_X86_32
	/* Hopefully nobody has built an x86-64 processor without clflush */
	if (!cpu_has_clflush) {
		wbinvd();
		DRM_MEMORYBARRIER();
		return;
	}
#endif

	for (i = ttm->num_pages - 1; i >= 0; i--)
		drm_cache_flush_page(drm_ttm_get_page(ttm, i));

	DRM_MEMORYBARRIER();
}