/**************************************************************************
 *
 * Copyright 2007 Dave Airlie
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Dave Airlie <airlied@linux.ie>
 */

#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
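
/*
 * Select the TTM backend used to bind buffers into the GART: AGP
 * devices go through the core AGP backend, everything else through
 * the ATI PCI GART.
 */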
struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	if (dev_priv->flags & RADEON_IS_AGP)
		return drm_agp_init_ttm(dev);
	else
		return ati_pcigart_init_ttm(dev, &dev_priv->gart_info,
					    radeon_gart_flush);
}
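
/*
 * Report the fence class and types this buffer needs: a single fence
 * class (0) with the EXE type is enough to order all radeon moves.
 */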
int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class,
		       uint32_t *type)
{
	*class = 0;
	*type = 1;
	return 0;
}
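
/*
 * Flush the destination pixel cache and the depth cache through the
 * CP ring so later reads do not see stale data; a no-op while the CP
 * is not running.
 */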
int radeon_invalidate_caches(struct drm_device *dev, uint64_t flags)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	if (!dev_priv->cp_running)
		return 0;

	BEGIN_RING(4);
	RADEON_FLUSH_CACHE();
	RADEON_FLUSH_ZCACHE();
	ADVANCE_RING();

	COMMIT_RING();
	return 0;
}
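
/*
 * Describe each memory pool to the buffer manager: system RAM, the
 * VRAM aperture behind PCI BAR 0, and the AGP or PCI GART aperture,
 * including how each may be mapped by the CPU.
 */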
int radeon_init_mem_type(struct drm_device *dev, uint32_t type,
			 struct drm_mem_type_manager *man)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	switch (type) {
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			_DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED |
			_DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 0);
		man->io_size = drm_get_resource_len(dev, 0);
		break;
	case DRM_BO_MEM_TT:
		if (dev_priv->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(dev) && dev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = dev->agp->agp_info.aper_base;
			man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
			man->io_addr = NULL;
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				_DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
			man->drm_bus_maptype = _DRM_AGP;
		} else {
			man->io_offset = dev_priv->gart_vm_start;
			man->io_size = dev_priv->gart_size;
			man->io_addr = NULL;
			man->flags = _DRM_FLAG_MEMTYPE_CSELECT |
				_DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
			man->drm_bus_maptype = _DRM_SCATTER_GATHER;
		}
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
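
/*
 * Copy pages with the 2D blitter: each page is treated as one row of
 * a 32-bpp blit, at most 2048 rows per BITBLT_MULTI packet, with the
 * blit direction chosen by the caller to cope with overlapping copies.
 */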
static void radeon_emit_copy_blit(struct drm_device *dev,
				  uint32_t src_offset,
				  uint32_t dst_offset,
				  uint32_t pages, int direction)
{
	uint32_t cur_pages;
	uint32_t stride = PAGE_SIZE;
	uint32_t pitch;
	drm_radeon_private_t *dev_priv = dev->dev_private;
	uint32_t format;
	RING_LOCALS;

	if (!dev_priv)
		return;

	/* 32-bit copy format */
	format = RADEON_COLOR_FORMAT_ARGB8888;

	/* radeon limited to 16k stride */
	stride &= 0x3fff;
	/* the pitch/offset fields take the pitch in 64-byte units and
	 * the offset in 1KB units */
	pitch = stride >> 6;

	while (pages > 0) {
		cur_pages = pages;
		if (cur_pages > 2048)
			cur_pages = 2048;
		pages -= cur_pages;

		/* needs verification */
		BEGIN_RING(7);
		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
			 RADEON_GMC_BRUSH_NONE |
			 (format << 8) |
			 RADEON_GMC_SRC_DATATYPE_COLOR |
			 RADEON_ROP3_S |
			 RADEON_DP_SRC_SOURCE_MEMORY |
			 RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
		if (direction) {
			OUT_RING((pitch << 22) | (src_offset >> 10));
			OUT_RING((pitch << 22) | (dst_offset >> 10));
		} else {
			OUT_RING((pitch << 22) | (dst_offset >> 10));
			OUT_RING((pitch << 22) | (src_offset >> 10));
		}
		OUT_RING(0);
		OUT_RING(pages); /* x - y */
		OUT_RING(((stride / 4) << 16) | cur_pages); /* width (pixels) x height (rows) */
		ADVANCE_RING();
	}

	BEGIN_RING(2);
	RADEON_WAIT_UNTIL_2D_IDLE();
	ADVANCE_RING();
}
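
/*
 * Blit a buffer between its old and new placement and fence the
 * result; the direction flag is set when the two ranges overlap
 * within the same memory pool.
 */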
static int radeon_move_blit(struct drm_buffer_object *bo,
			    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	int dir = 0;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = 1;
	}

	radeon_emit_copy_blit(bo->dev,
			      old_mem->mm_node->start << PAGE_SHIFT,
			      new_mem->mm_node->start << PAGE_SHIFT,
			      new_mem->num_pages, dir);

	return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
					 DRM_FENCE_TYPE_EXE, 0,
					 new_mem);
}
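
/*
 * Move a buffer out of VRAM in two hops: grab a temporary GART-visible
 * TT range, blit into it, then let TTM finish the move to system
 * memory and release the temporary node.
 */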
static int radeon_move_flip(struct drm_buffer_object *bo,
			    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	/*
	 * tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
	 *	DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
	 */

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
	if (ret)
		return ret;

	ret = drm_ttm_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (tmp_mem.mm_node != bo->pinned_node)
			drm_memrange_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}
	return ret;
}
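
/*
 * Driver move hook. The accelerated blit and flip paths are disabled
 * until the blit packet above is verified, so every move currently
 * falls back to drm_bo_move_memcpy().
 */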
int radeon_move(struct drm_buffer_object *bo,
		int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
#if 0
	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (radeon_move_flip(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (radeon_move_blit(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	return 0;
#endif
}

/*
 * radeon_evict_flags:
 *
 * @bo: the buffer object to be evicted
 *
 * Return the bo flags for a buffer which is not mapped to the hardware.
 * These will be placed in proposed_flags so that when the move is
 * finished, they'll end up in bo->mem.flags.
 */
uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}
}