mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o
+ i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
+ intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
- nouveau_object.o nouveau_irq.o \
+ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
+ nouveau_sgdma.o nouveau_dma.o \
nv04_timer.o \
- nv04_mc.o nv40_mc.o \
+ nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
- nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
- nv40_graph.o
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_graph.o nv10_graph.o nv20_graph.o \
+ nv40_graph.o nv50_graph.o \
+ nv04_instmem.o nv50_instmem.o
- radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
+ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_fence.o radeon_buffer.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);
-static int ati_pcigart_bind_ttm(drm_ttm_backend_t *backend,
- unsigned long offset,
- int cached)
+
+ static int ati_pcigart_needs_unbind_cache_adjust(drm_ttm_backend_t *backend)
+ {
+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+ }
+
+ static int ati_pcigart_populate(drm_ttm_backend_t *backend,
+ unsigned long num_pages,
+ struct page **pages)
+ {
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+ DRM_ERROR("%ld\n", num_pages);
+ atipci_be->pages = pages;
+ atipci_be->num_pages = num_pages;
+ atipci_be->populated = 1;
+ return 0;
+ }
+
- DRM_ERROR("Offset is %08lX\n", offset);
++static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
++ struct drm_bo_mem_reg *bo_mem)
+ {
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ off_t j;
+ int i;
+ struct ati_pcigart_info *info = atipci_be->gart_info;
+ u32 *pci_gart;
+ u32 page_base;
++ unsigned long offset = bo_mem->mm_node->start;
+ pci_gart = info->addr;
+
-static drm_ttm_backend_func_t ati_pcigart_ttm_backend =
++ DRM_ERROR("Offset is %08lX\n", bo_mem->mm_node->start);
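+ /* Refuse to bind if any entry in the requested GART range is already in use. */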
+ j = offset;
+ while (j < (offset + atipci_be->num_pages)) {
+ if (get_page_base_from_table(info, pci_gart+j))
+ return -EBUSY;
+ j++;
+ }
+
+ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+ struct page *cur_page = atipci_be->pages[i];
+ /* write value */
+ page_base = page_to_phys(cur_page);
+ insert_page_into_table(info, page_base, pci_gart + j);
+ }
+
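+ /* Make the new GART entries visible to the device: flush CPU caches on x86, otherwise rely on a memory barrier. */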
+ #if defined(__i386__) || defined(__x86_64__)
+ wbinvd();
+ #else
+ mb();
+ #endif
+
+ atipci_be->gart_flush_fn(atipci_be->dev);
+
+ atipci_be->bound = 1;
+ atipci_be->offset = offset;
+ /* need to traverse table and add entries */
+ DRM_DEBUG("\n");
+ return 0;
+ }
+
+ static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
+ {
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ struct ati_pcigart_info *info = atipci_be->gart_info;
+ unsigned long offset = atipci_be->offset;
+ int i;
+ off_t j;
+ u32 *pci_gart = info->addr;
+
+ DRM_DEBUG("\n");
+
+ if (atipci_be->bound != 1)
+ return -EINVAL;
+
+ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+ *(pci_gart + j) = 0;
+ }
+ atipci_be->gart_flush_fn(atipci_be->dev);
+ atipci_be->bound = 0;
+ atipci_be->offset = 0;
+ return 0;
+ }
+
+ static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
+ {
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+ DRM_DEBUG("\n");
+ if (atipci_be->pages) {
+ backend->func->unbind(backend);
+ atipci_be->pages = NULL;
+
+ }
+ atipci_be->num_pages = 0;
+ }
+
+ static void ati_pcigart_destroy_ttm(drm_ttm_backend_t *backend)
+ {
+ ati_pcigart_ttm_backend_t *atipci_be;
+ if (backend) {
+ DRM_DEBUG("\n");
+ atipci_be = container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ if (atipci_be) {
+ if (atipci_be->pages) {
+ backend->func->clear(backend);
+ }
+ drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
+ }
+ }
+ }
+
-drm_ttm_backend_t *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
++static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
+ {
+ .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
+ .populate = ati_pcigart_populate,
+ .clear = ati_pcigart_clear_ttm,
+ .bind = ati_pcigart_bind_ttm,
+ .unbind = ati_pcigart_unbind_ttm,
+ .destroy = ati_pcigart_destroy_ttm,
+ };
+
- atipci_be->backend.mem_type = DRM_BO_MEM_TT;
++struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
+ {
+ ati_pcigart_ttm_backend_t *atipci_be;
+
+ atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
+ if (!atipci_be)
+ return NULL;
+
+ atipci_be->populated = 0;
+ atipci_be->backend.func = &ati_pcigart_ttm_backend;
+ atipci_be->gart_info = info;
+ atipci_be->gart_flush_fn = gart_flush_fn;
+ atipci_be->dev = dev;
+
+ return &atipci_be->backend;
+ }
+ EXPORT_SYMBOL(ati_pcigart_init_ttm);
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
int populated;
-} drm_agp_ttm_backend_t;
+};
#endif
- drm_ttm_backend_t backend;
+ typedef struct ati_pcigart_ttm_backend {
- drm_device_t *dev;
++ struct drm_ttm_backend backend;
+ int populated;
+ void (*gart_flush_fn)(struct drm_device *dev);
+ struct ati_pcigart_info *gart_info;
+ unsigned long offset;
+ struct page **pages;
+ int num_pages;
+ int bound;
++ struct drm_device *dev;
+ } ati_pcigart_ttm_backend_t;
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
struct proc_dir_entry *dev_root);
/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(drm_sg_mem_t * entry);
-extern int drm_sg_alloc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_sg_free(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info);
-extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info);
-extern drm_ttm_backend_t *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
+extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info);
++extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
-extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size,
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
-extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah);
-extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
--- /dev/null
-drm_ttm_backend_t *radeon_create_ttm_backend_entry(drm_device_t * dev)
+ /**************************************************************************
+ *
+ * Copyright 2007 Dave Airlie
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+ /*
+ * Authors: Dave Airlie <airlied@linux.ie>
+ */
+
+ #include "drmP.h"
+ #include "radeon_drm.h"
+ #include "radeon_drv.h"
+
-int radeon_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
++struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev)
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if(dev_priv->flags & RADEON_IS_AGP)
+ return drm_agp_init_ttm(dev);
+ else
+ return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush);
+ }
+
-int radeon_invalidate_caches(drm_device_t * dev, uint32_t flags)
++int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type)
+ {
+ *class = 0;
+ if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ *type = 3;
+ else
+ *type = 1;
+ return 0;
+ }
+
-uint32_t radeon_evict_mask(drm_buffer_object_t *bo)
++int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_FLUSH_ZCACHE();
+ ADVANCE_RING();
+ return 0;
+ }
+
-int radeon_init_mem_type(drm_device_t * dev, uint32_t type,
- drm_mem_type_manager_t * man)
++uint32_t radeon_evict_mask(struct drm_buffer_object *bo)
+ {
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return DRM_BO_FLAG_MEM_LOCAL;
+ case DRM_BO_MEM_VRAM:
+ /* Evict large VRAM buffers to TT, small ones straight to system memory. */
+ if (bo->mem.num_pages > 128)
+ return DRM_BO_FLAG_MEM_TT;
+ else
+ return DRM_BO_FLAG_MEM_LOCAL;
+ default:
+ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+ }
+ }
+
-static void radeon_emit_copy_blit(drm_device_t * dev,
++int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
++ struct drm_mem_type_manager * man)
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_VRAM:
+ man->flags = _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+ man->io_addr = NULL;
+ man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+ man->io_offset = drm_get_resource_start(dev, 0);
+ man->io_size = drm_get_resource_len(dev, 0);
+ break;
+ case DRM_BO_MEM_TT:
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ } else {
+ man->io_offset = dev_priv->gart_vm_start;
+ man->io_size = dev_priv->gart_size;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = _DRM_SCATTER_GATHER;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
-static int radeon_move_blit(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t *new_mem)
++static void radeon_emit_copy_blit(struct drm_device * dev,
+ uint32_t src_offset,
+ uint32_t dst_offset,
+ uint32_t pages, int direction)
+ {
+ uint32_t cur_pages;
+ uint32_t stride = PAGE_SIZE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ uint32_t format, height;
+ RING_LOCALS;
+
+ if (!dev_priv)
+ return;
+
+ /* 32-bit copy format */
+ format = RADEON_COLOR_FORMAT_ARGB8888;
+
+ /* radeon limited to 16k stride */
+ stride &= 0x3fff;
+ while(pages > 0) {
+ cur_pages = pages;
+ if (cur_pages > 2048)
+ cur_pages = 2048;
+ pages -= cur_pages;
+
+ /* needs verification */
+ BEGIN_RING(7);
+ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+ if (direction) {
+ OUT_RING((stride << 22) | (src_offset >> 10));
+ OUT_RING((stride << 22) | (dst_offset >> 10));
+ } else {
+ OUT_RING((stride << 22) | (dst_offset >> 10));
+ OUT_RING((stride << 22) | (src_offset >> 10));
+ }
+ OUT_RING(0);
+ OUT_RING(pages); /* x - y */
+ OUT_RING((stride << 16) | cur_pages);
+ ADVANCE_RING();
+ }
+
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+
+ return;
+ }
+
- drm_bo_mem_reg_t *old_mem = &bo->mem;
++static int radeon_move_blit(struct drm_buffer_object * bo,
++ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+ {
-static int radeon_move_flip(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
++ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ int dir = 0;
+
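+ /* If the source and destination ranges overlap within the same memory type, blit in the reverse direction to avoid clobbering data. */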
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->mm_node->start <
+ old_mem->mm_node->start + old_mem->mm_node->size)) {
+ dir = 1;
+ }
+
+ radeon_emit_copy_blit(bo->dev,
+ old_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->num_pages, dir);
+
+
+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
+ DRM_FENCE_TYPE_EXE |
+ DRM_RADEON_FENCE_TYPE_RW,
+ DRM_RADEON_FENCE_FLAG_FLUSHED, new_mem);
+ }
+
- drm_device_t *dev = bo->dev;
- drm_bo_mem_reg_t tmp_mem;
++static int radeon_move_flip(struct drm_buffer_object * bo,
++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+ {
- ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
++ struct drm_device *dev = bo->dev;
++ struct drm_bo_mem_reg tmp_mem;
+ int ret;
+
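+ /* The blitter cannot target local memory directly; stage the data through a cached TT buffer and let the TTM move finish the job. */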
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+
+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+ if (ret)
+ return ret;
+
-int radeon_move(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
++ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ out_cleanup:
+ if (tmp_mem.mm_node) {
+ mutex_lock(&dev->struct_mutex);
+ if (tmp_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(tmp_mem.mm_node);
+ tmp_mem.mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return ret;
+ }
+
- drm_bo_mem_reg_t *old_mem = &bo->mem;
++int radeon_move(struct drm_buffer_object * bo,
++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+ {
++ struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+ DRM_DEBUG("\n");
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (radeon_move_flip(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else {
+ if (radeon_move_blit(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ return 0;
+ }
+
radeon_PCI_IDS
};
-static drm_fence_driver_t radeon_fence_driver = {
+
+ #ifdef RADEON_HAVE_FENCE
-static drm_bo_driver_t radeon_bo_driver = {
++static struct drm_fence_driver radeon_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .lazy_capable = 1,
+ .emit = radeon_fence_emit_sequence,
+ .poke_flush = radeon_poke_flush,
+ .has_irq = radeon_fence_has_irq,
+ };
+ #endif
+ #ifdef RADEON_HAVE_BUFFER
+
+ static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
+ static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
+
++static struct drm_bo_driver radeon_bo_driver = {
+ .mem_type_prio = radeon_mem_prios,
+ .mem_busy_prio = radeon_busy_prios,
+ .num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t),
+ .num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t),
+ .create_ttm_backend_entry = radeon_create_ttm_backend_entry,
+ .fence_type = radeon_fence_types,
+ .invalidate_caches = radeon_invalidate_caches,
+ .init_mem_type = radeon_init_mem_type,
+ .evict_mask = radeon_evict_mask,
+ .move = radeon_move,
+ };
+ #endif
+
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
--- /dev/null
-static void radeon_perform_flush(drm_device_t * dev)
+ /**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+ /*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+ #include "drmP.h"
+ #include "drm.h"
+ #include "radeon_drm.h"
+ #include "radeon_drv.h"
+
+ /*
+ * Implements a radeon sync flush operation.
+ */
+
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &dev->fm.class[0];
- drm_fence_driver_t *driver = dev->driver->fence_driver;
++static void radeon_perform_flush(struct drm_device * dev)
+ {
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
- drm_fence_handler(dev, 0, sequence, pending_flush_types);
++ struct drm_fence_manager *fm = &dev->fm;
++ struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
++ struct drm_fence_driver *driver = dev->driver->fence_driver;
+ uint32_t pending_flush_types = 0;
+ uint32_t sequence;
+
+ if (!dev_priv)
+ return;
+
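+ /* Collect every fence type that still has a flush outstanding, then report the current breadcrumb for them. */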
+ pending_flush_types = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+ if (pending_flush_types) {
+ sequence = READ_BREADCRUMB(dev_priv);
+
-void radeon_poke_flush(drm_device_t * dev, uint32_t class)
++ drm_fence_handler(dev, 0, sequence, pending_flush_types, 0);
+ }
+
+ return;
+ }
+
- drm_fence_manager_t *fm = &dev->fm;
++void radeon_poke_flush(struct drm_device * dev, uint32_t class)
+ {
-int radeon_fence_emit_sequence(drm_device_t *dev, uint32_t class,
++ struct drm_fence_manager *fm = &dev->fm;
+ unsigned long flags;
+
+ if (class != 0)
+ return;
+
+ write_lock_irqsave(&fm->lock, flags);
+ radeon_perform_flush(dev);
+ write_unlock_irqrestore(&fm->lock, flags);
+ }
+
-void radeon_fence_handler(drm_device_t * dev)
++int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+ uint32_t flags, uint32_t *sequence,
+ uint32_t *native_type)
+ {
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ RING_LOCALS;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) {
+ *native_type |= DRM_RADEON_FENCE_TYPE_RW;
+
+ BEGIN_RING(4);
+
+ RADEON_FLUSH_CACHE();
+ RADEON_FLUSH_ZCACHE();
+ ADVANCE_RING();
+ }
+
+ radeon_emit_irq(dev);
+ *sequence = (uint32_t) dev_priv->counter;
+
+
+ return 0;
+ }
+
- drm_fence_manager_t *fm = &dev->fm;
++void radeon_fence_handler(struct drm_device * dev)
+ {
-int radeon_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
++ struct drm_fence_manager *fm = &dev->fm;
+
+ write_lock(&fm->lock);
+ radeon_perform_flush(dev);
+ write_unlock(&fm->lock);
+ }
+
++int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+ {
+ /*
+ * We have an irq that tells us when we have a new breadcrumb.
+ */
+
+ if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
+ return 1;
+
+ return 0;
+ }
}
}
-
+ void radeon_gart_flush(struct drm_device *dev)
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
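+ /* Invalidate the on-chip GART TLB; the mechanism differs per GART type. */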
+ if (dev_priv->flags & RADEON_IS_IGPGART) {
+ RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x1);
+ RADEON_READ_IGPGART(dev_priv, RADEON_IGPGART_FLUSH);
+ RADEON_WRITE_IGPGART(RADEON_IGPGART_FLUSH, 0x0);
+ } else if (dev_priv->flags & RADEON_IS_PCIE) {
+ u32 tmp = RADEON_READ_PCIE(dev_priv, RADEON_PCIE_TX_GART_CNTL);
+ tmp |= RADEON_PCIE_TX_GART_INVALIDATE_TLB;
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ tmp &= ~RADEON_PCIE_TX_GART_INVALIDATE_TLB;
+ RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+ } else {
+ /* Nothing to flush for the remaining GART types. */
+ }
+
+ }
+
-static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
}
/* radeon_cp.c */
-extern int radeon_cp_init(DRM_IOCTL_ARGS);
-extern int radeon_cp_start(DRM_IOCTL_ARGS);
-extern int radeon_cp_stop(DRM_IOCTL_ARGS);
-extern int radeon_cp_reset(DRM_IOCTL_ARGS);
-extern int radeon_cp_idle(DRM_IOCTL_ARGS);
-extern int radeon_cp_resume(DRM_IOCTL_ARGS);
-extern int radeon_engine_reset(DRM_IOCTL_ARGS);
-extern int radeon_fullscreen(DRM_IOCTL_ARGS);
-extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+ extern void radeon_gart_flush(struct drm_device *dev);
-extern void radeon_freelist_reset(drm_device_t * dev);
-extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
-extern int radeon_mem_alloc(DRM_IOCTL_ARGS);
-extern int radeon_mem_free(DRM_IOCTL_ARGS);
-extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_mem_takedown(struct mem_block **heap);
-extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+ struct mem_block *heap);
/* radeon_irq.c */
-extern int radeon_irq_emit(DRM_IOCTL_ARGS);
-extern int radeon_irq_wait(DRM_IOCTL_ARGS);
-extern int radeon_emit_irq(drm_device_t * dev);
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
++extern int radeon_emit_irq(struct drm_device * dev);
-extern void radeon_do_release(drm_device_t * dev);
-extern int radeon_driver_vblank_wait(drm_device_t * dev,
+extern void radeon_do_release(struct drm_device * dev);
+extern int radeon_driver_vblank_wait(struct drm_device * dev,
unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(drm_device_t * dev,
+extern int radeon_driver_vblank_wait2(struct drm_device * dev,
unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
-extern void radeon_driver_irq_preinstall(drm_device_t * dev);
-extern void radeon_driver_irq_postinstall(drm_device_t * dev);
-extern void radeon_driver_irq_uninstall(drm_device_t * dev);
-extern int radeon_vblank_crtc_get(drm_device_t *dev);
-extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
-extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
- drm_file_t* filp_priv,
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t* cmdbuf);
-extern void radeon_fence_handler(drm_device_t *dev);
-extern int radeon_fence_emit_sequence(drm_device_t *dev, uint32_t class,
+
+ #ifdef RADEON_HAVE_FENCE
+ /* radeon_fence.c */
+
+
-extern void radeon_poke_flush(drm_device_t *dev, uint32_t class);
-extern int radeon_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
++extern void radeon_fence_handler(struct drm_device *dev);
++extern int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+ uint32_t flags, uint32_t *sequence,
+ uint32_t *native_type);
-extern drm_ttm_backend_t *radeon_create_ttm_backend_entry(drm_device_t *dev);
++extern void radeon_poke_flush(struct drm_device *dev, uint32_t class);
++extern int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
+ #endif
+
+ #ifdef RADEON_HAVE_BUFFER
+ /* radeon_buffer.c */
-extern int radeon_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
-extern uint32_t radeon_evict_mask(drm_buffer_object_t *bo);
-extern int radeon_init_mem_type(drm_device_t * dev, uint32_t type,
- drm_mem_type_manager_t * man);
-extern int radeon_move(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
++extern struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device *dev);
+ extern int radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type);
++extern int radeon_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
++extern uint32_t radeon_evict_mask(struct drm_buffer_object *bo);
++extern int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
++ struct drm_mem_type_manager * man);
++extern int radeon_move(struct drm_buffer_object * bo,
++ int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+ #endif
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
write &= mask; \
} while (0)
-static inline int radeon_update_breadcrumb(drm_device_t *dev)
+ /* Breadcrumb - swi irq */
+ #define READ_BREADCRUMB(dev_priv) RADEON_READ(RADEON_LAST_SWI_REG)
+
++static inline int radeon_update_breadcrumb(struct drm_device *dev)
+ {
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ dev_priv->sarea_priv->last_fence = ++dev_priv->counter;
+
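+ /* Keep the sequence in the positive 32-bit range; wrap back to 1 well before overflow. */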
+ if (dev_priv->counter > 0x7FFFFFFFUL)
+ dev_priv->sarea_priv->last_fence = dev_priv->counter = 1;
+
+ return dev_priv->counter;
+ }
+
#endif /* __RADEON_DRV_H__ */
if (!stat)
return IRQ_NONE;
+ stat &= dev_priv->irq_enable_reg;
+
/* SW interrupt */
if (stat & RADEON_SW_INT_TEST) {
- DRM_WAKEUP(&dev_priv->swi_queue);
+ DRM_WAKEUP(&dev_priv->irq_queue);
+ #ifdef RADEON_HAVE_FENCE
+ radeon_fence_handler(dev);
+ #endif
}
/* VBLANK interrupt */
return IRQ_HANDLED;
}
- static int radeon_emit_irq(struct drm_device * dev)
-int radeon_emit_irq(drm_device_t * dev)
++int radeon_emit_irq(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
(drm_radeon_private_t *) dev->dev_private;
int ret = 0;
- if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
- if (READ_BREADCRUMB(dev_priv) >= irq_nr)
++ if (READ_BREADCRUMB(dev_priv) >= swi_nr)
return 0;
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
- DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ,
- RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
+ DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
++ READ_BREADCRUMB(dev_priv) >= swi_nr);
return ret;
}