From be5bf1346e49d5c2e0080913fd55e6898a8744cf Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Sun, 1 Apr 2007 16:48:38 +1000 Subject: [PATCH] copy over some files and reorg radeon to add ttm fencing not working yet --- linux-core/Makefile.kernel | 2 +- linux-core/radeon_buffer.c | 117 +++++++++++++++++++++++++++++++++++++++++ linux-core/radeon_drv.c | 39 ++++++++++++++ linux-core/radeon_fence.c | 128 +++++++++++++++++++++++++++++++++++++++++++++ shared-core/radeon_drm.h | 9 ++++ shared-core/radeon_drv.h | 48 ++++++++++++++++- shared-core/radeon_irq.c | 20 +++---- 7 files changed, 351 insertions(+), 12 deletions(-) create mode 100644 linux-core/radeon_buffer.c create mode 100644 linux-core/radeon_fence.c diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 6f5b021..510509c 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -27,7 +27,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ nv40_graph.o -radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o +radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_fence.o radeon_buffer.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o savage-objs := savage_drv.o savage_bci.o savage_state.o diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c new file mode 100644 index 0000000..796191c --- /dev/null +++ b/linux-core/radeon_buffer.c @@ -0,0 +1,117 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + + +drm_ttm_backend_t *radeon_create_ttm_backend_entry(drm_device_t * dev) +{ + return drm_agp_init_ttm(dev, NULL); +} + +int radeon_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +{ + *class = 0; + if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) + *type = 3; + else + *type = 1; + return 0; +} + +int radeon_invalidate_caches(drm_device_t * dev, uint32_t flags) +{ + /* + * FIXME: Only emit once per batchbuffer submission. + */ +#if 0 + uint32_t flush_cmd = MI_NO_WRITE_FLUSH; + + if (flags & DRM_BO_FLAG_READ) + flush_cmd |= MI_READ_FLUSH; + if (flags & DRM_BO_FLAG_EXE) + flush_cmd |= MI_EXE_FLUSH; + + return 0; +// return radeon_emit_mi_flush(dev, flush_cmd); +#endif + return 0; +} + +uint32_t radeon_evict_mask(drm_buffer_object_t *bo) +{ + switch (bo->mem.mem_type) { + case DRM_BO_MEM_LOCAL: + case DRM_BO_MEM_TT: + return DRM_BO_FLAG_MEM_LOCAL; + default: + return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; + } +} + +int radeon_init_mem_type(drm_device_t * dev, uint32_t type, + drm_mem_type_manager_t * man) +{ + switch (type) { + case DRM_BO_MEM_LOCAL: + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CACHED; + man->drm_bus_maptype = 0; + break; + case DRM_BO_MEM_TT: + if (!(drm_core_has_AGP(dev) && dev->agp)) { + DRM_ERROR("AGP is not enabled for memory type %u\n", + (unsigned)type); + return -EINVAL; + } + man->io_offset = dev->agp->agp_info.aper_base; + man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024; + man->io_addr = NULL; + man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | + _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP; + man->drm_bus_maptype = _DRM_AGP; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); + return -EINVAL; + } + return 0; +} + +int radeon_move(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +{ + + return 0; +} + diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c index 43b9aca..6f63a7c 100644 --- a/linux-core/radeon_drv.c +++ b/linux-core/radeon_drv.c @@ -56,6 +56,38 @@ static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; + +#ifdef RADEON_HAVE_FENCE +static drm_fence_driver_t radeon_fence_driver = { + .num_classes = 1, + .wrap_diff = (1 << 30), + .flush_diff = (1 << 29), + .sequence_mask = 0xffffffffU, + .lazy_capable = 1, + .emit = radeon_fence_emit_sequence, + .poke_flush = radeon_poke_flush, + .has_irq = radeon_fence_has_irq, +}; +#endif +#ifdef RADEON_HAVE_BUFFER + +static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; +static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL}; + +static drm_bo_driver_t radeon_bo_driver = { + .mem_type_prio = radeon_mem_prios, + .mem_busy_prio = radeon_busy_prios, + .num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t), + .num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t), + .create_ttm_backend_entry = radeon_create_ttm_backend_entry, + .fence_type = radeon_fence_types, + .invalidate_caches = radeon_invalidate_caches, + .init_mem_type = radeon_init_mem_type, + .evict_mask = radeon_evict_mask, + .move = radeon_move, +}; +#endif + static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); static struct drm_driver driver = { .driver_features = @@ -100,6 +132,13 @@ static struct drm_driver driver = { 
.remove = __devexit_p(drm_cleanup_pci), }, +#ifdef RADEON_HAVE_FENCE + .fence_driver = &radeon_fence_driver, +#endif +#ifdef RADEON_HAVE_BUFFER + .bo_driver = &radeon_bo_driver, +#endif + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, diff --git a/linux-core/radeon_fence.c b/linux-core/radeon_fence.c new file mode 100644 index 0000000..57b318a --- /dev/null +++ b/linux-core/radeon_fence.c @@ -0,0 +1,128 @@ +/************************************************************************** + * + * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + **************************************************************************/ +/* + * Authors: Thomas Hellström + */ + +#include "drmP.h" +#include "drm.h" +#include "radeon_drm.h" +#include "radeon_drv.h" + +/* + * Implements an intel sync flush operation. + */ + +static void radeon_perform_flush(drm_device_t * dev) +{ + drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + drm_fence_manager_t *fm = &dev->fm; + drm_fence_class_manager_t *fc = &dev->fm.class[0]; + drm_fence_driver_t *driver = dev->driver->fence_driver; + uint32_t pending_flush_types = 0; + uint32_t flush_flags = 0; + uint32_t flush_sequence = 0; + uint32_t i_status; + uint32_t diff; + uint32_t sequence; + + if (!dev_priv) + return; + + pending_flush_types = fc->pending_flush | + ((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); + + if (pending_flush_types) { + drm_fence_handler(dev, 0, 0,0); + + } + + return; +} + +void radeon_poke_flush(drm_device_t * dev, uint32_t class) +{ + drm_fence_manager_t *fm = &dev->fm; + unsigned long flags; + + if (class != 0) + return; + + write_lock_irqsave(&fm->lock, flags); + radeon_perform_flush(dev); + write_unlock_irqrestore(&fm->lock, flags); +} + +int radeon_fence_emit_sequence(drm_device_t *dev, uint32_t class, + uint32_t flags, uint32_t *sequence, + uint32_t *native_type) +{ + drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; + RING_LOCALS; + + if (!dev_priv) + return -EINVAL; + + *native_type = DRM_FENCE_TYPE_EXE; + if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) { + *native_type |= DRM_RADEON_FENCE_TYPE_RW; + + BEGIN_RING(4); + + RADEON_FLUSH_CACHE(); + RADEON_FLUSH_ZCACHE(); + ADVANCE_RING(); + } + + radeon_emit_irq(dev); + *sequence = (uint32_t) dev_priv->counter; + + + return 0; +} + +void radeon_fence_handler(drm_device_t * dev) +{ + drm_fence_manager_t *fm = &dev->fm; + + write_lock(&fm->lock); + radeon_perform_flush(dev); + write_unlock(&fm->lock); +} + +int radeon_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +{ + /* + * We have an irq that tells us when we have a new breadcrumb. + */ + + if (class == 0 && flags == DRM_FENCE_TYPE_EXE) + return 1; + + return 0; +} diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h index e96e785..bdf4580 100644 --- a/shared-core/radeon_drm.h +++ b/shared-core/radeon_drm.h @@ -434,8 +434,17 @@ typedef struct { int pfCurrentPage; /* which buffer is being displayed? */ int crtc2_base; /* CRTC2 frame offset */ int tiling_enabled; /* set by drm, read by 2d + 3d clients */ + + unsigned int last_fence; } drm_radeon_sarea_t; +/* The only fence class we support */ +#define DRM_RADEON_FENCE_CLASS_ACCEL 0 +/* Fence type that guarantees read-write flush */ +#define DRM_RADEON_FENCE_TYPE_RW 2 +/* cache flushes programmed just before the fence */ +#define DRM_RADEON_FENCE_FLAG_FLUSHED 0x01000000 + /* WARNING: If you change any of these defines, make sure to change the * defines in the Xserver file (xf86drmRadeon.h) * diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 3e56af3..9f6cff8 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -102,6 +102,11 @@ #define DRIVER_MINOR 26 #define DRIVER_PATCHLEVEL 0 +#if defined(__linux__) +#define RADEON_HAVE_FENCE +#define RADEON_HAVE_BUFFER +#endif + /* * Radeon chip families */ @@ -276,8 +281,8 @@ typedef struct drm_radeon_private { struct mem_block *fb_heap; /* SW interrupt */ - wait_queue_head_t swi_queue; - atomic_t swi_emitted; + wait_queue_head_t irq_queue; + int counter; struct radeon_surface surfaces[RADEON_MAX_SURFACES]; struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES]; @@ -376,6 +381,30 @@ extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp, drm_file_t* filp_priv, drm_radeon_kcmd_buffer_t* cmdbuf); + +#ifdef RADEON_HAVE_FENCE +/* i915_fence.c */ + + +extern void radeon_fence_handler(drm_device_t *dev); +extern int radeon_fence_emit_sequence(drm_device_t *dev, uint32_t class, + uint32_t flags, uint32_t *sequence, + uint32_t *native_type); +extern void radeon_poke_flush(drm_device_t *dev, uint32_t class); +extern int radeon_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags); +#endif + +#ifdef RADEON_HAVE_BUFFER +/* radeon_buffer.c */ +extern drm_ttm_backend_t *radeon_create_ttm_backend_entry(drm_device_t *dev); +extern int 
radeon_fence_types(struct drm_buffer_object *bo, uint32_t *class, uint32_t *type); +extern int radeon_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags); +extern uint32_t radeon_evict_mask(drm_buffer_object_t *bo); +extern int radeon_init_mem_type(drm_device_t * dev, uint32_t type, + drm_mem_type_manager_t * man); +extern int radeon_move(drm_buffer_object_t * bo, + int evict, int no_wait, drm_bo_mem_reg_t * new_mem); +#endif /* Flags for stats.boxes */ #define RADEON_BOX_DMA_IDLE 0x1 @@ -1184,4 +1213,19 @@ do { \ write &= mask; \ } while (0) +/* Breadcrumb - swi irq */ +#define READ_BREADCRUMB(dev_priv) RADEON_READ(RADEON_LAST_SWI_REG) + +static inline int radeon_update_breadcrumb(drm_device_t *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + dev_priv->sarea_priv->last_fence = ++dev_priv->counter; + + if (dev_priv->counter > 0x7FFFFFFFUL) + dev_priv->sarea_priv->last_fence = dev_priv->counter = 1; + + return dev_priv->counter; +} + #endif /* __RADEON_DRV_H__ */ diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 3ff0baa..8678f5d 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -79,7 +79,10 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) /* SW interrupt */ if (stat & RADEON_SW_INT_TEST) { - DRM_WAKEUP(&dev_priv->swi_queue); + DRM_WAKEUP(&dev_priv->irq_queue); +#ifdef RADEON_HAVE_FENCE + radeon_fence_handler(dev); +#endif } /* VBLANK interrupt */ @@ -98,8 +101,7 @@ static int radeon_emit_irq(drm_device_t * dev) unsigned int ret; RING_LOCALS; - atomic_inc(&dev_priv->swi_emitted); - ret = atomic_read(&dev_priv->swi_emitted); + ret = radeon_update_breadcrumb(dev); BEGIN_RING(4); OUT_RING_REG(RADEON_LAST_SWI_REG, ret); @@ -110,19 +112,19 @@ static int radeon_emit_irq(drm_device_t * dev) return ret; } -static int radeon_wait_irq(drm_device_t * dev, int swi_nr) +static int radeon_wait_irq(drm_device_t * dev, int irq_nr) { drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; int ret = 0; - if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr) + if (READ_BREADCRUMB(dev_priv) >= irq_nr) return 0; dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; - DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * DRM_HZ, - RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr); + DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, + READ_BREADCRUMB(dev_priv) >= irq_nr); return ret; } @@ -224,8 +226,8 @@ void radeon_driver_irq_postinstall(drm_device_t * dev) drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; - atomic_set(&dev_priv->swi_emitted, 0); - DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); + dev_priv->counter = 0; + DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); /* Turn on SW and VBL ints */ RADEON_WRITE(RADEON_GEN_INT_CNTL, -- 2.7.4
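
A note on the fence-type selection above: radeon_fence_types() decides which fence type bits a buffer object needs. Buffers the GPU may read or write get the read/write flush type on top of the plain execution type; everything else only needs to know that command execution has passed the fence. Below is a minimal user-space sketch of that decision, using local stand-in constants rather than the kernel headers, with the same numeric results the patch stores (*type = 3 for read/write buffers, 1 otherwise).

#include <assert.h>
#include <stdint.h>

/* Local stand-ins, not the kernel DRM_* definitions. */
#define FENCE_TYPE_EXE        0x1  /* command execution reached the fence */
#define RADEON_FENCE_TYPE_RW  0x2  /* read/write caches flushed           */

#define BO_FLAG_READ   0x1  /* stand-in for DRM_BO_FLAG_READ  */
#define BO_FLAG_WRITE  0x2  /* stand-in for DRM_BO_FLAG_WRITE */

/* Same decision radeon_fence_types() makes. */
static uint32_t fence_types_for(uint32_t bo_flags)
{
	if (bo_flags & (BO_FLAG_READ | BO_FLAG_WRITE))
		return FENCE_TYPE_EXE | RADEON_FENCE_TYPE_RW;   /* == 3 */
	return FENCE_TYPE_EXE;                                  /* == 1 */
}

int main(void)
{
	assert(fence_types_for(BO_FLAG_WRITE) == 3);
	assert(fence_types_for(0) == 1);
	return 0;
}

The emit side mirrors this: when DRM_RADEON_FENCE_FLAG_FLUSHED is requested, radeon_fence_emit_sequence() adds DRM_RADEON_FENCE_TYPE_RW to the native type and emits RADEON_FLUSH_CACHE()/RADEON_FLUSH_ZCACHE() just before the fence.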
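
radeon_init_mem_type() and radeon_evict_mask() encode two small policies: TT memory is backed by the AGP aperture (base and size taken from agp_info, with aper_size given in MB), and buffers evicted from LOCAL or TT fall back to local system memory while anything else is pushed to cached TT. The sketch below models just those two mappings with simplified stand-in types, not the real drm_mem_type_manager_t structure.

#include <stdio.h>

enum mem_type { MEM_LOCAL, MEM_TT, MEM_PRIV0 };  /* stand-ins for DRM_BO_MEM_* */

/* The only aperture math in the patch: agp_info.aper_size is in MB. */
static unsigned long tt_io_size_bytes(unsigned long aper_size_mb)
{
	return aper_size_mb * 1024 * 1024;
}

/* Same fallback policy as radeon_evict_mask(). */
static const char *evict_target(enum mem_type type)
{
	switch (type) {
	case MEM_LOCAL:
	case MEM_TT:
		return "LOCAL";        /* DRM_BO_FLAG_MEM_LOCAL */
	default:
		return "TT, cached";   /* DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED */
	}
}

int main(void)
{
	printf("64 MB aperture -> %lu bytes of TT space\n", tt_io_size_bytes(64));
	printf("evicting a PRIV0 buffer -> %s\n", evict_target(MEM_PRIV0));
	return 0;
}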
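
Finally, the swi_emitted atomic is replaced by a plain counter plus the radeon_update_breadcrumb()/READ_BREADCRUMB() pair: each fence bumps the counter (wrapping back to 1 after 0x7FFFFFFF, which is why the fence driver advertises wrap_diff = 1 << 30), the CP writes that value into RADEON_LAST_SWI_REG, and a fence is treated as signalled once the register has caught up with it. Here is a minimal user-space model of that flow, with a plain variable standing in for the hardware register and illustrative function names rather than the kernel helpers.

#include <stdint.h>
#include <stdio.h>

static uint32_t counter;       /* models dev_priv->counter       */
static uint32_t last_swi_reg;  /* models RADEON_LAST_SWI_REG     */

/* mirrors radeon_update_breadcrumb(): 31-bit counter, wraps to 1 */
static uint32_t update_breadcrumb(void)
{
	if (++counter > 0x7FFFFFFFUL)
		counter = 1;
	return counter;
}

/* mirrors radeon_emit_irq(): pick the sequence the CP will write  */
static uint32_t emit_fence(void)
{
	return update_breadcrumb();
}

/* models the CP executing OUT_RING_REG(RADEON_LAST_SWI_REG, seq)  */
static void gpu_retire(uint32_t seq)
{
	last_swi_reg = seq;
}

/* mirrors the check in radeon_wait_irq(): READ_BREADCRUMB() >= nr */
static int fence_signalled(uint32_t seq)
{
	return last_swi_reg >= seq;
}

int main(void)
{
	uint32_t f1 = emit_fence();
	uint32_t f2 = emit_fence();

	gpu_retire(f1);
	printf("fence %u signalled: %d\n", (unsigned)f1, fence_signalled(f1)); /* 1 */
	printf("fence %u signalled: %d\n", (unsigned)f2, fence_signalled(f2)); /* 0 */
	return 0;
}

As in the kernel wait path, the simple >= check only holds between wrap-arounds; the wrap_diff/flush_diff values in the fence driver are there so the DRM fence core can cope once the sequence number wraps.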