Leftover files from previous commit.
author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Fri, 16 Feb 2007 19:25:26 +0000 (20:25 +0100)
committer Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
Fri, 16 Feb 2007 19:25:26 +0000 (20:25 +0100)
linux-core/via_buffer.c [new file with mode: 0644]
linux-core/via_fence.c [new file with mode: 0644]

diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
new file mode 100644 (file)
index 0000000..f156ee6
--- /dev/null
@@ -0,0 +1,163 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+/*
+ * via_create_ttm_backend_entry - create the TTM backend for a device.
+ * @dev: DRM device.
+ *
+ * VIA uses the generic AGP-backed TTM; no driver-private backend
+ * data is needed, hence the NULL second argument.
+ */
+drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
+{
+	return drm_agp_init_ttm(dev, NULL);
+}
+
+/*
+ * via_fence_types - report fence class and fence types for a buffer.
+ * @bo: buffer object (currently unused; all buffers fence alike).
+ * @class: returns the fence class; only class 0 exists on VIA.
+ * @type: returns the mask of fence types the buffer must wait on.
+ *
+ * Always returns 0.
+ */
+int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+{
+	*class = 0;
+	/*
+	 * Spell out the previous magic 0x3: a buffer is idle only once
+	 * its commands have been fetched (EXE) and the 2D/3D engines
+	 * are done with it (ACCEL) -- see the type comment in
+	 * via_fence.c.
+	 */
+	*type = DRM_FENCE_TYPE_EXE | DRM_VIA_FENCE_TYPE_ACCEL;
+	return 0;
+}
+
+/*
+ * via_invalidate_caches - invalidate caches before buffers change placement.
+ * @dev: DRM device.
+ * @flags: buffer-object flags describing the affected caches (unused).
+ *
+ * Currently a no-op stub; always returns 0.
+ */
+int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
+{
+	/*
+	 * FIXME: Invalidate texture caches here.
+	 */
+
+	return 0;
+}
+
+
+/*
+ * via_vram_info - locate the PCI BAR mapping video RAM.
+ * @dev: DRM device.
+ * @offset: returns the bus address of the VRAM aperture.
+ * @size: returns the aperture size in bytes.
+ *
+ * Scans the device's first six base address registers for the first
+ * prefetchable memory resource and reports its extent.  Returns 0 on
+ * success, or an EINVAL error code if no such resource exists.
+ */
+static int via_vram_info(drm_device_t *dev,
+			 unsigned long *offset,
+			 unsigned long *size)
+{
+	struct pci_dev *pdev = dev->pdev;
+	unsigned long flags;
+
+	int ret = DRM_ERR(EINVAL);
+	int i;
+
+	for (i = 0; i < 6; ++i) {
+		flags = pci_resource_flags(pdev, i);
+		if ((flags & (IORESOURCE_MEM | IORESOURCE_PREFETCH)) ==
+		    (IORESOURCE_MEM | IORESOURCE_PREFETCH)) {
+			ret = 0;
+			break;
+		}
+	}
+
+	if (ret) {
+		DRM_ERROR("Could not find VRAM PCI resource\n");
+		return ret;
+	}
+
+	*offset = pci_resource_start(pdev, i);
+	/* Use the standard helper instead of open-coding end - start + 1. */
+	*size = pci_resource_len(pdev, i);
+	return 0;
+}
+
+/*
+ * via_init_mem_type - initialize a buffer-object memory type manager.
+ * @dev: DRM device.
+ * @type: memory type index (DRM_BO_MEM_*).
+ * @man: the manager structure to fill in.
+ *
+ * Fills in aperture offset/size, bus map type and capability flags
+ * for each memory pool the driver supports.  Returns 0 on success,
+ * -EINVAL for an unsupported type or when AGP is required but not
+ * enabled.
+ */
+int via_init_mem_type(drm_device_t * dev, uint32_t type,
+		      drm_mem_type_manager_t * man)
+{
+	switch (type) {
+	case DRM_BO_MEM_LOCAL:
+		/* System memory: mappable and cached, no aperture. */
+
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+			_DRM_FLAG_MEMTYPE_CACHED;
+		man->drm_bus_maptype = 0;
+		break;
+
+	case DRM_BO_MEM_TT:
+		/* Dynamic agpgart memory */
+
+		if (!(drm_core_has_AGP(dev) && dev->agp)) {
+			DRM_ERROR("AGP is not enabled for memory type %u\n",
+				  (unsigned)type);
+			return -EINVAL;
+		}
+		man->io_offset = dev->agp->agp_info.aper_base;
+		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+		man->io_addr = NULL;
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+
+		/* Only to get pte protection right. */
+
+		man->drm_bus_maptype = _DRM_AGP;
+		break;
+
+	case DRM_BO_MEM_VRAM:
+		/* "On-card" video ram; aperture located by probing the BARs. */
+
+		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+		man->io_addr = NULL;
+		return via_vram_info(dev, &man->io_offset, &man->io_size);
+
+	case DRM_BO_MEM_PRIV0:
+		/* Pre-bound agpgart memory: same aperture as TT, but fixed. */
+
+		if (!(drm_core_has_AGP(dev) && dev->agp)) {
+			DRM_ERROR("AGP is not enabled for memory type %u\n",
+				  (unsigned)type);
+			return -EINVAL;
+		}
+		man->io_offset = dev->agp->agp_info.aper_base;
+		man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+		man->io_addr = NULL;
+		man->flags =  _DRM_FLAG_MEMTYPE_MAPPABLE |
+		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
+		man->drm_bus_maptype = _DRM_AGP;
+		break;
+
+	default:
+		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * via_evict_mask - tell the buffer manager where to evict a buffer.
+ * @bo: the buffer object being evicted.
+ *
+ * Returns a DRM_BO_FLAG_MEM_* placement mask.  The original code
+ * mixed placement flags with DRM_BO_MEM_* memory-type indices in the
+ * return values; all cases now consistently return placement flags,
+ * matching the DRM_BO_FLAG_MEM_LOCAL already used for the TT case.
+ */
+uint32_t via_evict_mask(drm_buffer_object_t *bo)
+{
+	switch (bo->mem.mem_type) {
+	case DRM_BO_MEM_LOCAL:
+	case DRM_BO_MEM_TT:
+		return DRM_BO_FLAG_MEM_LOCAL; /* Evict TT to local */
+	case DRM_BO_MEM_PRIV0: /* Evict pre-bound AGP to TT */
+		return DRM_BO_FLAG_MEM_TT;
+	case DRM_BO_MEM_VRAM:
+		/* Large VRAM buffers go to AGP; small ones to system memory. */
+		if (bo->mem.num_pages > 128)
+			return DRM_BO_FLAG_MEM_TT;
+		else
+			return DRM_BO_FLAG_MEM_LOCAL;
+	default:
+		return DRM_BO_FLAG_MEM_LOCAL;
+	}
+}
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
new file mode 100644 (file)
index 0000000..0224993
--- /dev/null
@@ -0,0 +1,230 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA,
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+
+/*
+ * DRM_FENCE_TYPE_EXE guarantees that all command buffers can be evicted.
+ * DRM_VIA_FENCE_TYPE_ACCEL guarantees that all 2D & 3D rendering is complete.
+ */
+
+
+/*
+ * via_perform_flush - poll engine status and signal expired fence types.
+ * @dev: DRM device.
+ * @class: fence class; only class 0 (the AGP command stream) exists.
+ *
+ * Called with the fence manager lock held (see via_poke_flush).
+ * Reads the hardware status registers to determine which of the
+ * pending fence flush types have completed, reports them through
+ * drm_fence_handler(), and returns the mask of flush types still
+ * pending (0 when everything has signaled).
+ */
+static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_fence_class_manager_t *fc = &dev->fm.class[class];
+	uint32_t pending_flush_types = 0;
+	uint32_t signaled_flush_types = 0;
+	uint32_t status;
+
+	if (class != 0)
+		return 0;
+
+	if (!dev_priv)
+		return 0;
+
+	spin_lock(&dev_priv->fence_lock);
+
+	/* EXE is tracked separately from the other flush types. */
+	pending_flush_types = fc->pending_flush |
+		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+	if (pending_flush_types) {
+
+		/*
+		 * Take the idlelock. This guarantees that the next time a client tries
+		 * to grab the lock, it will stall until the idlelock is released. This
+		 * guarantees that eventually, the GPU engines will be idle, but nothing
+		 * else. It cannot be used to protect the hardware.
+		 */
+
+
+		if (!dev_priv->have_idlelock) {
+			drm_idlelock_take(&dev->lock);
+			dev_priv->have_idlelock = 1;
+		}
+
+		/*
+		 * Check if AGP command reader is idle.
+		 */
+
+		if (pending_flush_types & DRM_FENCE_TYPE_EXE)
+			if (VIA_READ(0x41C) & 0x80000000)
+				signaled_flush_types |= DRM_FENCE_TYPE_EXE;
+
+		/*
+		 * Check VRAM command queue empty and 2D + 3D engines idle.
+		 */
+
+		if (pending_flush_types & DRM_VIA_FENCE_TYPE_ACCEL) {
+			status = VIA_READ(VIA_REG_STATUS);
+			if ((status & VIA_VR_QUEUE_BUSY) &&
+			    !(status & (VIA_CMD_RGTR_BUSY | VIA_2D_ENG_BUSY | VIA_3D_ENG_BUSY)))
+				signaled_flush_types |= DRM_VIA_FENCE_TYPE_ACCEL;
+		}
+
+		if (signaled_flush_types) {
+			pending_flush_types &= ~signaled_flush_types;
+			/* Drop the idlelock as soon as nothing is pending,
+			 * so clients may submit commands again. */
+			if (!pending_flush_types && dev_priv->have_idlelock) {
+				drm_idlelock_release(&dev->lock);
+				dev_priv->have_idlelock = 0;
+			}
+			drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+		}
+	}
+
+	spin_unlock(&dev_priv->fence_lock);
+
+	/* Recompute: drm_fence_handler() may have updated the class state. */
+	return fc->pending_flush |
+		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+}
+
+
+/**
+ * via_fence_emit_sequence - emit a fence sequence number.
+ * @dev: DRM device.
+ * @class: fence class; only class 0 (the AGP command stream) is valid.
+ * @flags: fence flags (currently unused).
+ * @sequence: returns the newly assigned sequence number.
+ * @native_type: returns the fence type the hardware signals natively.
+ *
+ * Returns 0 on success, or an EINVAL error code for an unknown class
+ * or an uninitialized device.
+ */
+int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
+                            uint32_t * sequence, uint32_t * native_type)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	int ret = 0;
+
+	if (!dev_priv)
+		return -EINVAL;
+
+	switch(class) {
+	case 0: /* AGP command stream */
+
+		/*
+		 * The sequence number isn't really used by the hardware yet.
+		 */
+
+		spin_lock(&dev_priv->fence_lock);
+		*sequence = ++dev_priv->emit_0_sequence;
+		spin_unlock(&dev_priv->fence_lock);
+
+		/*
+		 * When drm_fence_handler() is called with flush type 0x01, and a
+		 * sequence number, that means that the EXE flag is expired.
+		 * Nothing else. No implicit flushing or other engines idle.
+		 */
+
+		*native_type = DRM_FENCE_TYPE_EXE;
+		break;
+	default:
+		ret = DRM_ERR(EINVAL);
+		break;
+	}
+	return ret;
+}
+
+/**
+ * via_poke_flush - manual fence poll, called from the fence manager.
+ * @dev: DRM device.
+ * @class: fence class to poll.
+ *
+ * Checks the hardware once under the fence manager lock, and arms a
+ * one-tick timer to poll again if flush types are still pending.
+ */
+void via_poke_flush(drm_device_t * dev, uint32_t class)
+{
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_fence_manager_t *fm = &dev->fm;
+	unsigned long flags;
+	uint32_t pending_flush;
+
+	if (!dev_priv)
+		return ;
+
+	write_lock_irqsave(&fm->lock, flags);
+	pending_flush = via_perform_flush(dev, class);
+	/* Poll a second time: engines may have gone idle while the
+	 * first poll was taking the idlelock. */
+	if (pending_flush)
+		pending_flush = via_perform_flush(dev, class);
+	write_unlock_irqrestore(&fm->lock, flags);
+
+	/*
+	 * Kick the timer if there are more flushes pending.
+	 */
+
+	if (pending_flush && !timer_pending(&dev_priv->fence_timer)) {
+		dev_priv->fence_timer.expires = jiffies + 1;
+		add_timer(&dev_priv->fence_timer);
+	}
+}
+
+/**
+ * via_fence_has_irq - report whether fence expiry is IRQ-driven.
+ *
+ * No irq fence expirations implemented yet.
+ * Although both the HQV engines and PCI dmablit engines signal
+ * idle with an IRQ, we haven't implemented this yet.
+ * This means that the drm fence manager will always poll for engine idle,
+ * unless the caller wanting to wait for a fence object has indicated a lazy wait.
+ */
+
+int via_fence_has_irq(struct drm_device * dev, uint32_t class,
+		      uint32_t flags)
+{
+	/* Always 0: polling only, for every class and flag combination. */
+	return 0;
+}
+
+/**
+ * via_fence_timer - timer callback that polls for fence expiry.
+ * @data: the drm_device_t pointer, cast to the timer's data word.
+ *
+ * Regularly call the flush function. This enables lazy waits, so we can
+ * set lazy_capable. Lazy waits don't really care when the fence expires,
+ * so a timer tick delay should be fine.  The timer disarms itself once
+ * no flush types remain pending.
+ */
+
+void via_fence_timer(unsigned long data)
+{
+	drm_device_t *dev = (drm_device_t *) data;
+	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+	drm_fence_manager_t *fm = &dev->fm;
+	uint32_t pending_flush;
+	drm_fence_class_manager_t *fc = &dev->fm.class[0];
+
+	if (!dev_priv)
+		return;
+	/* No lock is held here, so a plain return suffices; the old
+	 * `goto out_unlock' label unlocked nothing and was misleading. */
+	if (!fm->initialized)
+		return;
+
+	via_poke_flush(dev, 0);
+	pending_flush = fc->pending_flush |
+		((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+	/*
+	 * Disable timer if there are no more flushes pending.
+	 */
+
+	if (!pending_flush && timer_pending(&dev_priv->fence_timer)) {
+		/* The idlelock is released when the last flush type
+		 * signals, so it must not be held here. */
+		BUG_ON(dev_priv->have_idlelock);
+		del_timer(&dev_priv->fence_timer);
+	}
+}