Initial pass at converting driver to DRM infrastructure.
authorIan Romanick <idr@us.ibm.com>
Thu, 19 Jul 2007 17:29:18 +0000 (10:29 -0700)
committerIan Romanick <idr@us.ibm.com>
Thu, 19 Jul 2007 17:29:18 +0000 (10:29 -0700)
14 files changed:
linux-core/Makefile
linux-core/xgi_cmdlist.c
linux-core/xgi_cmdlist.h
linux-core/xgi_drv.c
linux-core/xgi_drv.h
linux-core/xgi_fb.c
linux-core/xgi_fb.h [deleted file]
linux-core/xgi_linux.h [deleted file]
linux-core/xgi_misc.c
linux-core/xgi_misc.h
linux-core/xgi_pcie.c
linux-core/xgi_pcie.h [deleted file]
linux-core/xgi_regs.h
shared-core/xgi_drm.h

index 2052459..55e2525 100644 (file)
@@ -91,8 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
 NVHEADERS =     nv_drv.h $(DRMHEADERS)
 FFBHEADERS =   ffb_drv.h $(DRMHEADERS)
 NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
-XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \
-               xgi_pcie.h xgi_regs.h xgi_types.h
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
 
 PROGS = dristat drmstat
 
index 6137346..d201805 100644 (file)
@@ -26,7 +26,6 @@
  * DEALINGS IN THE SOFTWARE.                                                                                           
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
 #include "xgi_regs.h"
 #include "xgi_misc.h"
@@ -55,18 +54,19 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
 
        s_cmdring._cmdRingSize = mem_alloc.size;
        s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;
-       s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr;
+       s_cmdring._cmdRingAllocOffset = mem_alloc.offset;
        s_cmdring._lastBatchStartAddr = 0;
        s_cmdring._cmdRingOffset = 0;
 
        return 1;
 }
 
-void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
+static void xgi_submit_cmdlist(struct xgi_info * info,
+                              struct xgi_cmd_info * pCmdInfo)
 {
        const unsigned int beginPort = getCurBatchBeginPort(pCmdInfo);
 
-       XGI_INFO("After getCurBatchBeginPort()\n");
+       DRM_INFO("After getCurBatchBeginPort()\n");
 
        if (s_cmdring._lastBatchStartAddr == 0) {
                const unsigned int portOffset = BASE_3D_ENG + beginPort;
@@ -75,50 +75,53 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
                /* xgi_waitfor_pci_idle(info); */
 
                // Enable PCI Trigger Mode
-               XGI_INFO("Enable PCI Trigger Mode \n");
+               DRM_INFO("Enable PCI Trigger Mode \n");
 
 
                /* Jong 06/14/2006; 0x400001a */
-               dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+               dwWriteReg(info->mmio_map,
+                          BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
                           (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
                           M2REG_CLEAR_COUNTERS_MASK | 0x08 |
                           M2REG_PCI_TRIGGER_MODE_MASK);
 
                /* Jong 06/14/2006; 0x400000a */
-               dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+               dwWriteReg(info->mmio_map,
+                          BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
                           (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
                           M2REG_PCI_TRIGGER_MODE_MASK);
 
                // Send PCI begin command
-               XGI_INFO("Send PCI begin command \n");
+               DRM_INFO("Send PCI begin command \n");
 
-               XGI_INFO("portOffset=%d, beginPort=%d\n",
+               DRM_INFO("portOffset=%d, beginPort=%d\n",
                         portOffset, beginPort);
 
                /* beginPort = 48; */
                /* 0xc100000 */
-               dwWriteReg(portOffset,
+               dwWriteReg(info->mmio_map, portOffset,
                           (beginPort << 22) + (BEGIN_VALID_MASK) +
                           pCmdInfo->_curDebugID);
 
-               XGI_INFO("Send PCI begin command- After\n");
+               DRM_INFO("Send PCI begin command- After\n");
 
                /* 0x80000024 */
-               dwWriteReg(portOffset + 4,
+               dwWriteReg(info->mmio_map, portOffset + 4,
                           BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);
 
                /* 0x1010000 */
-               dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4));
+               dwWriteReg(info->mmio_map, portOffset + 8, 
+                          (pCmdInfo->_firstBeginAddr >> 4));
 
                /* Jong 06/12/2006; system hang; marked for test */
-               dwWriteReg(portOffset + 12, 0);
+               dwWriteReg(info->mmio_map, portOffset + 12, 0);
 
                /* Jong 06/13/2006; remove marked for system hang test */
                /* xgi_waitfor_pci_idle(info); */
        } else {
                u32 *lastBatchVirtAddr;
 
-               XGI_INFO("s_cmdring._lastBatchStartAddr != 0\n");
+               DRM_INFO("s_cmdring._lastBatchStartAddr != 0\n");
 
                if (pCmdInfo->_firstBeginType == BTYPE_3D) {
                        addFlush2D(info);
@@ -146,21 +149,38 @@ void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
                        /* Jong 06/12/2006; system hang; marked for test */
                        triggerHWCommandList(info, pCmdInfo->_beginCount);
                } else {
-                       XGI_ERROR("lastBatchVirtAddr is NULL\n");
+                       DRM_ERROR("lastBatchVirtAddr is NULL\n");
                }
        }
 
        s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr;
-       XGI_INFO("End\n");
+       DRM_INFO("End\n");
 }
 
+
+int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_cmd_info  cmd_list;
+       struct xgi_info *info = dev->dev_private;
+
+       DRM_COPY_FROM_USER_IOCTL(cmd_list, 
+                                (struct xgi_cmd_info __user *) data,
+                                sizeof(cmd_list));
+
+       xgi_submit_cmdlist(info, &cmd_list);
+       return 0;
+}
+
+
 /*
     state:      0 - console
                 1 - graphic
                 2 - fb
                 3 - logout
 */
-void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo)
+int xgi_state_change(struct xgi_info * info, unsigned int to, 
+                    unsigned int from)
 {
 #define STATE_CONSOLE   0
 #define STATE_GRAPHIC   1
@@ -169,26 +189,40 @@ void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo
 #define STATE_REBOOT    4
 #define STATE_SHUTDOWN  5
 
-       if ((pStateInfo->_fromState == STATE_GRAPHIC)
-           && (pStateInfo->_toState == STATE_CONSOLE)) {
-               XGI_INFO("[kd] I see, now is to leaveVT\n");
+       if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
+               DRM_INFO("[kd] I see, now is to leaveVT\n");
                // stop to received batch
-       } else if ((pStateInfo->_fromState == STATE_CONSOLE)
-                  && (pStateInfo->_toState == STATE_GRAPHIC)) {
-               XGI_INFO("[kd] I see, now is to enterVT\n");
+       } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
+               DRM_INFO("[kd] I see, now is to enterVT\n");
                xgi_cmdlist_reset();
-       } else if ((pStateInfo->_fromState == STATE_GRAPHIC)
-                  && ((pStateInfo->_toState == STATE_LOGOUT)
-                      || (pStateInfo->_toState == STATE_REBOOT)
-                      || (pStateInfo->_toState == STATE_SHUTDOWN))) {
-               XGI_INFO("[kd] I see, not is to exit from X\n");
+       } else if ((from == STATE_GRAPHIC)
+                  && ((to == STATE_LOGOUT)
+                      || (to == STATE_REBOOT)
+                      || (to == STATE_SHUTDOWN))) {
+               DRM_INFO("[kd] I see, not is to exit from X\n");
                // stop to received batch
        } else {
-               XGI_ERROR("[kd] Should not happen\n");
+               DRM_ERROR("[kd] Should not happen\n");
+               return DRM_ERR(EINVAL);
        }
 
+       return 0;
 }
 
+
+int xgi_state_change_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_state_info  state;
+       struct xgi_info *info = dev->dev_private;
+
+       DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data,
+                                sizeof(state));
+
+       return xgi_state_change(info, state._toState, state._fromState);
+}
+
+
 void xgi_cmdlist_reset(void)
 {
        s_cmdring._lastBatchStartAddr = 0;
@@ -198,7 +232,7 @@ void xgi_cmdlist_reset(void)
 void xgi_cmdlist_cleanup(struct xgi_info * info)
 {
        if (s_cmdring._cmdRingBuffer != 0) {
-               xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);
+               xgi_pcie_free(info, s_cmdring._cmdRingAllocOffset, NULL);
                s_cmdring._cmdRingBuffer = 0;
                s_cmdring._cmdRingOffset = 0;
                s_cmdring._cmdRingSize = 0;
@@ -212,7 +246,8 @@ static void triggerHWCommandList(struct xgi_info * info,
 
        //Fix me, currently we just trigger one time
        while (triggerCounter--) {
-               dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
+               dwWriteReg(info->mmio_map,
+                          BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
                           0x05000000 + (0x0ffff & s_triggerID++));
                // xgi_waitfor_pci_idle(info);
        }
index d2b95c0..4bc56ec 100644 (file)
@@ -60,16 +60,15 @@ typedef enum {
 struct xgi_cmdring_info {
        unsigned int _cmdRingSize;
        u32 _cmdRingBuffer;
-       unsigned long _cmdRingBusAddr;
+       unsigned long _cmdRingAllocOffset;
        u32 _lastBatchStartAddr;
        u32 _cmdRingOffset;
 };
 
 extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size);
 
-extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo);
-
-extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo);
+extern int xgi_state_change(struct xgi_info * info, unsigned int to,
+       unsigned int from);
 
 extern void xgi_cmdlist_cleanup(struct xgi_info * info);
 
index bd39dfd..3b9f4cb 100644 (file)
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER                       
  * DEALINGS IN THE SOFTWARE.                                                                                           
  ***************************************************************************/
-#include "xgi_linux.h"
+
+#include "drmP.h"
+#include "drm.h"
 #include "xgi_drv.h"
 #include "xgi_regs.h"
-#include "xgi_pcie.h"
 #include "xgi_misc.h"
 #include "xgi_cmdlist.h"
 
-/* for debug */
-static int xgi_temp = 1;
-/*
- * global parameters
- */
-static struct xgi_dev {
-       u16 vendor;
-       u16 device;
-       const char *name;
-} xgidev_list[] = {
-       {
-       PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, {
-       PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, {
-       0, 0, NULL}
-};
-
-int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */
+#include "drm_pciids.h"
 
-static int xgi_num_devices = 0;
+static struct pci_device_id pciidlist[] = {
+       xgi_PCI_IDS
+};
 
-struct xgi_info xgi_devices[XGI_MAX_DEVICES];
+static int xgi_bootstrap(DRM_IOCTL_ARGS);
 
-#if defined(XGI_PM_SUPPORT_APM)
-static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 };
-#endif
+static drm_ioctl_desc_t xgi_ioctls[] = {
+       [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH},
 
-/* add one for the control device */
-struct xgi_info xgi_ctl_device;
-wait_queue_head_t xgi_ctl_waitqueue;
+       [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH},
 
-#ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_xgi;
-#endif
+       [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH},
 
-#ifdef CONFIG_DEVFS_FS
-devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES];
-#endif
+       [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH},
+};
 
-struct list_head xgi_mempid_list;
+static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
+static int xgi_driver_unload(struct drm_device *dev);
+static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp);
+static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
+
+
+static struct drm_driver driver = {
+       .driver_features =
+               DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
+               DRIVER_IRQ_SHARED | DRIVER_SG,
+       .dev_priv_size = sizeof(struct xgi_info),
+       .load = xgi_driver_load,
+       .unload = xgi_driver_unload,
+       .preclose = xgi_driver_preclose,
+       .dma_quiescent = NULL,
+       .irq_preinstall = NULL,
+       .irq_postinstall = NULL,
+       .irq_uninstall = NULL,
+       .irq_handler = xgi_kern_isr,
+       .reclaim_buffers = drm_core_reclaim_buffers,
+       .get_map_ofs = drm_core_get_map_ofs,
+       .get_reg_ofs = drm_core_get_reg_ofs,
+       .ioctls = xgi_ioctls,
+       .dma_ioctl = NULL,
+
+       .fops = {
+               .owner = THIS_MODULE,
+               .open = drm_open,
+               .release = drm_release,
+               .ioctl = drm_ioctl,
+               .mmap = drm_mmap,
+               .poll = drm_poll,
+               .fasync = drm_fasync,
+       },
+
+       .pci_driver = {
+               .name = DRIVER_NAME,
+               .id_table = pciidlist,
+               .probe = probe,
+               .remove = __devexit_p(drm_cleanup_pci),
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 
-/* xgi_ functions.. do not take a state device parameter  */
-static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info);
-static void xgi_proc_create(void);
-static void xgi_proc_remove_all(struct proc_dir_entry *);
-static void xgi_proc_remove(void);
+};
 
-/* xgi_kern_ functions, interfaces used by linux kernel */
-int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *);
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       return drm_get_dev(pdev, ent, &driver);
+}
 
-unsigned int xgi_kern_poll(struct file *, poll_table *);
-int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
-int xgi_kern_mmap(struct file *, struct vm_area_struct *);
-int xgi_kern_open(struct inode *, struct file *);
-int xgi_kern_release(struct inode *inode, struct file *filp);
 
-void xgi_kern_vma_open(struct vm_area_struct *vma);
-void xgi_kern_vma_release(struct vm_area_struct *vma);
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
-struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
-                                unsigned long address, int *type);
-#else
-struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
-                                unsigned long address, int write_access);
-#endif
+static int __init xgi_init(void)
+{
+       driver.num_ioctls = xgi_max_ioctl;
+       return drm_init(&driver, pciidlist);
+}
 
-int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *);
-int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *);
-int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *);
-int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *);
+static void __exit xgi_exit(void)
+{
+       drm_exit(&driver);
+}
 
-int xgi_kern_ctl_open(struct inode *, struct file *);
-int xgi_kern_ctl_close(struct inode *, struct file *);
-unsigned int xgi_kern_ctl_poll(struct file *, poll_table *);
+module_init(xgi_init);
+module_exit(xgi_exit);
 
-void xgi_kern_isr_bh(unsigned long);
-irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
 
-static void xgi_lock_init(struct xgi_info * info);
 
-#if defined(XGI_PM_SUPPORT_ACPI)
-int xgi_kern_acpi_standby(struct pci_dev *, u32);
-int xgi_kern_acpi_resume(struct pci_dev *);
-#endif
+void xgi_kern_isr_bh(struct drm_device *dev);
 
 /*
  * verify access to pci config space wasn't disabled behind our back
@@ -129,1361 +152,206 @@ int xgi_kern_acpi_resume(struct pci_dev *);
 
 static inline void xgi_check_pci_config(struct xgi_info * info, int line)
 {
-       unsigned short cmd, flag = 0;
-
-       // don't do this on the control device, only the actual devices
-       if (info->flags & XGI_FLAG_CONTROL)
-               return;
+       u16 cmd;
+       bool flag = 0;
 
-       pci_read_config_word(info->dev, PCI_COMMAND, &cmd);
+       pci_read_config_word(info->dev->pdev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_MASTER)) {
-               XGI_INFO("restoring bus mastering! (%d)\n", line);
+               DRM_INFO("restoring bus mastering! (%d)\n", line);
                cmd |= PCI_COMMAND_MASTER;
                flag = 1;
        }
 
        if (!(cmd & PCI_COMMAND_MEMORY)) {
-               XGI_INFO("restoring MEM access! (%d)\n", line);
+               DRM_INFO("restoring MEM access! (%d)\n", line);
                cmd |= PCI_COMMAND_MEMORY;
                flag = 1;
        }
 
        if (flag)
-               pci_write_config_word(info->dev, PCI_COMMAND, cmd);
+               pci_write_config_word(info->dev->pdev, PCI_COMMAND, cmd);
 }
 
-/*
- * struct pci_device_id {
- *  unsigned int vendor, device;        // Vendor and device ID or PCI_ANY_ID
- *  unsigned int subvendor, subdevice;  // Subsystem ID's or PCI_ANY_ID
- *  unsigned int class, class_mask;     // (class,subclass,prog-if) triplet
- *  unsigned long driver_data;          // Data private to the driver
- * };
- */
 
-static struct pci_device_id xgi_dev_table[] = {
-       {
-        .vendor = PCI_VENDOR_ID_XGI,
-        .device = PCI_ANY_ID,
-        .subvendor = PCI_ANY_ID,
-        .subdevice = PCI_ANY_ID,
-        .class = (PCI_CLASS_DISPLAY_VGA << 8),
-        .class_mask = ~0,
-        },
-       {}
-};
-
-/*
- *  #define MODULE_DEVICE_TABLE(type,name) \
- *      MODULE_GENERIC_TABLE(type##_device,name)
- */
-MODULE_DEVICE_TABLE(pci, xgi_dev_table);
-
-/*
- * struct pci_driver {
- *  struct list_head node;
- *  char *name;
- *  const struct pci_device_id *id_table;   // NULL if wants all devices
- *  int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted
- *  void (*remove)(struct pci_dev *dev);    // Device removed (NULL if not a hot-plug capable driver)
- *  int  (*save_state)(struct pci_dev *dev, u32 state);     // Save Device Context
- *  int  (*suspend)(struct pci_dev *dev, u32 state);        // Device suspended
- *  int  (*resume)(struct pci_dev *dev);                    // Device woken up
- *  int  (*enable_wake)(struct pci_dev *dev, u32 state, int enable);   // Enable wake event
- * };
- */
-static struct pci_driver xgi_pci_driver = {
-       .name = "xgi",
-       .id_table = xgi_dev_table,
-       .probe = xgi_kern_probe,
-#if defined(XGI_SUPPORT_ACPI)
-       .suspend = xgi_kern_acpi_standby,
-       .resume = xgi_kern_acpi_resume,
-#endif
-};
-
-/*
- * find xgi devices and set initial state
- */
-int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table)
+int xgi_bootstrap(DRM_IOCTL_ARGS)
 {
-       struct xgi_info *info;
-
-       if ((dev->vendor != PCI_VENDOR_ID_XGI)
-           || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) {
-               return -1;
-       }
-
-       if (xgi_num_devices == XGI_MAX_DEVICES) {
-               XGI_INFO("maximum device number (%d) reached!\n",
-                        xgi_num_devices);
-               return -1;
-       }
-
-       /* enable io, mem, and bus-mastering in pci config space */
-       if (pci_enable_device(dev) != 0) {
-               XGI_INFO("pci_enable_device failed, aborting\n");
-               return -1;
-       }
-
-       XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices);
-
-       pci_set_master(dev);
-
-       info = &xgi_devices[xgi_num_devices];
-       info->dev = dev;
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+       struct xgi_bootstrap bs;
+       int err;
 
-       xgi_lock_init(info);
 
-       info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1);
-       info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1);
+       DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data,
+                                sizeof(bs));
 
-       /* check IO region */
-       if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) {
-               XGI_ERROR("cannot reserve MMIO memory\n");
-               goto error_disable_dev;
+       if (info->bootstrap_done) {
+               return 0;
        }
 
-       XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base);
-       XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size);
-
-       info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size);
-       if (!info->mmio.vbase) {
-               release_mem_region(info->mmio.base, info->mmio.size);
-               XGI_ERROR("info->mmio.vbase failed\n");
-               goto error_disable_dev;
-       }
        xgi_enable_mmio(info);
 
-       //xgi_enable_ge(info);
-
-       XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase);
-
-       info->fb.base = XGI_PCI_RESOURCE_START(dev, 0);
-       info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0);
-
-       XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base);
-       XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
-
-       info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024;
-       XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
-
-       /* check frame buffer region
-          if (!request_mem_region(info->fb.base, info->fb.size, "xgi"))
-          {
-          release_mem_region(info->mmio.base, info->mmio.size);
-          XGI_ERROR("cannot reserve frame buffer memory\n");
-          goto error_disable_dev;
-          }
-
-          info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size);
-
-          if (!info->fb.vbase)
-          {
-          release_mem_region(info->mmio.base, info->mmio.size);
-          release_mem_region(info->fb.base, info->fb.size);
-          XGI_ERROR("info->fb.vbase failed\n");
-          goto error_disable_dev;
-          }
-        */
-       info->fb.vbase = NULL;
-       XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase);
-
-
-       /* check common error condition */
-       if (info->dev->irq == 0) {
-               XGI_ERROR("Can't find an IRQ for your XGI card!  \n");
-               goto error_zero_dev;
-       }
-       XGI_INFO("info->irq: %lx \n", info->dev->irq);
-
-       //xgi_enable_dvi_interrupt(info);
-
-       /* sanity check the IO apertures */
-       if ((info->mmio.base == 0) || (info->mmio.size == 0)
-           || (info->fb.base == 0) || (info->fb.size == 0)) {
-               XGI_ERROR("The IO regions for your XGI card are invalid.\n");
-
-               if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
-                       XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n",
-                                 info->mmio.base, info->mmio.size);
-               }
-
-               if ((info->fb.base == 0) || (info->fb.size == 0)) {
-                       XGI_ERROR
-                           ("frame buffer appears to be wrong: 0x%lx 0x%lx\n",
-                            info->fb.base, info->fb.size);
-               }
-
-               goto error_zero_dev;
-       }
-       //xgi_num_devices++;
-
-       return 0;
-
-      error_zero_dev:
-       release_mem_region(info->fb.base, info->fb.size);
-       release_mem_region(info->mmio.base, info->mmio.size);
-
-      error_disable_dev:
-       pci_disable_device(dev);
-       return -1;
+       info->pcie.size = bs.gart_size * (1024 * 1024);
 
-}
-
-/*
- * vma operations...
- * this is only called when the vmas are duplicated. this
- * appears to only happen when the process is cloned to create
- * a new process, and not when the process is threaded.
- *
- * increment the usage count for the physical pages, so when
- * this clone unmaps the mappings, the pages are not
- * deallocated under the original process.
- */
-struct vm_operations_struct xgi_vm_ops = {
-       .open = xgi_kern_vma_open,
-       .close = xgi_kern_vma_release,
-       .nopage = xgi_kern_vma_nopage,
-};
-
-void xgi_kern_vma_open(struct vm_area_struct *vma)
-{
-       XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n",
-                vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
-
-       if (XGI_VMA_PRIVATE(vma)) {
-               struct xgi_pcie_block *block =
-                   (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
-               XGI_ATOMIC_INC(block->use_count);
-       }
-}
-
-void xgi_kern_vma_release(struct vm_area_struct *vma)
-{
-       XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n",
-                vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
-
-       if (XGI_VMA_PRIVATE(vma)) {
-               struct xgi_pcie_block *block =
-                   (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
-               XGI_ATOMIC_DEC(block->use_count);
-
-               /*
-                * if use_count is down to 0, the kernel virtual mapping was freed
-                * but the underlying physical pages were not, we need to clear the
-                * bit and free the physical pages.
-                */
-               if (XGI_ATOMIC_READ(block->use_count) == 0) {
-                       // Need TO Finish
-                       XGI_VMA_PRIVATE(vma) = NULL;
-               }
-       }
-}
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
-struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
-                                unsigned long address, int *type)
-{
-       struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
-       struct page *page = NOPAGE_SIGBUS;
-       unsigned long offset = 0;
-       unsigned long page_addr = 0;
-/*
-    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
-              vma->vm_start,
-              vma->vm_end,
-              XGI_VMA_OFFSET(vma),
-              address);
-*/
-       offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
-
-       offset = offset - block->bus_addr;
-
-       offset >>= PAGE_SHIFT;
-
-       page_addr = block->page_table[offset].virt_addr;
-
-       if (xgi_temp) {
-               XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
-                        "block->page_count: 0x%lx block->page_order: 0x%lx"
-                        "block->page_table[0x%lx].virt_addr: 0x%lx\n",
-                        block->bus_addr, block->hw_addr,
-                        block->page_count, block->page_order,
-                        offset, block->page_table[offset].virt_addr);
-               xgi_temp = 0;
-       }
-
-       if (!page_addr)
-               goto out;       /* hole or end-of-file */
-       page = virt_to_page(page_addr);
-
-       /* got it, now increment the count */
-       get_page(page);
-      out:
-       return page;
-
-}
-#else
-struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
-                                unsigned long address, int write_access)
-{
-       struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
-       struct page *page = NOPAGE_SIGBUS;
-       unsigned long offset = 0;
-       unsigned long page_addr = 0;
-/*
-    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
-              vma->vm_start,
-              vma->vm_end,
-              XGI_VMA_OFFSET(vma),
-              address);
-*/
-       offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
-
-       offset = offset - block->bus_addr;
-
-       offset >>= PAGE_SHIFT;
-
-       page_addr = block->page_table[offset].virt_addr;
-
-       if (xgi_temp) {
-               XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
-                        "block->page_count: 0x%lx block->page_order: 0x%lx"
-                        "block->page_table[0x%lx].virt_addr: 0x%lx\n",
-                        block->bus_addr, block->hw_addr,
-                        block->page_count, block->page_order,
-                        offset, block->page_table[offset].virt_addr);
-               xgi_temp = 0;
-       }
-
-       if (!page_addr)
-               goto out;       /* hole or end-of-file */
-       page = virt_to_page(page_addr);
-
-       /* got it, now increment the count */
-       get_page(page);
-      out:
-       return page;
-}
-#endif
-
-#if 0
-static struct file_operations xgi_fops = {
-       /* owner:      THIS_MODULE, */
-      poll:xgi_kern_poll,
-      ioctl:xgi_kern_ioctl,
-      mmap:xgi_kern_mmap,
-      open:xgi_kern_open,
-      release:xgi_kern_release,
-};
-#endif
-
-static struct file_operations xgi_fops = {
-       .owner = THIS_MODULE,
-       .poll = xgi_kern_poll,
-       .ioctl = xgi_kern_ioctl,
-       .mmap = xgi_kern_mmap,
-       .open = xgi_kern_open,
-       .release = xgi_kern_release,
-};
-
-static struct xgi_file_private *xgi_alloc_file_private(void)
-{
-       struct xgi_file_private *fp;
-
-       XGI_KMALLOC(fp, sizeof(struct xgi_file_private));
-       if (!fp)
-               return NULL;
-
-       memset(fp, 0, sizeof(struct xgi_file_private));
-
-       /* initialize this file's event queue */
-       init_waitqueue_head(&fp->wait_queue);
-
-       xgi_init_lock(fp->fp_lock);
-
-       return fp;
-}
-
-static void xgi_free_file_private(struct xgi_file_private * fp)
-{
-       if (fp == NULL)
-               return;
-
-       XGI_KFREE(fp, sizeof(struct xgi_file_private));
-}
-
-int xgi_kern_open(struct inode *inode, struct file *filp)
-{
-       struct xgi_info *info = NULL;
-       int dev_num;
-       int result = 0, status;
-
-       /*
-        * the type and num values are only valid if we are not using devfs.
-        * However, since we use them to retrieve the device pointer, we
-        * don't need them with devfs as filp->private_data is already
-        * initialized
-        */
-       filp->private_data = xgi_alloc_file_private();
-       if (filp->private_data == NULL)
-               return -ENOMEM;
-
-       XGI_INFO("filp->private_data %p\n", filp->private_data);
-       /*
-        * for control device, just jump to its open routine
-        * after setting up the private data
-        */
-       if (XGI_IS_CONTROL_DEVICE(inode))
-               return xgi_kern_ctl_open(inode, filp);
-
-       /* what device are we talking about? */
-       dev_num = XGI_DEVICE_NUMBER(inode);
-       if (dev_num >= XGI_MAX_DEVICES) {
-               xgi_free_file_private(filp->private_data);
-               filp->private_data = NULL;
-               return -ENODEV;
-       }
-
-       info = &xgi_devices[dev_num];
-
-       XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num);
-
-       xgi_down(info->info_sem);
-       XGI_CHECK_PCI_CONFIG(info);
-
-       XGI_INFO_FROM_FP(filp) = info;
-
-       /*
-        * map the memory and allocate isr on first open
-        */
-
-       if (!(info->flags & XGI_FLAG_OPEN)) {
-               XGI_INFO("info->flags & XGI_FLAG_OPEN \n");
-
-               if (info->dev->device == 0) {
-                       XGI_INFO("open of nonexistent device %d\n", dev_num);
-                       result = -ENXIO;
-                       goto failed;
-               }
-
-               /* initialize struct irqaction */
-               status = request_irq(info->dev->irq, xgi_kern_isr,
-                                    SA_INTERRUPT | SA_SHIRQ, "xgi",
-                                    (void *)info);
-               if (status != 0) {
-                       if (info->dev->irq && (status == -EBUSY)) {
-                               XGI_ERROR
-                                   ("Tried to get irq %d, but another driver",
-                                    (unsigned int)info->dev->irq);
-                               XGI_ERROR("has it and is not sharing it.\n");
-                       }
-                       XGI_ERROR("isr request failed 0x%x\n", status);
-                       result = -EIO;
-                       goto failed;
-               }
-
-               /*
-                * #define DECLARE_TASKLET(name, func, data) \
-                * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
-                */
-               info->tasklet.func = xgi_kern_isr_bh;
-               info->tasklet.data = (unsigned long)info;
-               tasklet_enable(&info->tasklet);
-
-               /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
-               xgi_cmdlist_initialize(info, 0x100000);
-
-               info->flags |= XGI_FLAG_OPEN;
-       }
-
-       XGI_ATOMIC_INC(info->use_count);
-
-      failed:
-       xgi_up(info->info_sem);
-
-       if ((result) && filp->private_data) {
-               xgi_free_file_private(filp->private_data);
-               filp->private_data = NULL;
-       }
-
-       return result;
-}
-
-int xgi_kern_release(struct inode *inode, struct file *filp)
-{
-       struct xgi_info *info = XGI_INFO_FROM_FP(filp);
-
-       XGI_CHECK_PCI_CONFIG(info);
-
-       /*
-        * for control device, just jump to its open routine
-        * after setting up the private data
-        */
-       if (XGI_IS_CONTROL_DEVICE(inode))
-               return xgi_kern_ctl_close(inode, filp);
-
-       XGI_INFO("Jong-xgi_kern_release on device %d\n",
-                XGI_DEVICE_NUMBER(inode));
-
-       xgi_down(info->info_sem);
-       if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) {
-
-               /*
-                * The usage count for this device has dropped to zero, it can be shut
-                * down safely; disable its interrupts.
-                */
-
-               /*
-                * Disable this device's tasklet to make sure that no bottom half will
-                * run with undefined device state.
-                */
-               tasklet_disable(&info->tasklet);
-
-               /*
-                * Free the IRQ, which may block until all pending interrupt processing
-                * has completed.
-                */
-               free_irq(info->dev->irq, (void *)info);
-
-               xgi_cmdlist_cleanup(info);
-
-               /* leave INIT flag alone so we don't reinit every time */
-               info->flags &= ~XGI_FLAG_OPEN;
-       }
-
-       xgi_up(info->info_sem);
-
-       if (FILE_PRIVATE(filp)) {
-               xgi_free_file_private(FILE_PRIVATE(filp));
-               FILE_PRIVATE(filp) = NULL;
-       }
-
-       return 0;
-}
-
-int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       //struct inode        *inode = INODE_FROM_FP(filp);
-       struct xgi_info *info = XGI_INFO_FROM_FP(filp);
-       struct xgi_pcie_block *block;
-       int pages = 0;
-       unsigned long prot;
-
-       XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n",
-                vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
-
-       XGI_CHECK_PCI_CONFIG(info);
-
-       if (XGI_MASK_OFFSET(vma->vm_start)
-           || XGI_MASK_OFFSET(vma->vm_end)) {
-               XGI_ERROR("VM: bad mmap range: %lx - %lx\n",
-                         vma->vm_start, vma->vm_end);
-               return -ENXIO;
-       }
-
-       pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-
-       vma->vm_ops = &xgi_vm_ops;
-
-       /* XGI IO(reg) space */
-       if (IS_IO_OFFSET
-           (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
-                                        XGI_VMA_OFFSET(vma),
-                                        vma->vm_end - vma->vm_start,
-                                        vma->vm_page_prot))
-                       return -EAGAIN;
-
-               /* mark it as IO so that we don't dump it on core dump */
-               vma->vm_flags |= VM_IO;
-               XGI_INFO("VM: mmap io space \n");
-       }
-       /* XGI fb space */
-       /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */
-       else if (IS_FB_OFFSET
-                (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
-                                        XGI_VMA_OFFSET(vma),
-                                        vma->vm_end - vma->vm_start,
-                                        vma->vm_page_prot))
-                       return -EAGAIN;
-
-               // mark it as IO so that we don't dump it on core dump
-               vma->vm_flags |= VM_IO;
-               XGI_INFO("VM: mmap fb space \n");
-       }
-       /* PCIE allocator */
-       /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */
-       else if (IS_PCIE_OFFSET
-                (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
-               xgi_down(info->pcie_sem);
-
-               block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma));
-
-               if (block == NULL) {
-                       XGI_ERROR("couldn't find pre-allocated PCIE memory!\n");
-                       xgi_up(info->pcie_sem);
-                       return -EAGAIN;
-               }
-
-               if (block->page_count != pages) {
-                       XGI_ERROR
-                           ("pre-allocated PCIE memory has wrong number of pages!\n");
-                       xgi_up(info->pcie_sem);
-                       return -EAGAIN;
-               }
-
-               vma->vm_private_data = block;
-               XGI_ATOMIC_INC(block->use_count);
-               xgi_up(info->pcie_sem);
-
-               /*
-                * prevent the swapper from swapping it out
-                * mark the memory i/o so the buffers aren't
-                * dumped on core dumps */
-               vma->vm_flags |= (VM_LOCKED | VM_IO);
-
-               /* un-cached */
-               prot = pgprot_val(vma->vm_page_prot);
-               /* 
-                  if (boot_cpu_data.x86 > 3)
-                  prot |= _PAGE_PCD | _PAGE_PWT;
-                */
-               vma->vm_page_prot = __pgprot(prot);
-
-               XGI_INFO("VM: mmap pcie space \n");
-       }
-#if 0
-       else if (IS_FB_OFFSET
-                (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-               if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
-                                        XGI_VMA_OFFSET(vma),
-                                        vma->vm_end - vma->vm_start,
-                                        vma->vm_page_prot))
-                       return -EAGAIN;
-
-               // mark it as IO so that we don't dump it on core dump
-               vma->vm_flags |= VM_IO;
-               XGI_INFO("VM: mmap fb space \n");
-       }
-#endif
-       else {
-               vma->vm_flags |= (VM_IO | VM_LOCKED);
-               XGI_ERROR("VM: mmap wrong range \n");
-       }
-
-       vma->vm_file = filp;
-
-       return 0;
-}
-
-unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait)
-{
-       struct xgi_file_private *fp;
-       struct xgi_info *info;
-       unsigned int mask = 0;
-       unsigned long eflags;
-
-       info = XGI_INFO_FROM_FP(filp);
-
-       if (info->device_number == XGI_CONTROL_DEVICE_NUMBER)
-               return xgi_kern_ctl_poll(filp, wait);
-
-       fp = XGI_GET_FP(filp);
-
-       if (!(filp->f_flags & O_NONBLOCK)) {
-               /* add us to the list */
-               poll_wait(filp, &fp->wait_queue, wait);
-       }
-
-       xgi_lock_irqsave(fp->fp_lock, eflags);
-
-       /* wake the user on any event */
-       if (fp->num_events) {
-               XGI_INFO("Hey, an event occured!\n");
-               /*
-                * trigger the client, when they grab the event,
-                * we'll decrement the event count
-                */
-               mask |= (POLLPRI | POLLIN);
-       }
-       xgi_unlock_irqsave(fp->fp_lock, eflags);
-
-       return mask;
-}
-
-int xgi_kern_ioctl(struct inode *inode, struct file *filp,
-                  unsigned int cmd, unsigned long arg)
-{
-       struct xgi_info *info;
-       struct xgi_mem_alloc *alloc = NULL;
-
-       int status = 0;
-       void *arg_copy;
-       int arg_size;
-       int err = 0;
-
-       info = XGI_INFO_FROM_FP(filp);
-
-       XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd),
-                _IOC_NR(cmd), arg, _IOC_SIZE(cmd));
-       /*
-        * extract the type and number bitfields, and don't decode
-        * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
-        */
-       if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC)
-               return -ENOTTY;
-       if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR)
-               return -ENOTTY;
-
-       /*
-        * the direction is a bitmask, and VERIFY_WRITE catches R/W
-        * transfers. `Type' is user-oriented, while
-        * access_ok is kernel-oriented, so the concept of "read" and
-        * "write" is reversed
-        */
-       if (_IOC_DIR(cmd) & _IOC_READ) {
-               err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
-       } else if (_IOC_DIR(cmd) & _IOC_WRITE) {
-               err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
-       }
-       if (err)
-               return -EFAULT;
-
-       XGI_CHECK_PCI_CONFIG(info);
-
-       arg_size = _IOC_SIZE(cmd);
-       XGI_KMALLOC(arg_copy, arg_size);
-       if (arg_copy == NULL) {
-               XGI_ERROR("failed to allocate ioctl memory\n");
-               return -ENOMEM;
-       }
-
-       /* Jong 05/25/2006 */
-       /* copy_from_user(arg_copy, (void *)arg, arg_size); */
-       if (copy_from_user(arg_copy, (void *)arg, arg_size)) {
-               XGI_ERROR("failed to copyin ioctl data\n");
-               XGI_INFO("Jong-copy_from_user-fail! \n");
-       } else
-               XGI_INFO("Jong-copy_from_user-OK! \n");
-
-       alloc = (struct xgi_mem_alloc *) arg_copy;
-       XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg,
-                arg_size);
-
-       switch (_IOC_NR(cmd)) {
-       case XGI_ESC_POST_VBIOS:
-               XGI_INFO("Jong-xgi_ioctl_post_vbios \n");
-               break;
-       case XGI_ESC_FB_ALLOC:
-               XGI_INFO("Jong-xgi_ioctl_fb_alloc \n");
-               xgi_fb_alloc(info, alloc, 0);
-               break;
-       case XGI_ESC_FB_FREE:
-               XGI_INFO("Jong-xgi_ioctl_fb_free \n");
-               xgi_fb_free(info, *(unsigned long *)arg_copy);
-               break;
-       case XGI_ESC_PCIE_ALLOC:
-               XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n");
-               xgi_pcie_alloc(info, alloc, 0);
-               break;
-       case XGI_ESC_PCIE_FREE:
-               XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n",
-                        *((unsigned long *)arg_copy));
-               xgi_pcie_free(info, *((unsigned long *)arg_copy));
-               break;
-       case XGI_ESC_GE_RESET:
-               XGI_INFO("Jong-xgi_ioctl_ge_reset \n");
-               xgi_ge_reset(info);
-               break;
-       case XGI_ESC_DUMP_REGISTER:
-               XGI_INFO("Jong-xgi_ioctl_dump_register \n");
-               xgi_dump_register(info);
-               break;
-       case XGI_ESC_DEBUG_INFO:
-               XGI_INFO("Jong-xgi_ioctl_restore_registers \n");
-               xgi_restore_registers(info);
-               break;
-       case XGI_ESC_SUBMIT_CMDLIST:
-               XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n");
-               xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy);
-               break;
-       case XGI_ESC_TEST_RWINKERNEL:
-               XGI_INFO("Jong-xgi_test_rwinkernel \n");
-               xgi_test_rwinkernel(info, *(unsigned long *)arg_copy);
-               break;
-       case XGI_ESC_STATE_CHANGE:
-               XGI_INFO("Jong-xgi_state_change \n");
-               xgi_state_change(info, (struct xgi_state_info *) arg_copy);
-               break;
-       default:
-               XGI_INFO("Jong-xgi_ioctl_default \n");
-               status = -EINVAL;
-               break;
-       }
-
-       if (copy_to_user((void *)arg, arg_copy, arg_size)) {
-               XGI_ERROR("failed to copyout ioctl data\n");
-               XGI_INFO("Jong-copy_to_user-fail! \n");
-       } else
-               XGI_INFO("Jong-copy_to_user-OK! \n");
-
-       XGI_KFREE(arg_copy, arg_size);
-       return status;
-}
-
-/*
- * xgi control driver operations defined here
- */
-int xgi_kern_ctl_open(struct inode *inode, struct file *filp)
-{
-       struct xgi_info *info = &xgi_ctl_device;
-
-       int rc = 0;
-
-       XGI_INFO("Jong-xgi_kern_ctl_open\n");
-
-       xgi_down(info->info_sem);
-       info->device_number = XGI_CONTROL_DEVICE_NUMBER;
-
-       /* save the xgi info in file->private_data */
-       filp->private_data = info;
-
-       if (XGI_ATOMIC_READ(info->use_count) == 0) {
-               init_waitqueue_head(&xgi_ctl_waitqueue);
-       }
-
-       info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL;
-
-       XGI_ATOMIC_INC(info->use_count);
-       xgi_up(info->info_sem);
-
-       return rc;
-}
-
-int xgi_kern_ctl_close(struct inode *inode, struct file *filp)
-{
-       struct xgi_info *info = XGI_INFO_FROM_FP(filp);
-
-       XGI_INFO("Jong-xgi_kern_ctl_close\n");
-
-       xgi_down(info->info_sem);
-       if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) {
-               info->flags = 0;
-       }
-       xgi_up(info->info_sem);
-
-       if (FILE_PRIVATE(filp)) {
-               xgi_free_file_private(FILE_PRIVATE(filp));
-               FILE_PRIVATE(filp) = NULL;
-       }
-
-       return 0;
-}
-
-unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait)
-{
-       //struct xgi_info  *info = XGI_INFO_FROM_FP(filp);;
-       unsigned int ret = 0;
-
-       if (!(filp->f_flags & O_NONBLOCK)) {
-               poll_wait(filp, &xgi_ctl_waitqueue, wait);
-       }
-
-       return ret;
-}
-
-/*
- * xgi proc system
- */
-static u8 xgi_find_pcie_capability(struct pci_dev *dev)
-{
-       u16 status;
-       u8 cap_ptr, cap_id;
-
-       pci_read_config_word(dev, PCI_STATUS, &status);
-       status &= PCI_STATUS_CAP_LIST;
-       if (!status)
-               return 0;
-
-       switch (dev->hdr_type) {
-       case PCI_HEADER_TYPE_NORMAL:
-       case PCI_HEADER_TYPE_BRIDGE:
-               pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
-               break;
-       default:
-               return 0;
+       /* Init the resource manager */
+       err = xgi_pcie_heap_init(info);
+       if (err) {
+               DRM_ERROR("xgi_pcie_heap_init() failed\n");
+               return err;
        }
 
-       do {
-               cap_ptr &= 0xFC;
-               pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
-               pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT,
-                                    &cap_ptr);
-       } while (cap_ptr && cap_id != 0xFF);
-
-       return 0;
-}
-
-int xgi_kern_read_card_info(char *page, char **start, off_t off,
-                           int count, int *eof, void *data)
-{
-       struct pci_dev *dev;
-       char *type;
-       int len = 0;
-
-       struct xgi_info *info;
-       info = (struct xgi_info *) data;
-
-       dev = info->dev;
-       if (!dev)
-               return 0;
-
-       type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI";
-       len += sprintf(page + len, "Card Type: \t %s\n", type);
-
-       XGI_PCI_DEV_PUT(dev);
-       return len;
-}
-
-int xgi_kern_read_version(char *page, char **start, off_t off,
-                         int count, int *eof, void *data)
-{
-       int len = 0;
-
-       len += sprintf(page + len, "XGI version: %s\n", "1.0");
-       len += sprintf(page + len, "GCC version:  %s\n", "3.0");
+       /* Allocate 1 MB for the command buffer (the flush2D batch array). */
+       xgi_cmdlist_initialize(info, 0x100000);
 
-       return len;
-}
-
-int xgi_kern_read_pcie_info(char *page, char **start, off_t off,
-                           int count, int *eof, void *data)
-{
+       info->bootstrap_done = 1;
        return 0;
 }
 
-int xgi_kern_read_status(char *page, char **start, off_t off,
-                        int count, int *eof, void *data)
-{
-       return 0;
-}
 
-static void xgi_proc_create(void)
+void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp)
 {
-#ifdef CONFIG_PROC_FS
-
-       struct pci_dev *dev;
-       int i = 0;
-       char name[6];
-
-       struct proc_dir_entry *entry;
-       struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards;
-
-       struct xgi_info *info;
-       struct xgi_info *xgi_max_devices;
-
-       /* world readable directory */
-       int flags = S_IFDIR | S_IRUGO | S_IXUGO;
-
-       proc_xgi = create_proc_entry("xgi", flags, proc_root_driver);
-       if (!proc_xgi)
-               goto failed;
-
-       proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi);
-       if (!proc_xgi_cards)
-               goto failed;
-
-       proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi);
-       if (!proc_xgi_pcie)
-               goto failed;
-
-       /*
-        * Set the module owner to ensure that the reference
-        * count reflects accesses to the proc files.
-        */
-       proc_xgi->owner = THIS_MODULE;
-       proc_xgi_cards->owner = THIS_MODULE;
-       proc_xgi_pcie->owner = THIS_MODULE;
-
-       xgi_max_devices = xgi_devices + XGI_MAX_DEVICES;
-       for (info = xgi_devices; info < xgi_max_devices; info++) {
-               /* world readable file */
-               flags = S_IFREG | S_IRUGO;
-
-               dev = info->dev;
-               if (!dev)
-                       break;
-
-               sprintf(name, "%d", i++);
-               entry = create_proc_entry(name, flags, proc_xgi_cards);
-               if (!entry) {
-                       XGI_PCI_DEV_PUT(dev);
-                       goto failed;
-               }
-
-               entry->data = info;
-               entry->read_proc = xgi_kern_read_card_info;
-               entry->owner = THIS_MODULE;
-
-               if (xgi_find_pcie_capability(dev)) {
-                       entry =
-                           create_proc_entry("status", flags, proc_xgi_pcie);
-                       if (!entry) {
-                               XGI_PCI_DEV_PUT(dev);
-                               goto failed;
-                       }
-
-                       entry->data = info;
-                       entry->read_proc = xgi_kern_read_status;
-                       entry->owner = THIS_MODULE;
-
-                       entry = create_proc_entry("card", flags, proc_xgi_pcie);
-                       if (!entry) {
-                               XGI_PCI_DEV_PUT(dev);
-                               goto failed;
-                       }
-
-                       entry->data = info;
-                       entry->read_proc = xgi_kern_read_pcie_info;
-                       entry->owner = THIS_MODULE;
-               }
-
-               XGI_PCI_DEV_PUT(dev);
-       }
-
-       entry = create_proc_entry("version", flags, proc_xgi);
-       if (!entry)
-               goto failed;
-
-       entry->read_proc = xgi_kern_read_version;
-       entry->owner = THIS_MODULE;
-
-       entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie);
-       if (!entry)
-               goto failed;
+       struct xgi_info * info = dev->dev_private;
 
-       entry->data = NULL;
-       entry->read_proc = xgi_kern_read_pcie_info;
-       entry->owner = THIS_MODULE;
-
-       return;
-
-      failed:
-       XGI_ERROR("failed to create /proc entries!\n");
-       xgi_proc_remove_all(proc_xgi);
-#endif
+       xgi_pcie_free_all(info, filp);
+       xgi_fb_free_all(info, filp);
 }
 
-#ifdef CONFIG_PROC_FS
-static void xgi_proc_remove_all(struct proc_dir_entry *entry)
-{
-       while (entry) {
-               struct proc_dir_entry *next = entry->next;
-               if (entry->subdir)
-                       xgi_proc_remove_all(entry->subdir);
-               remove_proc_entry(entry->name, entry->parent);
-               if (entry == proc_xgi)
-                       break;
-               entry = next;
-       }
-}
-#endif
-
-static void xgi_proc_remove(void)
-{
-#ifdef CONFIG_PROC_FS
-       xgi_proc_remove_all(proc_xgi);
-#endif
-}
 
 /*
  * driver receives an interrupt if someone waiting, then hand it off.
  */
-irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs)
+irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
 {
-       struct xgi_info *info = (struct xgi_info *) dev_id;
+       struct drm_device *dev = (struct drm_device *) arg;
+//     struct xgi_info *info = dev->dev_private;
        u32 need_to_run_bottom_half = 0;
 
-       //XGI_INFO("xgi_kern_isr \n");
+       //DRM_INFO("xgi_kern_isr \n");
 
        //XGI_CHECK_PCI_CONFIG(info);
 
        //xgi_dvi_irq_handler(info);
 
        if (need_to_run_bottom_half) {
-               tasklet_schedule(&info->tasklet);
+               drm_locked_tasklet(dev, xgi_kern_isr_bh);
        }
 
        return IRQ_HANDLED;
 }
 
-void xgi_kern_isr_bh(unsigned long data)
+void xgi_kern_isr_bh(struct drm_device *dev)
 {
-       struct xgi_info *info = (struct xgi_info *) data;
+       struct xgi_info *info = dev->dev_private;
 
-       XGI_INFO("xgi_kern_isr_bh \n");
+       DRM_INFO("xgi_kern_isr_bh \n");
 
        //xgi_dvi_irq_handler(info);
 
        XGI_CHECK_PCI_CONFIG(info);
 }
 
-static void xgi_lock_init(struct xgi_info * info)
+int xgi_driver_load(struct drm_device *dev, unsigned long flags)
 {
-       if (info == NULL)
-               return;
-
-       spin_lock_init(&info->info_lock);
-
-       sema_init(&info->info_sem, 1);
-       sema_init(&info->fb_sem, 1);
-       sema_init(&info->pcie_sem, 1);
-
-       XGI_ATOMIC_SET(info->use_count, 0);
-}
+       struct xgi_info *info;
+       int err;
 
-static void xgi_dev_init(struct xgi_info * info)
-{
-       struct pci_dev *pdev = NULL;
-       struct xgi_dev *dev;
-       int found = 0;
-       u16 pci_cmd;
 
-       XGI_INFO("Enter xgi_dev_init \n");
+       info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
+       if (!info)
+               return DRM_ERR(ENOMEM);
 
-       //XGI_PCI_FOR_EACH_DEV(pdev)
-       {
-               for (dev = xgidev_list; dev->vendor; dev++) {
-                       if ((dev->vendor == pdev->vendor)
-                           && (dev->device == pdev->device)) {
-                               u8 rev_id;
+       (void) memset(info, 0, sizeof(*info));
+       dev->dev_private = info;
+       info->dev = dev;
 
-                               XGI_INFO("dev->vendor = pdev->vendor= %x \n",
-                                        dev->vendor);
-                               XGI_INFO("dev->device = pdev->device= %x \n",
-                                        dev->device);
+       sema_init(&info->fb_sem, 1);
+       sema_init(&info->pcie_sem, 1);
 
-                               xgi_devices[found].dev = pdev;
+       info->mmio.base = drm_get_resource_start(dev, 1);
+       info->mmio.size = drm_get_resource_len(dev, 1);
 
-                               pci_read_config_byte(pdev, PCI_REVISION_ID,
-                                                    rev_id);
+       DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
+                (unsigned long) info->mmio.base, info->mmio.size);
 
-                               XGI_INFO("PCI_REVISION_ID= %x \n", rev_id);
 
-                               pci_read_config_word(pdev, PCI_COMMAND,
-                                                    &pci_cmd);
+       if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
+               DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
+                         (unsigned long) info->mmio.base, info->mmio.size);
+               return DRM_ERR(EINVAL);
+       }
 
-                               XGI_INFO("PCI_COMMAND = %x \n", pci_cmd);
 
-                               break;
-                       }
-               }
+       err = drm_addmap(dev, info->mmio.base, info->mmio.size,
+                        _DRM_REGISTERS, _DRM_KERNEL | _DRM_READ_ONLY,
+                        &info->mmio_map);
+       if (err) {
+               DRM_ERROR("Unable to map MMIO region: %d\n", err);
+               return err;
        }
-}
 
-/*
- * Export to Linux Kernel
- */
+       xgi_enable_mmio(info);
+       //xgi_enable_ge(info);
 
-static int __init xgi_init_module(void)
-{
-       struct xgi_info *info = &xgi_devices[xgi_num_devices];
-       int i, result;
+       info->fb.base = drm_get_resource_start(dev, 0);
+       info->fb.size = drm_get_resource_len(dev, 0);
 
-       XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION);
-       //SET_MODULE_OWNER(&xgi_fops);
+       DRM_INFO("fb   base: 0x%lx, size: 0x%x\n",
+                (unsigned long) info->fb.base, info->fb.size);
 
-       memset(xgi_devices, 0, sizeof(xgi_devices));
+       info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;
 
-       if (pci_register_driver(&xgi_pci_driver) < 0) {
-               pci_unregister_driver(&xgi_pci_driver);
-               XGI_ERROR("no XGI graphics adapter found\n");
-               return -ENODEV;
-       }
+       DRM_INFO("fb   base: 0x%lx, size: 0x%x (probed)\n",
+                (unsigned long) info->fb.base, info->fb.size);
 
-       XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices,
-                xgi_devices[xgi_num_devices].fb.base);
-       XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices,
-                xgi_devices[xgi_num_devices].fb.size);
 
-/* Jong 07/27/2006; test for ubuntu */
-/*
-#ifdef CONFIG_DEVFS_FS
-
-    XGI_INFO("Jong-Use devfs \n");
-    do
-    {
-        xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0);
-        if (xgi_devfs_handles[0] == NULL)
-        {
-            result = -ENOMEM;
-            XGI_ERROR("devfs register failed\n");
-            goto failed;
-        }
-    } while(0);
-       #else *//* no devfs, do it the "classic" way  */
-
-       XGI_INFO("Jong-Use non-devfs \n");
-       /*
-        * Register your major, and accept a dynamic number. This is the
-        * first thing to do, in order to avoid releasing other module's
-        * fops in scull_cleanup_module()
-        */
-       result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops);
-       if (result < 0) {
-               XGI_ERROR("register chrdev failed\n");
-               pci_unregister_driver(&xgi_pci_driver);
-               return result;
+       if ((info->fb.base == 0) || (info->fb.size == 0)) {
+               DRM_ERROR("frame buffer appears to be wrong: 0x%lx 0x%x\n",
+                         (unsigned long) info->fb.base, info->fb.size);
+               return DRM_ERR(EINVAL);
        }
-       if (xgi_major == 0)
-               xgi_major = result;     /* dynamic */
 
-       /* #endif *//* CONFIG_DEVFS_FS */
 
-       XGI_INFO("Jong-major number %d\n", xgi_major);
 
-       /* instantiate tasklets */
-       for (i = 0; i < XGI_MAX_DEVICES; i++) {
-               /*
-                * We keep one tasklet per card to avoid latency issues with more
-                * than one device; no two instances of a single tasklet are ever
-                * executed concurrently.
-                */
-               XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1);
+       xgi_mem_block_cache = kmem_cache_create("xgi_mem_block",
+                                               sizeof(struct xgi_mem_block),
+                                               0,
+                                               SLAB_HWCACHE_ALIGN,
+                                               NULL, NULL);
+       if (xgi_mem_block_cache == NULL) {
+               return DRM_ERR(ENOMEM);
        }
 
-       /* init the xgi control device */
-       {
-               struct xgi_info *info_ctl = &xgi_ctl_device;
-               xgi_lock_init(info_ctl);
-       }
 
        /* Init the resource manager */
-       INIT_LIST_HEAD(&xgi_mempid_list);
-       if (!xgi_fb_heap_init(info)) {
-               XGI_ERROR("xgi_fb_heap_init() failed\n");
-               result = -EIO;
-               goto failed;
+       err = xgi_fb_heap_init(info);
+       if (err) {
+               DRM_ERROR("xgi_fb_heap_init() failed\n");
+               return err;
        }
 
-       /* Init the resource manager */
-       if (!xgi_pcie_heap_init(info)) {
-               XGI_ERROR("xgi_pcie_heap_init() failed\n");
-               result = -EIO;
-               goto failed;
-       }
-
-       /* create /proc/driver/xgi */
-       xgi_proc_create();
-
-#if defined(DEBUG)
-       inter_module_register("xgi_devices", THIS_MODULE, xgi_devices);
-#endif
-
        return 0;
-
-      failed:
-#ifdef CONFIG_DEVFS_FS
-       XGI_DEVFS_REMOVE_CONTROL();
-       XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
-#endif
-
-       if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
-               XGI_ERROR("unregister xgi chrdev failed\n");
-
-       for (i = 0; i < xgi_num_devices; i++) {
-               if (xgi_devices[i].dev) {
-                       release_mem_region(xgi_devices[i].fb.base,
-                                          xgi_devices[i].fb.size);
-                       release_mem_region(xgi_devices[i].mmio.base,
-                                          xgi_devices[i].mmio.size);
-               }
-       }
-
-       pci_unregister_driver(&xgi_pci_driver);
-       return result;
-
-       return 1;
 }
 
-void __exit xgi_exit_module(void)
+int xgi_driver_unload(struct drm_device *dev)
 {
-       int i;
-
-#ifdef CONFIG_DEVFS_FS
-       XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
-#endif
-
-       if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
-               XGI_ERROR("unregister xgi chrdev failed\n");
-
-       XGI_INFO("Jong-unregister xgi chrdev scceeded\n");
-       for (i = 0; i < XGI_MAX_DEVICES; i++) {
-               if (xgi_devices[i].dev) {
-                       /* clean up the flush2D batch array */
-                       xgi_cmdlist_cleanup(&xgi_devices[i]);
-
-                       if (xgi_devices[i].fb.vbase != NULL) {
-                               iounmap(xgi_devices[i].fb.vbase);
-                               xgi_devices[i].fb.vbase = NULL;
-                       }
-                       if (xgi_devices[i].mmio.vbase != NULL) {
-                               iounmap(xgi_devices[i].mmio.vbase);
-                               xgi_devices[i].mmio.vbase = NULL;
-                       }
-                       //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);
-                       //XGI_INFO("release frame buffer mem region scceeded\n");
-
-                       release_mem_region(xgi_devices[i].mmio.base,
-                                          xgi_devices[i].mmio.size);
-                       XGI_INFO("release MMIO mem region scceeded\n");
-
-                       xgi_fb_heap_cleanup(&xgi_devices[i]);
-                       XGI_INFO("xgi_fb_heap_cleanup scceeded\n");
-
-                       xgi_pcie_heap_cleanup(&xgi_devices[i]);
-                       XGI_INFO("xgi_pcie_heap_cleanup scceeded\n");
-
-                       XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev);
-               }
-       }
-
-       pci_unregister_driver(&xgi_pci_driver);
-
-       /* remove /proc/driver/xgi */
-       xgi_proc_remove();
+       struct xgi_info * info = dev->dev_private;
 
-#if defined(DEBUG)
-       inter_module_unregister("xgi_devices");
-#endif
-}
+       xgi_cmdlist_cleanup(info);
+       if (info->fb_map != NULL) {
+               drm_rmmap(info->dev, info->fb_map);
+       }
 
-module_init(xgi_init_module);
-module_exit(xgi_exit_module);
+       if (info->mmio_map != NULL) {
+               drm_rmmap(info->dev, info->mmio_map);
+       }
 
-#if defined(XGI_PM_SUPPORT_ACPI)
-int xgi_acpi_event(struct pci_dev *dev, u32 state)
-{
-       return 1;
-}
+       xgi_mem_heap_cleanup(&info->fb_heap);
+       xgi_mem_heap_cleanup(&info->pcie_heap);
+       xgi_pcie_lut_cleanup(info);
 
-int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state)
-{
-       return 1;
-}
+       if (xgi_mem_block_cache) {
+               kmem_cache_destroy(xgi_mem_block_cache);
+               xgi_mem_block_cache = NULL;
+       }
 
-int xgi_kern_acpi_resume(struct pci_dev *dev)
-{
-       return 1;
+       return 0;
 }
-#endif
-
-MODULE_AUTHOR("Andrea Zhang <andrea_zhang@macrosynergy.com>");
-MODULE_DESCRIPTION("xgi kernel driver for xgi cards");
-MODULE_LICENSE("GPL");
index 382bb7a..2096587 100644 (file)
 #ifndef _XGI_DRV_H_
 #define _XGI_DRV_H_
 
-#include "xgi_drm.h"
-
-#define XGI_MAJOR_VERSION   0
-#define XGI_MINOR_VERSION   7
-#define XGI_PATCHLEVEL      5
-
-#define XGI_DRV_VERSION     "0.7.5"
-
-#ifndef XGI_DRV_NAME
-#define XGI_DRV_NAME        "xgi"
-#endif
-
-/*
- * xgi reserved major device number, Set this to 0 to
- * request dynamic major number allocation.
- */
-#ifndef XGI_DEV_MAJOR
-#define XGI_DEV_MAJOR   0
-#endif
-
-#ifndef XGI_MAX_DEVICES
-#define XGI_MAX_DEVICES 1
-#endif
-
-/* Jong 06/06/2006 */
-/* #define XGI_DEBUG */
+#include "drmP.h"
+#include "drm.h"
 
-#ifndef PCI_VENDOR_ID_XGI
-/*
-#define PCI_VENDOR_ID_XGI       0x1023
-*/
-#define PCI_VENDOR_ID_XGI       0x18CA
+#define DRIVER_AUTHOR          "Andrea Zhang <andrea_zhang@macrosynergy.com>"
 
-#endif
-
-#ifndef PCI_DEVICE_ID_XP5
-#define PCI_DEVICE_ID_XP5       0x2200
-#endif
-
-#ifndef PCI_DEVICE_ID_XG47
-#define PCI_DEVICE_ID_XG47      0x0047
-#endif
+#define DRIVER_NAME            "xgi"
+#define DRIVER_DESC            "XGI XP5 / XP10 / XG47"
+#define DRIVER_DATE            "20070710"
 
-/* Macros to make printk easier */
-#define XGI_ERROR(fmt, arg...) \
-    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
+#define DRIVER_MAJOR           0
+#define DRIVER_MINOR           8
+#define DRIVER_PATCHLEVEL      0
 
-#define XGI_MEM_ERROR(area, fmt, arg...) \
-    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
+#include "xgi_drm.h"
 
-/* #define XGI_DEBUG */
+struct xgi_aperture {
+       dma_addr_t base;
+       unsigned int size;
+};
 
-#ifdef XGI_DEBUG
-#define XGI_INFO(fmt, arg...) \
-    printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg)
-/*    printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */
-#else
-#define XGI_INFO(fmt, arg...)   do { } while (0)
-#endif
+struct xgi_mem_block {
+       struct list_head list;
+       unsigned long offset;
+       unsigned long size;
+       DRMFILE filp;
 
-/* device name length; must be atleast 8 */
-#define XGI_DEVICE_NAME_LENGTH      40
+       unsigned int owner;
+};
 
-/* need a fake device number for control device; just to flag it for msgs */
-#define XGI_CONTROL_DEVICE_NUMBER   100
+struct xgi_mem_heap {
+       struct list_head free_list;
+       struct list_head used_list;
+       struct list_head sort_list;
+       unsigned long max_freesize;
 
-struct xgi_aperture {
-       unsigned long base;
-       unsigned int size;
-       void *vbase;
+       bool initialized;
 };
 
 struct xgi_info {
-       struct pci_dev *dev;
-       int flags;
-       int device_number;
+       struct drm_device *dev;
+
+       bool bootstrap_done;
 
        /* physical characteristics */
        struct xgi_aperture mmio;
        struct xgi_aperture fb;
        struct xgi_aperture pcie;
 
+       struct drm_map *mmio_map;
+       struct drm_map *pcie_map;
+       struct drm_map *fb_map;
+
        /* look up table parameters */
-       u32 *lut_base;
+       struct drm_dma_handle *lut_handle;
        unsigned int lutPageSize;
-       unsigned int lutPageOrder;
-       bool isLUTInLFB;
-       unsigned int sdfbPageSize;
-
-       u32 pcie_config;
-       u32 pcie_status;
-
-       atomic_t use_count;
 
-       /* keep track of any pending bottom halfes */
-       struct tasklet_struct tasklet;
+       struct xgi_mem_heap fb_heap;
+       struct xgi_mem_heap pcie_heap;
 
-       spinlock_t info_lock;
-
-       struct semaphore info_sem;
        struct semaphore fb_sem;
        struct semaphore pcie_sem;
 };
 
-struct xgi_ioctl_post_vbios {
-       unsigned int bus;
-       unsigned int slot;
-};
-
 enum PcieOwner {
        PCIE_2D = 0,
        /*
@@ -151,64 +105,47 @@ enum PcieOwner {
        PCIE_INVALID = 0x7fffffff
 };
 
-struct xgi_mem_pid {
-       struct list_head list;
-       enum xgi_mem_location location;
-       unsigned long bus_addr;
-       unsigned long pid;
-};
-
-
-/*
- * flags
- */
-#define XGI_FLAG_OPEN            0x0001
-#define XGI_FLAG_NEEDS_POSTING   0x0002
-#define XGI_FLAG_WAS_POSTED      0x0004
-#define XGI_FLAG_CONTROL         0x0010
-#define XGI_FLAG_MAP_REGS_EARLY  0x0200
-
-/* mmap(2) offsets */
-
-#define IS_IO_OFFSET(info, offset, length) \
-            (((offset) >= (info)->mmio.base) \
-            && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size))
-
-/* Jong 06/14/2006 */
-/* (info)->fb.base is a base address for physical (bus) address space */
-/* what's the definition of offest? on  physical (bus) address space or HW address space */
-/* Jong 06/15/2006; use HW address space */
-#define IS_FB_OFFSET(info, offset, length) \
-            (((offset) >= 0) \
-            && (((offset) + (length)) <= (info)->fb.size))
-#if 0
-#define IS_FB_OFFSET(info, offset, length) \
-            (((offset) >= (info)->fb.base) \
-            && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size))
-#endif
 
-#define IS_PCIE_OFFSET(info, offset, length) \
-            (((offset) >= (info)->pcie.base) \
-            && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))
+extern struct kmem_cache *xgi_mem_block_cache;
+extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+       unsigned long size, enum PcieOwner owner);
+extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+       DRMFILE filp);
+extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start,
+       unsigned int end);
+extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap);
 
 extern int xgi_fb_heap_init(struct xgi_info * info);
-extern void xgi_fb_heap_cleanup(struct xgi_info * info);
 
-extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
-                        pid_t pid);
-extern void xgi_fb_free(struct xgi_info * info, unsigned long offset);
-extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt);
+extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+       DRMFILE filp);
+
+extern int xgi_fb_free(struct xgi_info * info, unsigned long offset,
+       DRMFILE filp);
 
 extern int xgi_pcie_heap_init(struct xgi_info * info);
-extern void xgi_pcie_heap_cleanup(struct xgi_info * info);
+extern void xgi_pcie_lut_cleanup(struct xgi_info * info);
+
+extern int xgi_pcie_alloc(struct xgi_info * info,
+                         struct xgi_mem_alloc * alloc, DRMFILE filp);
+
+extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset,
+       DRMFILE filp);
+
+extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
 
-extern void xgi_pcie_alloc(struct xgi_info * info, 
-                          struct xgi_mem_alloc * alloc, pid_t pid);
-extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset);
-extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
-                                                   unsigned long address);
-extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address);
+extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE);
+extern void xgi_fb_free_all(struct xgi_info *, DRMFILE);
 
-extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address);
+extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS);
 
 #endif
index 7d390d4..ce68984 100644 (file)
  * DEALINGS IN THE SOFTWARE.                                                                                           
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
-#include "xgi_fb.h"
 
 #define XGI_FB_HEAP_START 0x1000000
 
-static struct xgi_mem_heap *xgi_fb_heap;
-static struct kmem_cache *xgi_fb_cache_block = NULL;
-extern struct list_head xgi_mempid_list;
+struct kmem_cache *xgi_mem_block_cache = NULL;
 
 static struct xgi_mem_block *xgi_mem_new_node(void);
-static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size);
-static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset);
 
-void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, 
-                 pid_t pid)
-{
-       struct xgi_mem_block *block;
-       struct xgi_mem_pid *mempid_block;
-
-       if (alloc->is_front) {
-               alloc->location = XGI_MEMLOC_LOCAL;
-               alloc->bus_addr = info->fb.base;
-               alloc->hw_addr = 0;
-               XGI_INFO
-                   ("Video RAM allocation on front buffer successfully! \n");
-       } else {
-               xgi_down(info->fb_sem);
-               block = xgi_mem_alloc(info, alloc->size);
-               xgi_up(info->fb_sem);
-
-               if (block == NULL) {
-                       alloc->location = XGI_MEMLOC_LOCAL;
-                       alloc->size = 0;
-                       alloc->bus_addr = 0;
-                       alloc->hw_addr = 0;
-                       XGI_ERROR("Video RAM allocation failed\n");
-               } else {
-                       XGI_INFO("Video RAM allocation succeeded: 0x%p\n",
-                                (char *)block->offset);
-                       alloc->location = XGI_MEMLOC_LOCAL;
-                       alloc->size = block->size;
-                       alloc->bus_addr = info->fb.base + block->offset;
-                       alloc->hw_addr = block->offset;
-
-                       /* manage mempid */
-                       mempid_block =
-                           kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
-                       mempid_block->location = XGI_MEMLOC_LOCAL;
-                       mempid_block->bus_addr = alloc->bus_addr;
-                       mempid_block->pid = pid;
-
-                       if (!mempid_block)
-                               XGI_ERROR("mempid_block alloc failed\n");
-
-                       XGI_INFO
-                           ("Memory ProcessID add one fb block pid:%ld successfully! \n",
-                            mempid_block->pid);
-                       list_add(&mempid_block->list, &xgi_mempid_list);
-               }
-       }
-}
 
-void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr)
+int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start,
+                     unsigned int end)
 {
        struct xgi_mem_block *block;
-       unsigned long offset = bus_addr - info->fb.base;
-       struct xgi_mem_pid *mempid_block;
-       struct xgi_mem_pid *mempid_freeblock = NULL;
-
-       if (offset < 0) {
-               XGI_INFO("free onscreen frame buffer successfully !\n");
-       } else {
-               xgi_down(info->fb_sem);
-               block = xgi_mem_free(info, offset);
-               xgi_up(info->fb_sem);
-
-               if (block == NULL) {
-                       XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n",
-                                 offset);
-               }
-
-               /* manage mempid */
-               list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
-                       if (mempid_block->location == XGI_MEMLOC_LOCAL
-                           && mempid_block->bus_addr == bus_addr) {
-                               mempid_freeblock = mempid_block;
-                               break;
-                       }
-               }
-               if (mempid_freeblock) {
-                       list_del(&mempid_freeblock->list);
-                       XGI_INFO
-                           ("Memory ProcessID delete one fb block pid:%ld successfully! \n",
-                            mempid_freeblock->pid);
-                       kfree(mempid_freeblock);
-               }
-       }
-}
-
-int xgi_fb_heap_init(struct xgi_info * info)
-{
-       struct xgi_mem_block *block;
-
-       xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL);
-       if (!xgi_fb_heap) {
-               XGI_ERROR("xgi_fb_heap alloc failed\n");
-               return 0;
-       }
-
-       INIT_LIST_HEAD(&xgi_fb_heap->free_list);
-       INIT_LIST_HEAD(&xgi_fb_heap->used_list);
-       INIT_LIST_HEAD(&xgi_fb_heap->sort_list);
 
-       xgi_fb_cache_block =
-           kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0,
-                             SLAB_HWCACHE_ALIGN, NULL, NULL);
+       INIT_LIST_HEAD(&heap->free_list);
+       INIT_LIST_HEAD(&heap->used_list);
+       INIT_LIST_HEAD(&heap->sort_list);
+       heap->initialized = TRUE;
 
-       if (NULL == xgi_fb_cache_block) {
-               XGI_ERROR("Fail to creat xgi_fb_block\n");
-               goto fail1;
-       }
-
-       block =
-           (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
-                                                GFP_KERNEL);
+       block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
        if (!block) {
-               XGI_ERROR("kmem_cache_alloc failed\n");
-               goto fail2;
+               return DRM_ERR(ENOMEM);
        }
 
-       block->offset = XGI_FB_HEAP_START;
-       block->size = info->fb.size - XGI_FB_HEAP_START;
-
-       list_add(&block->list, &xgi_fb_heap->free_list);
+       block->offset = start;
+       block->size = end - start;
 
-       xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START;
+       list_add(&block->list, &heap->free_list);
 
-       XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset,
-                block->size);
-       XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n",
-                xgi_fb_heap->max_freesize);
+       heap->max_freesize = end - start;
 
-       return 1;
-
-      fail2:
-       if (xgi_fb_cache_block) {
-               kmem_cache_destroy(xgi_fb_cache_block);
-               xgi_fb_cache_block = NULL;
-       }
-      fail1:
-       if (xgi_fb_heap) {
-               kfree(xgi_fb_heap);
-               xgi_fb_heap = NULL;
-       }
        return 0;
 }
 
-void xgi_fb_heap_cleanup(struct xgi_info * info)
+
+void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap)
 {
        struct list_head *free_list;
        struct xgi_mem_block *block;
        struct xgi_mem_block *next;
        int i;
 
-       if (xgi_fb_heap) {
-               free_list = &xgi_fb_heap->free_list;
-               for (i = 0; i < 3; i++, free_list++) {
-                       list_for_each_entry_safe(block, next, free_list, list) {
-                               XGI_INFO
-                                   ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
-                                    i, block->offset, block->size);
-                               //XGI_INFO("No. %d free block: 0x%p \n", i, block);
-                               kmem_cache_free(xgi_fb_cache_block, block);
-                               block = NULL;
-                       }
+       free_list = &heap->free_list;
+       for (i = 0; i < 3; i++, free_list++) {
+               list_for_each_entry_safe(block, next, free_list, list) {
+                       DRM_INFO
+                               ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
+                                i, block->offset, block->size);
+                       kmem_cache_free(xgi_mem_block_cache, block);
+                       block = NULL;
                }
-               XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap);
-               kfree(xgi_fb_heap);
-               xgi_fb_heap = NULL;
-       }
-
-       if (xgi_fb_cache_block) {
-               kmem_cache_destroy(xgi_fb_cache_block);
-               xgi_fb_cache_block = NULL;
        }
+       
+       heap->initialized = 0;
 }
 
-static struct xgi_mem_block *xgi_mem_new_node(void)
+
+struct xgi_mem_block *xgi_mem_new_node(void)
 {
-       struct xgi_mem_block *block;
+       struct xgi_mem_block *block =
+               kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
 
-       block =
-           (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
-                                                GFP_KERNEL);
        if (!block) {
-               XGI_ERROR("kmem_cache_alloc failed\n");
+               DRM_ERROR("kmem_cache_alloc failed\n");
                return NULL;
        }
 
-       return block;
-}
-
-#if 0
-static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
-                                     struct xgi_mem_block * current,
-                                     struct xgi_mem_block * block);
-static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
-                                      struct xgi_mem_block * current,
-                                      struct xgi_mem_block * block);
-static void xgi_mem_insert_node_head(struct xgi_mem_list * list,
-                                    struct xgi_mem_block * block);
-static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
-                                    struct xgi_mem_block * block);
-static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block);
-/*
- *  insert node:block after node:current
- */
-static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
-                                     struct xgi_mem_block * current,
-                                     struct xgi_mem_block * block)
-{
-       block->prev = current;
-       block->next = current->next;
-       current->next = block;
-
-       if (current == list->tail) {
-               list->tail = block;
-       } else {
-               block->next->prev = block;
-       }
-}
+       block->offset = 0;
+       block->size = 0;
+       block->owner = PCIE_INVALID;
+       block->filp = (DRMFILE) -1;
 
-/*
- *  insert node:block before node:current
- */
-static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
-                                      struct xgi_mem_block * current,
-                                      struct xgi_mem_block * block)
-{
-       block->prev = current->prev;
-       block->next = current;
-       current->prev = block;
-       if (current == list->head) {
-               list->head = block;
-       } else {
-               block->prev->next = block;
-       }
-}
-void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block)
-{
-       block->next = list->head;
-       block->prev = NULL;
-
-       if (NULL == list->head) {
-               list->tail = block;
-       } else {
-               list->head->prev = block;
-       }
-       list->head = block;
-}
-
-static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
-                                    struct xgi_mem_block * block)
-{
-       block->next = NULL;
-       block->prev = list->tail;
-       if (NULL == list->tail) {
-               list->head = block;
-       } else {
-               list->tail->next = block;
-       }
-       list->tail = block;
+       return block;
 }
 
-static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block)
-{
-       if (block == list->head) {
-               list->head = block->next;
-       }
-       if (block == list->tail) {
-               list->tail = block->prev;
-       }
-
-       if (block->prev) {
-               block->prev->next = block->next;
-       }
-       if (block->next) {
-               block->next->prev = block->prev;
-       }
 
-       block->next = block->prev = NULL;
-}
-#endif
-static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
-                                     unsigned long originalSize)
+struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+                                   unsigned long originalSize,
+                                   enum PcieOwner owner)
 {
        struct xgi_mem_block *block, *free_block, *used_block;
-
        unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
 
-       XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
+
+       DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
                 originalSize, size);
 
        if (size == 0) {
-               XGI_ERROR("size == 0\n");
+               DRM_ERROR("size == 0\n");
                return (NULL);
        }
-       XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);
-       if (size > xgi_fb_heap->max_freesize) {
-               XGI_ERROR
+       DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize);
+       if (size > heap->max_freesize) {
+               DRM_ERROR
                    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
-                    size, xgi_fb_heap->max_freesize);
+                    size, heap->max_freesize);
                return (NULL);
        }
 
-       list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
-               XGI_INFO("free_list: 0x%px \n", free_list);
+       list_for_each_entry(block, &heap->free_list, list) {
+               DRM_INFO("block: 0x%px \n", block);
                if (size <= block->size) {
                        break;
                }
        }
 
-       if (&block->list == &xgi_fb_heap->free_list) {
-               XGI_ERROR
+       if (&block->list == &heap->free_list) {
+               DRM_ERROR
                    ("Can't allocate %ldk size from frame buffer memory !\n",
                     size / 1024);
                return (NULL);
        }
 
        free_block = block;
-       XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
+       DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
                 size, free_block->offset, free_block->size);
 
        if (size == free_block->size) {
                used_block = free_block;
-               XGI_INFO("size == free_block->size: free_block = 0x%p\n",
+               DRM_INFO("size == free_block->size: free_block = 0x%p\n",
                         free_block);
                list_del(&free_block->list);
        } else {
@@ -372,7 +155,7 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
                        return (NULL);
 
                if (used_block == free_block) {
-                       XGI_ERROR("used_block == free_block = 0x%p\n",
+                       DRM_ERROR("used_block == free_block = 0x%p\n",
                                  used_block);
                }
 
@@ -383,14 +166,16 @@ static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
                free_block->size -= size;
        }
 
-       xgi_fb_heap->max_freesize -= size;
+       heap->max_freesize -= size;
 
-       list_add(&used_block->list, &xgi_fb_heap->used_list);
+       list_add(&used_block->list, &heap->used_list);
+       used_block->owner = owner;
 
        return (used_block);
 }
 
-static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset)
+int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+                DRMFILE filp)
 {
        struct xgi_mem_block *used_block = NULL, *block;
        struct xgi_mem_block *prev, *next;
@@ -398,28 +183,32 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long
        unsigned long upper;
        unsigned long lower;
 
-       list_for_each_entry(block, &xgi_fb_heap->used_list, list) {
+       list_for_each_entry(block, &heap->used_list, list) {
                if (block->offset == offset) {
                        break;
                }
        }
 
-       if (&block->list == &xgi_fb_heap->used_list) {
-               XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
-               return (NULL);
+       if (&block->list == &heap->used_list) {
+               DRM_ERROR("can't find block: 0x%lx to free!\n", offset);
+               return DRM_ERR(ENOENT);
+       }
+
+       if (block->filp != filp) {
+               return DRM_ERR(EPERM);
        }
 
        used_block = block;
-       XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
+       DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
                 used_block, used_block->offset, used_block->size);
 
-       xgi_fb_heap->max_freesize += used_block->size;
+       heap->max_freesize += used_block->size;
 
        prev = next = NULL;
        upper = used_block->offset + used_block->size;
        lower = used_block->offset;
 
-       list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
+       list_for_each_entry(block, &heap->free_list, list) {
                if (block->offset == upper) {
                        next = block;
                } else if ((block->offset + block->size) == lower) {
@@ -427,41 +216,157 @@ static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long
                }
        }
 
-       XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
+       DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
        list_del(&used_block->list);
 
        if (prev && next) {
                prev->size += (used_block->size + next->size);
                list_del(&next->list);
-               XGI_INFO("free node 0x%p\n", next);
-               kmem_cache_free(xgi_fb_cache_block, next);
-               kmem_cache_free(xgi_fb_cache_block, used_block);
-
-               next = NULL;
-               used_block = NULL;
-               return (prev);
+               DRM_INFO("free node 0x%p\n", next);
+               kmem_cache_free(xgi_mem_block_cache, next);
+               kmem_cache_free(xgi_mem_block_cache, used_block);
        }
-
-       if (prev) {
+       else if (prev) {
                prev->size += used_block->size;
-               XGI_INFO("free node 0x%p\n", used_block);
-               kmem_cache_free(xgi_fb_cache_block, used_block);
-               used_block = NULL;
-               return (prev);
+               DRM_INFO("free node 0x%p\n", used_block);
+               kmem_cache_free(xgi_mem_block_cache, used_block);
        }
-
-       if (next) {
+       else if (next) {
                next->size += used_block->size;
                next->offset = used_block->offset;
-               XGI_INFO("free node 0x%p\n", used_block);
-               kmem_cache_free(xgi_fb_cache_block, used_block);
-               used_block = NULL;
-               return (next);
+               DRM_INFO("free node 0x%p\n", used_block);
+               kmem_cache_free(xgi_mem_block_cache, used_block);
+       }
+       else {
+               list_add(&used_block->list, &heap->free_list);
+               DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
+                        used_block, used_block->offset, used_block->size);
        }
 
-       list_add(&used_block->list, &xgi_fb_heap->free_list);
-       XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
-                used_block, used_block->offset, used_block->size);
+       return 0;
+}
 
-       return (used_block);
+
+/* Allocate a block of video (frame buffer) memory for a client.
+ * On success fills in alloc->location / size / offset / hw_addr and
+ * tags the heap block with the owning file handle so it can be
+ * reclaimed when the client exits.  Returns 0 or DRM_ERR(ENOMEM).
+ */
+int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+                DRMFILE filp)
+{
+       struct xgi_mem_block *block;
+
+       if (alloc->is_front) {
+               /* Front-buffer request: hand back offset 0 (the scan-out
+                * surface).  No heap block is created, so xgi_fb_free()
+                * treats offset 0 as a no-op.
+                * NOTE(review): alloc->size is left at whatever the caller
+                * passed in on this path -- confirm that is intended. */
+               alloc->location = XGI_MEMLOC_LOCAL;
+               alloc->offset = 0;
+               alloc->hw_addr = 0;
+               DRM_INFO
+                   ("Video RAM allocation on front buffer successfully! \n");
+       } else {
+               down(&info->fb_sem);
+               block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D);
+               up(&info->fb_sem);
+
+               if (block == NULL) {
+                       alloc->location = XGI_MEMLOC_LOCAL;
+                       alloc->size = 0;
+                       DRM_ERROR("Video RAM allocation failed\n");
+                       return DRM_ERR(ENOMEM);
+               } else {
+                       DRM_INFO("Video RAM allocation succeeded: 0x%p\n",
+                                (char *)block->offset);
+                       alloc->location = XGI_MEMLOC_LOCAL;
+                       alloc->size = block->size;
+                       alloc->offset = block->offset;
+                       alloc->hw_addr = block->offset;
+
+                       /* Record the owner so xgi_fb_free() can verify it
+                        * and xgi_fb_free_all() can reclaim on close. */
+                       block->filp = filp;
+               }
+       }
+
+       return 0;
+}
+
+
+/* ioctl wrapper for xgi_fb_alloc(): copies the request in from user
+ * space, performs the allocation, and copies the (updated) request
+ * back out so the client sees the chosen offset / hw_addr. */
+int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_mem_alloc alloc;
+       struct xgi_info *info = dev->dev_private;
+       int err;
+
+       DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
+                                sizeof(alloc));
+
+       err = xgi_fb_alloc(info, & alloc, filp);
+       if (err) {
+               return err;
+       }
+
+       /* Write back the results (offset, hw_addr, actual size). */
+       DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
+                              alloc, sizeof(alloc));
+
+       return 0;
+}
+
+
+/* Free one frame-buffer heap block previously handed out by
+ * xgi_fb_alloc().  Offset 0 is the front-buffer sentinel (see
+ * xgi_fb_alloc()'s is_front path) and is silently accepted.
+ * Returns 0, DRM_ERR(ENOENT) if no such block, or DRM_ERR(EPERM)
+ * if the block belongs to a different file handle. */
+int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
+{
+       int err = 0;
+
+       if (offset == 0) {
+               DRM_INFO("free onscreen frame buffer successfully !\n");
+       } else {
+               /* fb_sem serializes all fb_heap list manipulation. */
+               down(&info->fb_sem);
+               err = xgi_mem_free(&info->fb_heap, offset, filp);
+               up(&info->fb_sem);
+       }
+
+       return err;
+}
+
+
+/* ioctl wrapper for xgi_fb_free(): the argument is the block offset
+ * returned by the alloc ioctl. */
+int xgi_fb_free_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+       u32 offset;
+
+       /* Cast the user pointer to the type actually copied (u32).
+        * The previous (unsigned long __user *) cast disagreed with
+        * sizeof(offset) on 64-bit kernels and would read the wrong
+        * half of a 64-bit value on big-endian machines.
+        * NOTE(review): confirm xgi_drm.h declares this ioctl arg as a
+        * 32-bit quantity on the user-space side as well. */
+       DRM_COPY_FROM_USER_IOCTL(offset, (u32 __user *) data,
+                                sizeof(offset));
+
+       return xgi_fb_free(info, offset, filp);
+}
+
+
+/* Initialize the frame-buffer allocator: one free region spanning
+ * from XGI_FB_HEAP_START to the end of video RAM (the first 16 MiB
+ * below XGI_FB_HEAP_START is reserved, e.g. for the scan-out buffer). */
+int xgi_fb_heap_init(struct xgi_info * info)
+{
+       return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START,
+                                info->fb.size);
+}
+
+/**
+ * Free all blocks associated with a particular file handle.
+ *
+ * Called on client teardown.  The used list is re-scanned from the
+ * head after every free because freeing a block can merge neighbours
+ * and invalidate any saved iterator.
+ */
+void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp)
+{
+       if (!info->fb_heap.initialized) {
+               return;
+       }
+
+       down(&info->fb_sem);
+
+       do {
+               struct xgi_mem_block *block;
+
+               list_for_each_entry(block, &info->fb_heap.used_list, list) {
+                       if (block->filp == filp) {
+                               break;
+                       }
+               }
+
+               if (&block->list == &info->fb_heap.used_list) {
+                       break;
+               }
+
+               /* Free via xgi_mem_free() directly, NOT xgi_fb_free():
+                * fb_sem is already held here and xgi_fb_free() does
+                * down(&info->fb_sem) again -- kernel semaphores are not
+                * recursive, so that would self-deadlock. */
+               (void) xgi_mem_free(&info->fb_heap, block->offset, filp);
+       } while(1);
+
+       up(&info->fb_sem);
+}
diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h
deleted file mode 100644 (file)
index 363c8bc..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.                  
- *                                                                                                                                                     *
- * All Rights Reserved.                                                                                                                *
- *                                                                                                                                                     *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the     
- * "Software"), to deal in the Software without restriction, including 
- * without limitation on the rights to use, copy, modify, merge,       
- * publish, distribute, sublicense, and/or sell copies of the Software,        
- * and to permit persons to whom the Software is furnished to do so,   
- * subject to the following conditions:                                        
- *                                                                                                                                                     *
- * The above copyright notice and this permission notice (including the        
- * next paragraph) shall be included in all copies or substantial      
- * portions of the Software.                                           
- *                                                                                                                                                     *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,     
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF  
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND               
- * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR                     
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,          
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,          
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER                       
- * DEALINGS IN THE SOFTWARE.                                                                                           
- ***************************************************************************/
-
-#ifndef _XGI_FB_H_
-#define _XGI_FB_H_
-
-struct xgi_mem_block {
-       struct list_head list;
-       unsigned long offset;
-       unsigned long size;
-       atomic_t use_count;
-};
-
-struct xgi_mem_heap {
-       struct list_head free_list;
-       struct list_head used_list;
-       struct list_head sort_list;
-       unsigned long max_freesize;
-       spinlock_t lock;
-};
-
-#endif
diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h
deleted file mode 100644 (file)
index 99bf2d0..0000000
+++ /dev/null
@@ -1,490 +0,0 @@
-
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.                  
- *                                                                                                                                                     *
- * All Rights Reserved.                                                                                                                *
- *                                                                                                                                                     *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the     
- * "Software"), to deal in the Software without restriction, including 
- * without limitation on the rights to use, copy, modify, merge,       
- * publish, distribute, sublicense, and/or sell copies of the Software,        
- * and to permit persons to whom the Software is furnished to do so,   
- * subject to the following conditions:                                        
- *                                                                                                                                                     *
- * The above copyright notice and this permission notice (including the        
- * next paragraph) shall be included in all copies or substantial      
- * portions of the Software.                                           
- *                                                                                                                                                     *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,     
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF  
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND               
- * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR                     
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,          
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,          
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER                       
- * DEALINGS IN THE SOFTWARE.                                                                                           
- ***************************************************************************/
-
-#ifndef _XGI_LINUX_H_
-#define _XGI_LINUX_H_
-
-#ifndef LINUX_VERSION_CODE
-#include <linux/version.h>
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
-#   error "This driver does not support pre-2.6 kernels!"
-#endif
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
-#   define XGI_REMAP_PFN_RANGE_PRESENT
-#else
-#   define XGI_REMAP_PAGE_RANGE_5
-#endif
-
-#if defined (CONFIG_SMP) && !defined (__SMP__)
-#define __SMP__
-#endif
-
-#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
-#define MODVERSIONS
-#endif
-
-#include <linux/kernel.h>      /* printk */
-#include <linux/module.h>
-
-#include <linux/init.h>                /* module_init, module_exit         */
-#include <linux/types.h>       /* pic_t, size_t, __u32, etc        */
-#include <linux/errno.h>       /* error codes                      */
-#include <linux/list.h>                /* circular linked list             */
-#include <linux/stddef.h>      /* NULL, offsetof                   */
-#include <linux/wait.h>                /* wait queues                      */
-
-#include <linux/slab.h>                /* kmalloc, kfree, etc              */
-#include <linux/vmalloc.h>     /* vmalloc, vfree, etc              */
-
-#include <linux/poll.h>                /* poll_wait                        */
-#include <linux/delay.h>       /* mdelay, udelay                   */
-#include <asm/msr.h>           /* rdtsc rdtscl                     */
-
-#include <linux/sched.h>       /* suser(), capable() replacement
-                                  for_each_task, for_each_process  */
-#ifdef for_each_process
-#define XGI_SCAN_PROCESS(p) for_each_process(p)
-#else
-#define XGI_SCAN_PROCESS(p) for_each_task(p)
-#endif
-
-#include <linux/moduleparam.h> /* module_param()                   */
-#include <linux/smp_lock.h>    /* kernel_locked                    */
-#include <asm/tlbflush.h>      /* flush_tlb(), flush_tlb_all()     */
-#include <asm/kmap_types.h>    /* page table entry lookup          */
-
-#include <linux/pci.h>         /* pci_find_class, etc              */
-#include <linux/interrupt.h>   /* tasklets, interrupt helpers      */
-#include <linux/timer.h>
-
-#include <asm/system.h>                /* cli, sli, save_flags             */
-#include <asm/io.h>            /* ioremap, virt_to_phys            */
-#include <asm/uaccess.h>       /* access_ok                        */
-#include <asm/page.h>          /* PAGE_OFFSET                      */
-#include <asm/pgtable.h>       /* pte bit definitions              */
-
-#include <linux/spinlock.h>
-#include <asm/semaphore.h>
-#include <linux/highmem.h>
-
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#endif
-
-#ifdef CONFIG_DEVFS_FS
-#include <linux/devfs_fs_kernel.h>
-#endif
-
-#ifdef CONFIG_KMOD
-#include <linux/kmod.h>
-#endif
-
-#ifdef CONFIG_PM
-#include <linux/pm.h>
-#endif
-
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
-
-#ifdef CONFIG_KDB
-#include <linux/kdb.h>
-#include <asm/kdb.h>
-#endif
-
-#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
-#define AGPGART
-#include <linux/agp_backend.h>
-#include <linux/agpgart.h>
-#endif
-
-#ifndef MAX_ORDER
-#define MAX_ORDER 11
-#endif
-
-#ifndef module_init
-#define module_init(x)  int init_module(void) { return x(); }
-#define module_exit(x)  void cleanup_module(void) { x(); }
-#endif
-
-#ifndef minor
-#define minor(x) MINOR(x)
-#endif
-
-#ifndef IRQ_HANDLED
-typedef void irqreturn_t;
-#define IRQ_NONE
-#define IRQ_HANDLED
-#define IRQ_RETVAL(x)
-#endif
-
-#if !defined (list_for_each)
-#define list_for_each(pos, head) \
-    for (pos = (head)->next, prefetch(pos->next); pos != (head); \
-         pos = pos->next, prefetch(pos->next))
-#endif
-
-extern struct list_head pci_devices;   /* list of all devices */
-#define XGI_PCI_FOR_EACH_DEV(dev) \
-    for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next))
-
-/*
- * the following macro causes problems when used in the same module
- * as module_param(); undef it so we don't accidentally mix the two
- */
-#undef  MODULE_PARM
-
-#ifdef EXPORT_NO_SYMBOLS
-EXPORT_NO_SYMBOLS;
-#endif
-
-#define XGI_IS_SUSER()                 capable(CAP_SYS_ADMIN)
-#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->pretty_name)
-#define XGI_NUM_CPUS()                 num_online_cpus()
-#define XGI_CLI()                      local_irq_disable()
-#define XGI_SAVE_FLAGS(eflags)         local_save_flags(eflags)
-#define XGI_RESTORE_FLAGS(eflags)      local_irq_restore(eflags)
-#define XGI_MAY_SLEEP()                (!in_interrupt() && !in_atomic())
-#define XGI_MODULE_PARAMETER(x)        module_param(x, int, 0)
-
-
-#define XGI_PCI_DISABLE_DEVICE(dev)      pci_disable_device(dev)
-
-/* common defines */
-#define GET_MODULE_SYMBOL(mod,sym)    (const void *) inter_module_get(sym)
-#define PUT_MODULE_SYMBOL(sym)        inter_module_put((char *) sym)
-
-#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
-#define XGI_VMA_OFFSET(vma)            (((vma)->vm_pgoff) << PAGE_SHIFT)
-#define XGI_VMA_PRIVATE(vma)           ((vma)->vm_private_data)
-
-#define XGI_DEVICE_NUMBER(x)           minor((x)->i_rdev)
-#define XGI_IS_CONTROL_DEVICE(x)       (minor((x)->i_rdev) == 255)
-
-#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start)
-#define XGI_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[bar].end - (dev)->resource[bar].start + 1)
-
-#define XGI_PCI_BUS_NUMBER(dev)        (dev)->bus->number
-#define XGI_PCI_SLOT_NUMBER(dev)       PCI_SLOT((dev)->devfn)
-
-#define XGI_PCI_GET_CLASS_PRESENT
-#ifdef XGI_PCI_GET_CLASS_PRESENT
-#define XGI_PCI_DEV_PUT(dev)                    pci_dev_put(dev)
-#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_get_device(vendor,device,from)
-#else
-#define XGI_PCI_DEV_PUT(dev)
-#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_find_device(vendor,device,from)
-#endif
-
-/*
- * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver
- * model is not sufficient for full acpi support. it may work in some cases,
- * but not enough for us to officially support this configuration.
- */
-#if defined(CONFIG_ACPI)
-#define XGI_PM_SUPPORT_ACPI
-#endif
-
-#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
-#define XGI_PM_SUPPORT_APM
-#endif
-
-#if defined(CONFIG_DEVFS_FS)
-typedef void *devfs_handle_t;
-#define XGI_DEVFS_REGISTER(_name, _minor) \
-    ({ \
-        devfs_handle_t __handle = NULL; \
-        if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \
-                          S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \
-        { \
-            __handle = (void *) 1; /* XXX Fix me! (boolean) */ \
-        } \
-        __handle; \
-    })
-/*
-#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i)
-*/
-#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl")
-#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi")
-#endif                         /* defined(CONFIG_DEVFS_FS) */
-
-#define XGI_REGISTER_CHRDEV(x...)    register_chrdev(x)
-#define XGI_UNREGISTER_CHRDEV(x...)  unregister_chrdev(x)
-
-#if defined(XGI_REMAP_PFN_RANGE_PRESENT)
-#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \
-    remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
-#elif defined(XGI_REMAP_PAGE_RANGE_5)
-#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(vma, x)
-#elif defined(XGI_REMAP_PAGE_RANGE_4)
-#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(x)
-#else
-#warning "xgi_configure.sh failed, assuming remap_page_range(5)!"
-#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(vma, x)
-#endif
-
-#if defined(pmd_offset_map)
-#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
-    { \
-        pg_mid_dir = pmd_offset_map(pg_dir, address); \
-    }
-#define XGI_PMD_UNMAP(pg_mid_dir) \
-    { \
-        pmd_unmap(pg_mid_dir); \
-    }
-#else
-#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
-    { \
-        pg_mid_dir = pmd_offset(pg_dir, address); \
-    }
-#define XGI_PMD_UNMAP(pg_mid_dir)
-#endif
-
-#define XGI_PMD_PRESENT(pg_mid_dir) \
-    ({ \
-        if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \
-        { \
-            XGI_PMD_UNMAP(pg_mid_dir); \
-            pg_mid_dir = NULL; \
-        } \
-        pg_mid_dir != NULL; \
-    })
-
-#if defined(pte_offset_atomic)
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-    { \
-        pte = pte_offset_atomic(pg_mid_dir, address); \
-        XGI_PMD_UNMAP(pg_mid_dir); \
-    }
-#define XGI_PTE_UNMAP(pte) \
-    { \
-        pte_kunmap(pte); \
-    }
-#elif defined(pte_offset)
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-    { \
-        pte = pte_offset(pg_mid_dir, address); \
-        XGI_PMD_UNMAP(pg_mid_dir); \
-    }
-#define XGI_PTE_UNMAP(pte)
-#else
-#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
-    { \
-        pte = pte_offset_map(pg_mid_dir, address); \
-        XGI_PMD_UNMAP(pg_mid_dir); \
-    }
-#define XGI_PTE_UNMAP(pte) \
-    { \
-        pte_unmap(pte); \
-    }
-#endif
-
-#define XGI_PTE_PRESENT(pte) \
-    ({ \
-        if (pte) \
-        { \
-            if (!pte_present(*pte)) \
-            { \
-                XGI_PTE_UNMAP(pte); pte = NULL; \
-            } \
-        } \
-        pte != NULL; \
-    })
-
-#define XGI_PTE_VALUE(pte) \
-    ({ \
-        unsigned long __pte_value = pte_val(*pte); \
-        XGI_PTE_UNMAP(pte); \
-        __pte_value; \
-    })
-
-#define XGI_PAGE_ALIGN(addr)             (((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
-#define XGI_MASK_OFFSET(addr)            ((addr) & (PAGE_SIZE - 1))
-
-#if !defined (pgprot_noncached)
-static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
-{
-       pgprot_t new_prot = old_prot;
-       if (boot_cpu_data.x86 > 3)
-               new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
-       return new_prot;
-}
-#endif
-
-#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined)
-/* Added define for write combining page, only valid if pat enabled. */
-#define _PAGE_WRTCOMB _PAGE_PWT
-#define __PAGE_KERNEL_WRTCOMB \
-    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED)
-#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB)
-
-static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)
-{
-       pgprot_t new_prot = old_prot;
-       if (boot_cpu_data.x86 > 3) {
-               pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
-               new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);
-       }
-       return new_prot;
-}
-#endif
-
-#if !defined(page_to_pfn)
-#define page_to_pfn(page)  ((page) - mem_map)
-#endif
-
-#define XGI_VMALLOC(ptr, size) \
-    { \
-        (ptr) = vmalloc_32(size); \
-    }
-
-#define XGI_VFREE(ptr, size) \
-    { \
-        vfree((void *) (ptr)); \
-    }
-
-#define XGI_IOREMAP(ptr, physaddr, size) \
-    { \
-        (ptr) = ioremap(physaddr, size); \
-    }
-
-#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \
-    { \
-        (ptr) = ioremap_nocache(physaddr, size); \
-    }
-
-#define XGI_IOUNMAP(ptr, size) \
-    { \
-        iounmap(ptr); \
-    }
-
-/*
- * only use this because GFP_KERNEL may sleep..
- * GFP_ATOMIC is ok, it won't sleep
- */
-#define XGI_KMALLOC(ptr, size) \
-    { \
-        (ptr) = kmalloc(size, GFP_KERNEL); \
-    }
-
-#define XGI_KMALLOC_ATOMIC(ptr, size) \
-    { \
-        (ptr) = kmalloc(size, GFP_ATOMIC); \
-    }
-
-#define XGI_KFREE(ptr, size) \
-    { \
-        kfree((void *) (ptr)); \
-    }
-
-#define XGI_GET_FREE_PAGES(ptr, order) \
-    { \
-        (ptr) = __get_free_pages(GFP_KERNEL, order); \
-    }
-
-#define XGI_FREE_PAGES(ptr, order) \
-    { \
-        free_pages(ptr, order); \
-    }
-
-struct xgi_pte {
-       unsigned long phys_addr;
-       unsigned long virt_addr;
-};
-
-/*
- * AMD Athlon processors expose a subtle bug in the Linux
- * kernel, that may lead to AGP memory corruption. Recent
- * kernel versions had a workaround for this problem, but
- * 2.4.20 is the first kernel to address it properly. The
- * page_attr API provides the means to solve the problem.
- */
-static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr)
-{
-       struct page *page = virt_to_page(__va(page_ptr->phys_addr));
-       change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
-}
-static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr)
-{
-       struct page *page = virt_to_page(__va(page_ptr->phys_addr));
-       change_page_attr(page, 1, PAGE_KERNEL);
-}
-
-/* add for SUSE 9, Jill*/
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4)
-#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->count)
-#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->count)
-#define XGI_PAGE_COUNT(page)           atomic_read(&(page)->count)
-#define XGI_SET_PAGE_COUNT(page,v)     atomic_set(&(page)->count, v)
-#else
-#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->_count)
-#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->_count)
-#define XGI_PAGE_COUNT(page)           atomic_read(&(page)->_count)
-#define XGI_SET_PAGE_COUNT(page,v)     atomic_set(&(page)->_count, v)
-#endif
-#define XGILockPage(page)           SetPageLocked(page)
-#define XGIUnlockPage(page)         ClearPageLocked(page)
-
-struct xgi_file_private {
-       struct xgi_info *info;
-       unsigned int num_events;
-       spinlock_t fp_lock;
-       wait_queue_head_t wait_queue;
-};
-
-#define FILE_PRIVATE(filp)      ((filp)->private_data)
-
-#define XGI_GET_FP(filp)        ((struct xgi_file_private *) FILE_PRIVATE(filp))
-
-/* for the card devices */
-#define XGI_INFO_FROM_FP(filp)  (XGI_GET_FP(filp)->info)
-
-#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode)
-
-#define XGI_ATOMIC_SET(data,val)         atomic_set(&(data), (val))
-#define XGI_ATOMIC_INC(data)             atomic_inc(&(data))
-#define XGI_ATOMIC_DEC(data)             atomic_dec(&(data))
-#define XGI_ATOMIC_DEC_AND_TEST(data)    atomic_dec_and_test(&(data))
-#define XGI_ATOMIC_READ(data)            atomic_read(&(data))
-
-/*
- * lock-related functions that should only be called from this file
- */
-#define xgi_init_lock(lock)             spin_lock_init(&lock)
-#define xgi_lock(lock)                  spin_lock(&lock)
-#define xgi_unlock(lock)                spin_unlock(&lock)
-#define xgi_down(lock)                  down(&lock)
-#define xgi_up(lock)                    up(&lock)
-
-#define xgi_lock_irqsave(lock,flags)    spin_lock_irqsave(&lock,flags)
-#define xgi_unlock_irqsave(lock,flags)  spin_unlock_irqrestore(&lock,flags)
-
-#endif
index bb2813c..7f3d9d6 100644 (file)
  * DEALINGS IN THE SOFTWARE.                                                                                           
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
 #include "xgi_regs.h"
-#include "xgi_pcie.h"
 
-void xgi_ge_reset(struct xgi_info * info)
+int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS)
 {
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+
        xgi_disable_ge(info);
        xgi_enable_ge(info);
+
+       return 0;
 }
 
+
 /*
  * irq functions
  */
@@ -113,7 +117,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)
                        u8 old_index;
                        u8 old_36;
 
-                       XGI_INFO("Can not reset back 0x%x!\n",
+                       DRM_INFO("Can not reset back 0x%x!\n",
                                 ge_3d_status[0x00]);
 
                        *(mmio_vbase + 0xb057) = 0;
@@ -151,7 +155,7 @@ static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)
        
 bool xgi_ge_irq_handler(struct xgi_info * info)
 {
-       volatile u8 *const mmio_vbase = info->mmio.vbase;
+       volatile u8 *const mmio_vbase = info->mmio_map->handle;
        volatile u32 *const ge_3d_status =
                (volatile u32 *)(mmio_vbase + 0x2800);
        const u32 int_status = ge_3d_status[4];
@@ -185,7 +189,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
                                                continue_int_count = 0;
 
                                                /* GE Hung up, need reset. */
-                                               XGI_INFO("Reset GE!\n");
+                                               DRM_INFO("Reset GE!\n");
 
                                                xgi_ge_hang_reset(mmio_vbase);
                                        }
@@ -205,23 +209,23 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
 bool xgi_crt_irq_handler(struct xgi_info * info)
 {
        bool ret = FALSE;
-       u8 save_3ce = bReadReg(0x3ce);
+       u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
 
-       if (bIn3cf(0x37) & 0x01)        // CRT1 interrupt just happened
+       if (IN3CFB(info->mmio_map, 0x37) & 0x01)        // CRT1 interrupt just happened
        {
                u8 op3cf_3d;
                u8 op3cf_37;
 
                // What happened?
-               op3cf_37 = bIn3cf(0x37);
+               op3cf_37 = IN3CFB(info->mmio_map, 0x37);
 
                // Clear CRT interrupt
-               op3cf_3d = bIn3cf(0x3d);
-               bOut3cf(0x3d, (op3cf_3d | 0x04));
-               bOut3cf(0x3d, (op3cf_3d & ~0x04));
+               op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
+               OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
+               OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
                ret = TRUE;
        }
-       bWriteReg(0x3ce, save_3ce);
+       DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
 
        return (ret);
 }
@@ -229,36 +233,36 @@ bool xgi_crt_irq_handler(struct xgi_info * info)
 bool xgi_dvi_irq_handler(struct xgi_info * info)
 {
        bool ret = FALSE;
-       u8 save_3ce = bReadReg(0x3ce);
+       const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
 
-       if (bIn3cf(0x38) & 0x20)        // DVI interrupt just happened
-       {
+       if (IN3CFB(info->mmio_map, 0x38) & 0x20) {      // DVI interrupt just happened
+               const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
                u8 op3cf_39;
                u8 op3cf_37;
                u8 op3x5_5a;
-               u8 save_3x4 = bReadReg(0x3d4);;
 
                // What happened?
-               op3cf_37 = bIn3cf(0x37);
+               op3cf_37 = IN3CFB(info->mmio_map, 0x37);
 
                //Notify BIOS that DVI plug/unplug happened
-               op3x5_5a = bIn3x5(0x5a);
-               bOut3x5(0x5a, op3x5_5a & 0xf7);
+               op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
+               OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);
 
-               bWriteReg(0x3d4, save_3x4);
+               DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);
 
                // Clear DVI interrupt
-               op3cf_39 = bIn3cf(0x39);
-               bOut3c5(0x39, (op3cf_39 & ~0x01));      //Set 3cf.39 bit 0 to 0
-               bOut3c5(0x39, (op3cf_39 | 0x01));       //Set 3cf.39 bit 0 to 1
+               op3cf_39 = IN3CFB(info->mmio_map, 0x39);
+               OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));      //Set 3cf.39 bit 0 to 0
+               OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));       //Set 3cf.39 bit 0 to 1
 
                ret = TRUE;
        }
-       bWriteReg(0x3ce, save_3ce);
+       DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
 
        return (ret);
 }
 
+
 void xgi_dump_register(struct xgi_info * info)
 {
        int i, j;
@@ -281,7 +285,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bIn3c5(i * 0x10 + j);
+                       temp = IN3C5B(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -303,7 +307,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bIn3x5(i * 0x10 + j);
+                       temp = IN3X5B(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -325,7 +329,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bIn3cf(i * 0x10 + j);
+                       temp = IN3CFB(info->mmio_map, i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -346,7 +350,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bReadReg(0xB000 + i * 0x10 + j);
+                       temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -366,7 +370,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bReadReg(0x2200 + i * 0x10 + j);
+                       temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -386,7 +390,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bReadReg(0x2300 + i * 0x10 + j);
+                       temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -406,7 +410,7 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bReadReg(0x2400 + i * 0x10 + j);
+                       temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
@@ -426,17 +430,34 @@ void xgi_dump_register(struct xgi_info * info)
                printk("%1x ", i);
 
                for (j = 0; j < 0x10; j++) {
-                       temp = bReadReg(0x2800 + i * 0x10 + j);
+                       temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j);
                        printk("%3x", temp);
                }
                printk("\r\n");
        }
 }
 
-void xgi_restore_registers(struct xgi_info * info)
+
+int xgi_dump_register_ioctl(DRM_IOCTL_ARGS)
 {
-       bOut3x5(0x13, 0);
-       bOut3x5(0x8b, 2);
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+
+       xgi_dump_register(info);
+
+       return 0;
+}
+
+
+int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+
+       OUT3X5B(info->mmio_map, 0x13, 0);
+       OUT3X5B(info->mmio_map, 0x8b, 2);
+
+       return 0;
 }
 
 void xgi_waitfor_pci_idle(struct xgi_info * info)
@@ -446,60 +467,10 @@ void xgi_waitfor_pci_idle(struct xgi_info * info)
 
        int idleCount = 0;
        while (idleCount < 5) {
-               if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) {
+               if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) {
                        idleCount = 0;
                } else {
                        idleCount++;
                }
        }
 }
-
-
-/*memory collect function*/
-extern struct list_head xgi_mempid_list;
-void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt)
-{
-       struct xgi_mem_pid *block;
-       struct xgi_mem_pid *next;
-       struct task_struct *p, *find;
-       unsigned int cnt = 0;
-
-       list_for_each_entry_safe(block, next, &xgi_mempid_list, list) {
-
-               find = NULL;
-               XGI_SCAN_PROCESS(p) {
-                       if (p->pid == block->pid) {
-                               XGI_INFO
-                                   ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n",
-                                    block->pid, p->state,
-                                    block->location,
-                                    block->bus_addr);
-                               find = p;
-                               if (block->bus_addr == 0xFFFFFFFF)
-                                       ++cnt;
-                               break;
-                       }
-               }
-               if (!find) {
-                       if (block->location == XGI_MEMLOC_LOCAL) {
-                               XGI_INFO
-                                   ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n",
-                                    block->pid, block->bus_addr);
-                               xgi_fb_free(info, block->bus_addr);
-                       } else if (block->bus_addr != 0xFFFFFFFF) {
-                               XGI_INFO
-                                   ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n",
-                                    block->pid, block->bus_addr);
-                               xgi_pcie_free(info, block->bus_addr);
-                       } else {
-                               /*only delete the memory block */
-                               list_del(&block->list);
-                               XGI_INFO
-                                   ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
-                                    block->pid);
-                               kfree(block);
-                       }
-               }
-       }
-       *pcnt = cnt;
-}
index 9c0591b..10638b2 100644 (file)
@@ -30,9 +30,7 @@
 #define _XGI_MISC_H_
 
 extern void xgi_dump_register(struct xgi_info * info);
-extern void xgi_ge_reset(struct xgi_info * info);
 
-extern void xgi_restore_registers(struct xgi_info * info);
 extern bool xgi_ge_irq_handler(struct xgi_info * info);
 extern bool xgi_crt_irq_handler(struct xgi_info * info);
 extern bool xgi_dvi_irq_handler(struct xgi_info * info);
index cfc9feb..49c531f 100644 (file)
  * DEALINGS IN THE SOFTWARE.                                                                                           
  ***************************************************************************/
 
-#include "xgi_linux.h"
 #include "xgi_drv.h"
 #include "xgi_regs.h"
-#include "xgi_pcie.h"
 #include "xgi_misc.h"
 
-static struct xgi_pcie_heap *xgi_pcie_heap = NULL;
-static struct kmem_cache *xgi_pcie_cache_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL;
-static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL;
-extern struct list_head xgi_mempid_list;
-
-static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
-{
-       struct page *page;
-       unsigned long page_addr = 0;
-       unsigned long page_count = 0;
-       int i;
-
-       page_count = (1 << page_order);
-       page_addr = __get_free_pages(GFP_KERNEL, page_order);
-
-       if (page_addr == 0UL) {
-               XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",
-                         page_count);
-               return 0;
-       }
-
-       page = virt_to_page(page_addr);
-
-       for (i = 0; i < page_count; i++, page++) {
-               XGI_INC_PAGE_COUNT(page);
-               XGILockPage(page);
-       }
-
-       XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",
-                page_count, page_order, page_addr);
-       return page_addr;
-}
-
-static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)
-{
-       struct page *page;
-       unsigned long page_count = 0;
-       int i;
-
-       page_count = (1 << page_order);
-       page = virt_to_page(page_addr);
-
-       for (i = 0; i < page_count; i++, page++) {
-               XGI_DEC_PAGE_COUNT(page);
-               XGIUnlockPage(page);
-       }
-
-       free_pages(page_addr, page_order);
-}
+static struct xgi_mem_block *xgi_pcie_vertex_block = NULL;
+static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL;
+static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL;
 
 static int xgi_pcie_lut_init(struct xgi_info * info)
 {
-       unsigned char *page_addr = NULL;
-       unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
-       unsigned long count = 0;
        u8 temp = 0;
+       int err;
+       unsigned i;
+       struct drm_scatter_gather request;
+       struct drm_sg_mem *sg;
+       u32 *lut;
 
-       /* Jong 06/06/2006 */
-       unsigned long pcie_aperture_size;
-
-       info->pcie.size = 128 * 1024 * 1024;
 
        /* Get current FB aperture size */
-       temp = In3x5(0x27);
-       XGI_INFO("In3x5(0x27): 0x%x \n", temp);
+       temp = IN3X5B(info->mmio_map, 0x27);
+       DRM_INFO("In3x5(0x27): 0x%x \n", temp);
 
        if (temp & 0x01) {      /* 256MB; Jong 06/05/2006; 0x10000000 */
-               /* Jong 06/06/2006; allocate memory */
-               pcie_aperture_size = 256 * 1024 * 1024;
-               /* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */
+               info->pcie.base = 256 * 1024 * 1024;
        } else {                /* 128MB; Jong 06/05/2006; 0x08000000 */
-
-               /* Jong 06/06/2006; allocate memory */
-               pcie_aperture_size = 128 * 1024 * 1024;
-               /* info->pcie.base = 128 * 1024 * 1024; */
+               info->pcie.base = 128 * 1024 * 1024;
        }
 
-       /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */
-       /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */
-       /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */
-       /* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */
-       info->pcie.base = pcie_aperture_size;   /* works */
-       /* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */
-       /* info->pcie.base=128 * 1024 * 1024; *//* System hang */
 
-       XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);
+       DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
 
        /* Get current lookup table page size */
-       temp = bReadReg(0xB00C);
+       temp = DRM_READ8(info->mmio_map, 0xB00C);
        if (temp & 0x04) {      /* 8KB */
                info->lutPageSize = 8 * 1024;
        } else {                /* 4KB */
-
                info->lutPageSize = 4 * 1024;
        }
 
-       XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+       DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
 
-#if 0
-       /* Get current lookup table location */
-       temp = bReadReg(0xB00C);
-       if (temp & 0x02) {      /* LFB */
-               info->isLUTInLFB = TRUE;
-               /* Current we only support lookup table in LFB */
-               temp &= 0xFD;
-               bWriteReg(0xB00C, temp);
-               info->isLUTInLFB = FALSE;
-       } else {                /* SFB */
 
-               info->isLUTInLFB = FALSE;
+       request.size = info->pcie.size;
+       err = drm_sg_alloc(info->dev, & request);
+       if (err) {
+               DRM_ERROR("cannot allocate PCIE GART backing store!  "
+                         "size = %d\n", info->pcie.size);
+               return err;
        }
 
-       XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+       sg = info->dev->sg;
 
-       /* Get current SDFB page size */
-       temp = bReadReg(0xB00C);
-       if (temp & 0x08) {      /* 8MB */
-               info->sdfbPageSize = 8 * 1024 * 1024;
-       } else {                /* 4MB */
-
-               info->sdfbPageSize = 4 * 1024 * 1024;
+       info->lut_handle = drm_pci_alloc(info->dev, 
+                                        sizeof(u32) * sg->pages,
+                                        PAGE_SIZE,
+                                        DMA_31BIT_MASK);
+       if (info->lut_handle == NULL) {
+               DRM_ERROR("cannot allocate PCIE lut page!\n");
+               return DRM_ERR(ENOMEM);
        }
-#endif
-       pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;
 
-       /*
-        * Allocate memory for PCIE GART table;
-        */
-       lutEntryNum = pciePageCount;
-       lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
-
-       /* get page_order base on page_count */
-       count = lutPageCount;
-       for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ;
-
-       if ((lutPageCount << 1) == (1 << lutPageOrder)) {
-               lutPageOrder -= 1;
-       }
-
-       XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",
-                lutEntryNum, lutPageCount, lutPageOrder);
-
-       info->lutPageOrder = lutPageOrder;
-       page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);
+       lut = info->lut_handle->vaddr;
+       for (i = 0; i < sg->pages; i++) {
+               info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev,
+                                                        sg->pagelist[i],
+                                                        0,
+                                                        PAGE_SIZE,
+                                                        DMA_BIDIRECTIONAL);
+               if (dma_mapping_error(info->dev->sg->busaddr[i])) {
+                       DRM_ERROR("cannot map GART backing store for DMA!\n");
+                       return DRM_ERR(-(info->dev->sg->busaddr[i]));
+               }
 
-       if (!page_addr) {
-               XGI_ERROR("cannot allocate PCIE lut page!\n");
-               goto fail;
+               lut[i] = info->dev->sg->busaddr[i];
        }
-       info->lut_base = (unsigned long *)page_addr;
-
-       XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",
-                page_addr, virt_to_phys(page_addr));
-
-       XGI_INFO
-           ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",
-            info->lut_base, __pa(info->lut_base), info->lutPageOrder);
-
-       /*
-        * clean all PCIE GART Entry
-        */
-       memset(page_addr, 0, PAGE_SIZE << lutPageOrder);
 
 #if defined(__i386__) || defined(__x86_64__)
        asm volatile ("wbinvd":::"memory");
@@ -204,675 +109,186 @@ static int xgi_pcie_lut_init(struct xgi_info * info)
 #endif
 
        /* Set GART in SFB */
-       bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);
+       temp = DRM_READ8(info->mmio_map, 0xB00C);
+       DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
+
        /* Set GART base address to HW */
-       dwWriteReg(0xB034, __pa(info->lut_base));
+       dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr);
 
-       return 1;
-      fail:
        return 0;
 }
 
-static void xgi_pcie_lut_cleanup(struct xgi_info * info)
-{
-       if (info->lut_base) {
-               XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
-                        info->lut_base, info->lutPageOrder);
-               xgi_pcie_lut_free((unsigned long)info->lut_base,
-                                 info->lutPageOrder);
-               info->lut_base = NULL;
-       }
-}
-
-static struct xgi_pcie_block *xgi_pcie_new_node(void)
+void xgi_pcie_lut_cleanup(struct xgi_info * info)
 {
-       struct xgi_pcie_block *block =
-           (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block,
-                                                 GFP_KERNEL);
-       if (block == NULL) {
-               return NULL;
+       if (info->dev->sg) {
+               drm_sg_free(info->dev, info->dev->sg->handle);
        }
 
-       block->offset = 0;      /* block's offset in pcie memory, begin from 0 */
-       block->size = 0;        /* The block size.              */
-       block->bus_addr = 0;    /* CPU access address/bus address */
-       block->hw_addr = 0;     /* GE access address            */
-       block->page_count = 0;
-       block->page_order = 0;
-       block->page_block = NULL;
-       block->page_table = NULL;
-       block->owner = PCIE_INVALID;
-
-       return block;
-}
-
-static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block)
-{
-       struct page *page;
-       struct xgi_page_block *page_block = block->page_block;
-       struct xgi_page_block *free_block;
-       unsigned long page_count = 0;
-       int i;
-
-       //XGI_INFO("block->page_block: 0x%p \n", block->page_block);
-       while (page_block) {
-               page_count = page_block->page_count;
-
-               page = virt_to_page(page_block->virt_addr);
-               for (i = 0; i < page_count; i++, page++) {
-                       XGI_DEC_PAGE_COUNT(page);
-                       XGIUnlockPage(page);
-               }
-               free_pages(page_block->virt_addr, page_block->page_order);
-
-               page_block->phys_addr = 0;
-               page_block->virt_addr = 0;
-               page_block->page_count = 0;
-               page_block->page_order = 0;
-
-               free_block = page_block;
-               page_block = page_block->next;
-               //XGI_INFO("free free_block: 0x%p \n", free_block);
-               kfree(free_block);
-               free_block = NULL;
-       }
-
-       if (block->page_table) {
-               //XGI_INFO("free block->page_table: 0x%p \n", block->page_table);
-               kfree(block->page_table);
-               block->page_table = NULL;
+       if (info->lut_handle) {
+               drm_pci_free(info->dev, info->lut_handle);
+               info->lut_handle = NULL;
        }
 }
 
 int xgi_pcie_heap_init(struct xgi_info * info)
 {
-       struct xgi_pcie_block *block;
-
-       if (!xgi_pcie_lut_init(info)) {
-               XGI_ERROR("xgi_pcie_lut_init failed\n");
-               return 0;
-       }
-
-       xgi_pcie_heap =
-           (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL);
-       if (!xgi_pcie_heap) {
-               XGI_ERROR("xgi_pcie_heap alloc failed\n");
-               goto fail1;
-       }
-       INIT_LIST_HEAD(&xgi_pcie_heap->free_list);
-       INIT_LIST_HEAD(&xgi_pcie_heap->used_list);
-       INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);
-
-       xgi_pcie_heap->max_freesize = info->pcie.size;
-
-       xgi_pcie_cache_block =
-           kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0,
-                             SLAB_HWCACHE_ALIGN, NULL, NULL);
+       int err;
 
-       if (NULL == xgi_pcie_cache_block) {
-               XGI_ERROR("Fail to creat xgi_pcie_block\n");
-               goto fail2;
+       err = xgi_pcie_lut_init(info);
+       if (err) {
+               DRM_ERROR("xgi_pcie_lut_init failed\n");
+               return err;
        }
 
-       block = (struct xgi_pcie_block *) xgi_pcie_new_node();
-       if (!block) {
-               XGI_ERROR("xgi_pcie_new_node failed\n");
-               goto fail3;
-       }
-
-       block->offset = 0;      /* block's offset in pcie memory, begin from 0 */
-       block->size = info->pcie.size;
-
-       list_add(&block->list, &xgi_pcie_heap->free_list);
 
-       XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n",
-                block->offset, block->size);
-       return 1;
-      fail3:
-       if (xgi_pcie_cache_block) {
-               kmem_cache_destroy(xgi_pcie_cache_block);
-               xgi_pcie_cache_block = NULL;
+       err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size);
+       if (err) {
+               xgi_pcie_lut_cleanup(info);
        }
 
-      fail2:
-       if (xgi_pcie_heap) {
-               kfree(xgi_pcie_heap);
-               xgi_pcie_heap = NULL;
-       }
-      fail1:
-       xgi_pcie_lut_cleanup(info);
-       return 0;
+       return err;
 }
 
-void xgi_pcie_heap_cleanup(struct xgi_info * info)
-{
-       struct list_head *free_list;
-       struct xgi_pcie_block *block;
-       struct xgi_pcie_block *next;
-       int j;
-
-       xgi_pcie_lut_cleanup(info);
-       XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");
-
-       if (xgi_pcie_heap) {
-               free_list = &xgi_pcie_heap->free_list;
-               for (j = 0; j < 3; j++, free_list++) {
-                       list_for_each_entry_safe(block, next, free_list, list) {
-                               XGI_INFO
-                                   ("No. %d block offset: 0x%lx size: 0x%lx\n",
-                                    j, block->offset, block->size);
-                               xgi_pcie_block_stuff_free(block);
-                               block->bus_addr = 0;
-                               block->hw_addr = 0;
-
-                               //XGI_INFO("No. %d free block: 0x%p \n", j, block);
-                               kmem_cache_free(xgi_pcie_cache_block, block);
-                       }
-               }
-
-               XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);
-               kfree(xgi_pcie_heap);
-               xgi_pcie_heap = NULL;
-       }
 
-       if (xgi_pcie_cache_block) {
-               kmem_cache_destroy(xgi_pcie_cache_block);
-               xgi_pcie_cache_block = NULL;
-       }
-}
-
-static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info,
-                                           unsigned long originalSize,
-                                           enum PcieOwner owner)
+int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+                  DRMFILE filp)
 {
-       struct xgi_pcie_block *block, *used_block, *free_block;
-       struct xgi_page_block *page_block, *prev_page_block;
-       struct page *page;
-       unsigned long page_order = 0, count = 0, index = 0;
-       unsigned long page_addr = 0;
-       u32 *lut_addr = NULL;
-       unsigned long lut_id = 0;
-       unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
-       int i, j, page_count = 0;
-       int temp = 0;
-
-       XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");
-       XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
-                originalSize, size);
-
-       if (owner == PCIE_3D) {
-               if (xgi_pcie_vertex_block) {
-                       XGI_INFO
-                           ("PCIE Vertex has been created, return directly.\n");
-                       return xgi_pcie_vertex_block;
-               }
-       }
+       struct xgi_mem_block *block;
 
-       if (owner == PCIE_3D_CMDLIST) {
-               if (xgi_pcie_cmdlist_block) {
-                       XGI_INFO
-                           ("PCIE Cmdlist has been created, return directly.\n");
-                       return xgi_pcie_cmdlist_block;
-               }
+       down(&info->pcie_sem);
+       if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) {
+               DRM_INFO("PCIE Vertex has been created, return directly.\n");
+               block = xgi_pcie_vertex_block;
        }
-
-       if (owner == PCIE_3D_SCRATCHPAD) {
-               if (xgi_pcie_scratchpad_block) {
-                       XGI_INFO
-                           ("PCIE Scratchpad has been created, return directly.\n");
-                       return xgi_pcie_scratchpad_block;
-               }
-       }
-
-       if (size == 0) {
-               XGI_ERROR("size == 0 \n");
-               return (NULL);
+       else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) {
+               DRM_INFO("PCIE Cmdlist has been created, return directly.\n");
+               block = xgi_pcie_cmdlist_block;
        }
-
-       XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);
-       if (size > xgi_pcie_heap->max_freesize) {
-               XGI_ERROR
-                   ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",
-                    size, xgi_pcie_heap->max_freesize);
-               return (NULL);
+       else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) {
+               DRM_INFO("PCIE Scratchpad has been created, return directly.\n");
+               block = xgi_pcie_scratchpad_block;
        }
+       else {
+               block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner);
 
-       /* Jong 05/30/2006; find next free list which has enough space */
-       list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
-               if (size <= block->size) {
-                       break;
+               if (alloc->owner == PCIE_3D) {
+                       xgi_pcie_vertex_block = block;
+               }
+               else if (alloc->owner == PCIE_3D_CMDLIST) {
+                       xgi_pcie_cmdlist_block = block;
+               }
+               else if (alloc->owner == PCIE_3D_SCRATCHPAD) {
+                       xgi_pcie_scratchpad_block = block;
                }
        }
+       up(&info->pcie_sem);
 
-       if (&block->list == &xgi_pcie_heap->free_list) {
-               XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n",
-                         size / 1024);
-               return (NULL);
-       }
-
-       free_block = block;
-       XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
-                size, free_block->offset, free_block->size);
-
-       if (size == free_block->size) {
-               used_block = free_block;
-               XGI_INFO("size==free_block->size: free_block = 0x%p\n",
-                        free_block);
-               list_del(&free_block->list);
+       if (block == NULL) {
+               alloc->location = XGI_MEMLOC_INVALID;
+               alloc->size = 0;
+               DRM_ERROR("PCIE RAM allocation failed\n");
+               return DRM_ERR(ENOMEM);
        } else {
-               used_block = xgi_pcie_new_node();
-               if (used_block == NULL) {
-                       return NULL;
-               }
-
-               if (used_block == free_block) {
-                       XGI_ERROR("used_block == free_block = 0x%p\n",
-                                 used_block);
-               }
-
-               used_block->offset = free_block->offset;
-               used_block->size = size;
+               DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n",
+                        block->offset);
+               alloc->location = XGI_MEMLOC_NON_LOCAL;
+               alloc->size = block->size;
+               alloc->hw_addr = block->offset + info->pcie.base;
+               alloc->offset = block->offset;
 
-               free_block->offset += size;
-               free_block->size -= size;
+               block->filp = filp;
+               return 0;
        }
+}
 
-       xgi_pcie_heap->max_freesize -= size;
 
-       used_block->bus_addr = info->pcie.base + used_block->offset;
-       used_block->hw_addr = info->pcie.base + used_block->offset;
-       used_block->page_count = page_count = size / PAGE_SIZE;
+int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_mem_alloc alloc;
+       struct xgi_info *info = dev->dev_private;
+       int err;
 
-       /* get page_order base on page_count */
-       for (used_block->page_order = 0; page_count; page_count >>= 1) {
-               ++used_block->page_order;
-       }
+       DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
+                                sizeof(alloc));
 
-       if ((used_block->page_count << 1) == (1 << used_block->page_order)) {
-               used_block->page_order--;
-       }
-       XGI_INFO
-           ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",
-            used_block->offset, used_block->size, used_block->bus_addr,
-            used_block->hw_addr, used_block->page_count,
-            used_block->page_order);
-
-       used_block->page_block = NULL;
-       //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL);
-       //if (!used_block->page_block) return NULL;_t
-       //used_block->page_block->next = NULL;
-
-       used_block->page_table =
-           (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
-                                 GFP_KERNEL);
-       if (used_block->page_table == NULL) {
-               goto fail;
+       err = xgi_pcie_alloc(info, & alloc, filp);
+       if (err) {
+               return err;
        }
+       
+       DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
+                              alloc, sizeof(alloc));
 
-       lut_id = (used_block->offset >> PAGE_SHIFT);
-       lut_addr = info->lut_base;
-       lut_addr += lut_id;
-       XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);
-
-       /* alloc free pages from system */
-       page_count = used_block->page_count;
-       page_block = used_block->page_block;
-       prev_page_block = used_block->page_block;
-       for (i = 0; page_count > 0; i++) {
-               /* if size is bigger than 2M bytes, it should be split */
-               if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) {
-                       page_order = XGI_PCIE_ALLOC_MAX_ORDER;
-               } else {
-                       count = page_count;
-                       for (page_order = 0; count; count >>= 1, ++page_order) ;
-
-                       if ((page_count << 1) == (1 << page_order)) {
-                               page_order -= 1;
-                       }
-               }
+       return 0;
+}
 
-               count = (1 << page_order);
-               page_addr = __get_free_pages(GFP_KERNEL, page_order);
-               XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n",
-                        page_addr);
 
-               if (!page_addr) {
-                       XGI_ERROR
-                           ("No: %d :Can't get free pages: 0x%lx from system memory !\n",
-                            i, count);
-                       goto fail;
-               }
+/**
+ * Free all blocks associated with a particular file handle.
+ */
+void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp)
+{
+       if (!info->pcie_heap.initialized) {
+               return;
+       }
 
-               /* Jong 05/30/2006; test */
-               memset((unsigned char *)page_addr, 0xFF,
-                      PAGE_SIZE << page_order);
-               /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */
-
-               if (page_block == NULL) {
-                       page_block =
-                           (struct xgi_page_block *)
-                           kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL);
-                       if (!page_block) {
-                               XGI_ERROR
-                                   ("Can't get memory for page_block! \n");
-                               goto fail;
-                       }
-               }
+       down(&info->pcie_sem);
 
-               if (prev_page_block == NULL) {
-                       used_block->page_block = page_block;
-                       prev_page_block = page_block;
-               } else {
-                       prev_page_block->next = page_block;
-                       prev_page_block = page_block;
-               }
+       do {
+               struct xgi_mem_block *block;
 
-               page_block->next = NULL;
-               page_block->phys_addr = __pa(page_addr);
-               page_block->virt_addr = page_addr;
-               page_block->page_count = count;
-               page_block->page_order = page_order;
-
-               XGI_INFO
-                   ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n",
-                    page_block->phys_addr);
-               XGI_INFO
-                   ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n",
-                    page_block->virt_addr);
-
-               page = virt_to_page(page_addr);
-
-               //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"
-               //         "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",
-               //          i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);
-
-               for (j = 0; j < count; j++, page++, lut_addr++) {
-                       used_block->page_table[index + j].phys_addr =
-                           __pa(page_address(page));
-                       used_block->page_table[index + j].virt_addr =
-                           (unsigned long)page_address(page);
-
-                       XGI_INFO
-                           ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n",
-                            used_block->page_table[index + j].phys_addr);
-                       XGI_INFO
-                           ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n",
-                            used_block->page_table[index + j].virt_addr);
-
-                       *lut_addr = __pa(page_address(page));
-                       XGI_INC_PAGE_COUNT(page);
-                       XGILockPage(page);
-
-                       if (temp) {
-                               XGI_INFO
-                                   ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",
-                                    __pa(page_address(page)), lut_addr, j,
-                                    *lut_addr);
-                               temp--;
+               list_for_each_entry(block, &info->pcie_heap.used_list, list) {
+                       if (block->filp == filp) {
+                               break;
                        }
                }
 
-               page_block = page_block->next;
-               page_count -= count;
-               index += count;
-               temp = 0;
-       }
-
-       used_block->owner = owner;
-       list_add(&used_block->list, &xgi_pcie_heap->used_list);
-
-#if defined(__i386__) || defined(__x86_64__)
-       asm volatile ("wbinvd":::"memory");
-#else
-       mb();
-#endif
-
-       /* Flush GART Table */
-       bWriteReg(0xB03F, 0x40);
-       bWriteReg(0xB03F, 0x00);
-
-       if (owner == PCIE_3D) {
-               xgi_pcie_vertex_block = used_block;
-       }
-
-       if (owner == PCIE_3D_CMDLIST) {
-               xgi_pcie_cmdlist_block = used_block;
-       }
-
-       if (owner == PCIE_3D_SCRATCHPAD) {
-               xgi_pcie_scratchpad_block = used_block;
-       }
-
-       XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");
-       return (used_block);
-
-      fail:
-       xgi_pcie_block_stuff_free(used_block);
-       kmem_cache_free(xgi_pcie_cache_block, used_block);
-       return NULL;
-}
-
-static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info,
-                                          unsigned long offset)
-{
-       struct xgi_pcie_block *used_block, *block;
-       struct xgi_pcie_block *prev, *next;
-       unsigned long upper, lower;
-
-       list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
-               if (block->offset == offset) {
+               if (&block->list == &info->pcie_heap.used_list) {
                        break;
                }
-       }
-
-       if (&block->list == &xgi_pcie_heap->used_list) {
-               XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
-               return (NULL);
-       }
-
-       used_block = block;
-       XGI_INFO
-           ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",
-            used_block, used_block->offset, used_block->size,
-            used_block->bus_addr, used_block->hw_addr);
-
-       xgi_pcie_block_stuff_free(used_block);
 
-       /* update xgi_pcie_heap */
-       xgi_pcie_heap->max_freesize += used_block->size;
+               (void) xgi_pcie_free(info, block->offset, filp);
+       } while(1);
 
-       prev = next = NULL;
-       upper = used_block->offset + used_block->size;
-       lower = used_block->offset;
-
-       list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
-               if (block->offset == upper) {
-                       next = block;
-               } else if ((block->offset + block->size) == lower) {
-                       prev = block;
-               }
-       }
-
-       XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
-       list_del(&used_block->list);
-
-       if (prev && next) {
-               prev->size += (used_block->size + next->size);
-               list_del(&next->list);
-               XGI_INFO("free node 0x%p\n", next);
-               kmem_cache_free(xgi_pcie_cache_block, next);
-               kmem_cache_free(xgi_pcie_cache_block, used_block);
-               next = NULL;
-               used_block = NULL;
-               return (prev);
-       }
-
-       if (prev) {
-               prev->size += used_block->size;
-               XGI_INFO("free node 0x%p\n", used_block);
-               kmem_cache_free(xgi_pcie_cache_block, used_block);
-               used_block = NULL;
-               return (prev);
-       }
-
-       if (next) {
-               next->size += used_block->size;
-               next->offset = used_block->offset;
-               XGI_INFO("free node 0x%p\n", used_block);
-               kmem_cache_free(xgi_pcie_cache_block, used_block);
-               used_block = NULL;
-               return (next);
-       }
-
-       used_block->bus_addr = 0;
-       used_block->hw_addr = 0;
-       used_block->page_count = 0;
-       used_block->page_order = 0;
-       list_add(&used_block->list, &xgi_pcie_heap->free_list);
-       XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
-                used_block, used_block->offset, used_block->size);
-       return (used_block);
+       up(&info->pcie_sem);
 }
 
-void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
-                   pid_t pid)
-{
-       struct xgi_pcie_block *block;
-
-       xgi_down(info->pcie_sem);
-       block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner);
-       xgi_up(info->pcie_sem);
-
-       if (block == NULL) {
-               alloc->location = XGI_MEMLOC_INVALID;
-               alloc->size = 0;
-               alloc->bus_addr = 0;
-               alloc->hw_addr = 0;
-               XGI_ERROR("PCIE RAM allocation failed\n");
-       } else {
-               XGI_INFO
-                   ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
-                    block->offset, block->bus_addr);
-               alloc->location = XGI_MEMLOC_NON_LOCAL;
-               alloc->size = block->size;
-               alloc->bus_addr = block->bus_addr;
-               alloc->hw_addr = block->hw_addr;
-
-               /*
-                  manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
-                  PCIE_3D request means a opengl process created.
-                  PCIE_3D_TEXTURE request means texture cannot alloc from fb.
-                */
-               if ((alloc->owner == PCIE_3D)
-                   || (alloc->owner == PCIE_3D_TEXTURE)) {
-                       struct xgi_mem_pid *mempid_block =
-                           kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
-                       if (!mempid_block)
-                               XGI_ERROR("mempid_block alloc failed\n");
-                       mempid_block->location = XGI_MEMLOC_NON_LOCAL;
-                       if (alloc->owner == PCIE_3D)
-                               mempid_block->bus_addr = 0xFFFFFFFF;    /*xgi_pcie_vertex_block has the address */
-                       else
-                               mempid_block->bus_addr = alloc->bus_addr;
-                       mempid_block->pid = pid;
-
-                       XGI_INFO
-                           ("Memory ProcessID add one pcie block pid:%ld successfully! \n",
-                            mempid_block->pid);
-                       list_add(&mempid_block->list, &xgi_mempid_list);
-               }
-       }
-}
 
-void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr)
+int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
 {
-       struct xgi_pcie_block *block;
-       unsigned long offset = bus_addr - info->pcie.base;
-       struct xgi_mem_pid *mempid_block;
-       struct xgi_mem_pid *mempid_freeblock = NULL;
-       char isvertex = 0;
-       int processcnt;
-
-       if (xgi_pcie_vertex_block
-           && xgi_pcie_vertex_block->bus_addr == bus_addr)
-               isvertex = 1;
-
-       if (isvertex) {
-               /*check is there any other process using vertex */
-               processcnt = 0;
-
-               list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
-                       if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
-                           && mempid_block->bus_addr == 0xFFFFFFFF) {
-                               ++processcnt;
-                       }
-               }
-               if (processcnt > 1) {
-                       return;
-               }
-       }
+       const bool isvertex = (xgi_pcie_vertex_block
+                              && (xgi_pcie_vertex_block->offset == offset));
+       int err;
 
-       xgi_down(info->pcie_sem);
-       block = xgi_pcie_mem_free(info, offset);
-       xgi_up(info->pcie_sem);
+       down(&info->pcie_sem);
+       err = xgi_mem_free(&info->pcie_heap, offset, filp);
+       up(&info->pcie_sem);
 
-       if (block == NULL) {
-               XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
+       if (err) {
+               DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
        }
 
        if (isvertex)
                xgi_pcie_vertex_block = NULL;
 
-       /* manage mempid */
-       list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
-               if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
-                   && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF)
-                       || (!isvertex && mempid_block->bus_addr == bus_addr))) {
-                       mempid_freeblock = mempid_block;
-                       break;
-               }
-       }
-       if (mempid_freeblock) {
-               list_del(&mempid_freeblock->list);
-               XGI_INFO
-                   ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
-                    mempid_freeblock->pid);
-               kfree(mempid_freeblock);
-       }
+       return err;
 }
 
-/*
- * given a bus address, fid the pcie mem block
- * uses the bus address as the key.
- */
-struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
-                                          unsigned long address)
-{
-       struct xgi_pcie_block *block;
-       int i;
-
 
-       list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
-               if (block->bus_addr == address) {
-                       return block;
-               }
-
-               if (block->page_table) {
-                       for (i = 0; i < block->page_count; i++) {
-                               unsigned long offset = block->bus_addr;
-                               if ((address >= offset)
-                                   && (address < (offset + PAGE_SIZE))) {
-                                       return block;
-                               }
-                       }
-               }
-       }
+int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS)
+{
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+       u32 offset;
 
-       XGI_ERROR("could not find map for vm 0x%lx\n", address);
+       DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,
+                                sizeof(offset));
 
-       return NULL;
+       return xgi_pcie_free(info, offset, filp);
 }
 
+
 /**
  * xgi_find_pcie_virt
  * @address: GE HW address
@@ -880,60 +296,43 @@ struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
  * Returns CPU virtual address.  Assumes the CPU VAddr is continuous in not
  * the same block
  */
-void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address)
+void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
 {
-       struct xgi_pcie_block *block;
-       const unsigned long offset_in_page = address & (PAGE_SIZE - 1);
-
-       XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n",
-                address, offset_in_page);
-
-       list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
-               XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n",
-                        block, block->hw_addr, block->size);
-
-               if ((address >= block->hw_addr)
-                   && (address < (block->hw_addr + block->size))) {
-                       const unsigned long loc_in_pagetable =
-                           (address - block->hw_addr) >> PAGE_SHIFT;
-                       void *const ret =
-                           (void *)(block->page_table[loc_in_pagetable].
-                                    virt_addr + offset_in_page);
-
-                       XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT);
-                       XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n",
-                                loc_in_pagetable,
-                                block->page_table[loc_in_pagetable].virt_addr);
-                       XGI_INFO("return 0x%p\n", ret);
-
-                       return ret;
-               }
-       }
+       const unsigned long offset = address - info->pcie.base;
 
-       XGI_ERROR("could not find map for vm 0x%lx\n", address);
-       return NULL;
+       return ((u8 *) info->dev->sg->virtual) + offset;
 }
 
 /*
     address -- GE hw address
 */
-void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address)
+int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS)
 {
+       DRM_DEVICE;
+       struct xgi_info *info = dev->dev_private;
+       u32 address;
        u32 *virtaddr = 0;
 
-       XGI_INFO("input GE HW addr is 0x%x\n", address);
+       DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data,
+                                sizeof(address));
+
+       DRM_INFO("input GE HW addr is 0x%x\n", address);
 
        if (address == 0) {
-               return;
+               return DRM_ERR(EFAULT);
        }
 
        virtaddr = (u32 *)xgi_find_pcie_virt(info, address);
 
-       XGI_INFO("convert to CPU virt addr 0x%p\n", virtaddr);
+       DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr);
 
        if (virtaddr != NULL) {
-               XGI_INFO("original [virtaddr] = 0x%x\n", *virtaddr);
+               DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr);
                *virtaddr = 0x00f00fff;
-               XGI_INFO("modified [virtaddr] = 0x%x\n", *virtaddr);
+               DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr);
+       } else {
+               return DRM_ERR(EFAULT);
        }
+
+       return 0;
 }
diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h
deleted file mode 100644 (file)
index b66d6a2..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-
-/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.                  
- *                                                                                                                                                     *
- * All Rights Reserved.                                                                                                                *
- *                                                                                                                                                     *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the     
- * "Software"), to deal in the Software without restriction, including 
- * without limitation on the rights to use, copy, modify, merge,       
- * publish, distribute, sublicense, and/or sell copies of the Software,        
- * and to permit persons to whom the Software is furnished to do so,   
- * subject to the following conditions:                                        
- *                                                                                                                                                     *
- * The above copyright notice and this permission notice (including the        
- * next paragraph) shall be included in all copies or substantial      
- * portions of the Software.                                           
- *                                                                                                                                                     *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,     
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF  
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND               
- * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR                     
- * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,          
- * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,          
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER                       
- * DEALINGS IN THE SOFTWARE.                                                                                           
- ***************************************************************************/
-
-#ifndef _XGI_PCIE_H_
-#define _XGI_PCIE_H_
-
-#ifndef XGI_PCIE_ALLOC_MAX_ORDER
-#define XGI_PCIE_ALLOC_MAX_ORDER    1  /* 8K in Kernel 2.4.* */
-#endif
-
-struct xgi_page_block {
-       struct xgi_page_block *next;
-       unsigned long phys_addr;
-       unsigned long virt_addr;
-       unsigned long page_count;
-       unsigned long page_order;
-};
-
-struct xgi_pcie_block {
-       struct list_head list;
-       unsigned long offset;   /* block's offset in pcie memory, begin from 0 */
-       unsigned long size;     /* The block size.              */
-       unsigned long bus_addr; /* CPU access address/bus address */
-       unsigned long hw_addr;  /* GE access address            */
-
-       unsigned long page_count;
-       unsigned long page_order;
-       struct xgi_page_block *page_block;
-       struct xgi_pte *page_table;     /* list of physical pages allocated */
-
-       atomic_t use_count;
-       enum PcieOwner owner;
-       unsigned long processID;
-};
-
-struct xgi_pcie_heap {
-       struct list_head free_list;
-       struct list_head used_list;
-       struct list_head sort_list;
-       unsigned long max_freesize;
-};
-
-#endif
index bc3e2a1..b211626 100644 (file)
 #ifndef _XGI_REGS_H_
 #define _XGI_REGS_H_
 
-#ifndef XGI_MMIO
-#define XGI_MMIO 1
-#endif
+#include "drmP.h"
+#include "drm.h"
 
-#if XGI_MMIO
-#define OUTB(port, value)   writeb(value, info->mmio.vbase + port)
-#define INB(port)           readb(info->mmio.vbase + port)
-#define OUTW(port, value)   writew(value, info->mmio.vbase + port)
-#define INW(port)           readw(info->mmio.vbase + port)
-#define OUTDW(port, value)  writel(value, info->mmio.vbase + port)
-#define INDW(port)          readl(info->mmio.vbase + port)
-#else
-#define OUTB(port, value)   outb(value, port)
-#define INB(port)           inb(port)
-#define OUTW(port, value)   outw(value, port)
-#define INW(port)           inw(port)
-#define OUTDW(port, value)  outl(value, port)
-#define INDW(port)          inl(port)
-#endif
 
 /* Hardware access functions */
-static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data)
-{
-       OUTB(0x3C4, index);
-       OUTB(0x3C5, data);
-}
-
-static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data)
-{
-       OUTB(0x3D4, index);
-       OUTB(0x3D5, data);
-}
-
-static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data)
-{
-       OUTB(0x3CE, index);
-       OUTB(0x3CF, data);
-}
-
-static inline u8 IN3C5B(struct xgi_info * info, u8 index)
-{
-       volatile u8 data = 0;
-       OUTB(0x3C4, index);
-       data = INB(0x3C5);
-       return data;
-}
-
-static inline u8 IN3X5B(struct xgi_info * info, u8 index)
+static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
 {
-       volatile u8 data = 0;
-       OUTB(0x3D4, index);
-       data = INB(0x3D5);
-       return data;
+       DRM_WRITE8(map, 0x3C4, index);
+       DRM_WRITE8(map, 0x3C5, data);
 }
 
-static inline u8 IN3CFB(struct xgi_info * info, u8 index)
+static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
 {
-       volatile u8 data = 0;
-       OUTB(0x3CE, index);
-       data = INB(0x3CF);
-       return data;
+       DRM_WRITE8(map, 0x3D4, index);
+       DRM_WRITE8(map, 0x3D5, data);
 }
 
-static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data)
+static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
 {
-       OUTB(0x3C4, index);
-       OUTB(0x3C5, data);
+       DRM_WRITE8(map, 0x3CE, index);
+       DRM_WRITE8(map, 0x3CF, data);
 }
 
-static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data)
+static inline u8 IN3C5B(struct drm_map * map, u8 index)
 {
-       OUTB(0x3D4, index);
-       OUTB(0x3D5, data);
+       DRM_WRITE8(map, 0x3C4, index);
+       return DRM_READ8(map, 0x3C5);
 }
 
-static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data)
+static inline u8 IN3X5B(struct drm_map * map, u8 index)
 {
-       OUTB(0x3CE, index);
-       OUTB(0x3CF, data);
+       DRM_WRITE8(map, 0x3D4, index);
+       return DRM_READ8(map, 0x3D5);
 }
 
-static inline u8 IN3C5W(struct xgi_info * info, u8 index)
+static inline u8 IN3CFB(struct drm_map * map, u8 index)
 {
-       volatile u8 data = 0;
-       OUTB(0x3C4, index);
-       data = INB(0x3C5);
-       return data;
+       DRM_WRITE8(map, 0x3CE, index);
+       return DRM_READ8(map, 0x3CF);
 }
 
-static inline u8 IN3X5W(struct xgi_info * info, u8 index)
-{
-       volatile u8 data = 0;
-       OUTB(0x3D4, index);
-       data = INB(0x3D5);
-       return data;
-}
-
-static inline u8 IN3CFW(struct xgi_info * info, u8 index)
-{
-       volatile u8 data = 0;
-       OUTB(0x3CE, index);
-       data = INB(0x3CF);
-       return data;
-}
-
-static inline u8 readAttr(struct xgi_info * info, u8 index)
-{
-       INB(0x3DA);             /* flip-flop to index */
-       OUTB(0x3C0, index);
-       return INB(0x3C1);
-}
-
-static inline void writeAttr(struct xgi_info * info, u8 index, u8 value)
-{
-       INB(0x3DA);             /* flip-flop to index */
-       OUTB(0x3C0, index);
-       OUTB(0x3C0, value);
-}
 
 /*
  * Graphic engine register (2d/3d) acessing interface
  */
-static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data)
-{
-       XGI_INFO("mmio vbase = 0x%p, addr = 0x%x, data = 0x%x\n",
-                info->mmio->vbase, addr, data);
-
-       *(volatile u32 *)(info->mmio.vbase + addr) = (data);
-}
-
-static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data)
+static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
 {
-       *(volatile u16 *)(info->mmio.vbase + addr) = (data);
-}
-
-static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data)
-{
-       *(volatile u8 *)(info->mmio.vbase + addr) = (data);
-}
-
-static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr)
-{
-       volatile u32 data;
-       data = *(volatile u32 *)(info->mmio.vbase + addr);
-       return data;
-}
-
-static inline u16 ReadRegWord(struct xgi_info * info, u32 addr)
-{
-       volatile u16 data;
-       data = *(volatile u16 *)(info->mmio.vbase + addr);
-       return data;
-}
+       DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
+                map->handle, addr, data);
 
-static inline u8 ReadRegByte(struct xgi_info * info, u32 addr)
-{
-       volatile u8 data;
-       data = *(volatile u8 *)(info->mmio.vbase + addr);
-       return data;
+       DRM_WRITE32(map, addr, data);
 }
 
-#if 0
-extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data);
-extern u8 IN3C5B(struct xgi_info * info, u8 index);
-extern u8 IN3X5B(struct xgi_info * info, u8 index);
-extern u8 IN3CFB(struct xgi_info * info, u8 index);
-extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data);
-extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data);
-extern u8 IN3C5W(struct xgi_info * info, u8 index);
-extern u8 IN3X5W(struct xgi_info * info, u8 index);
-extern u8 IN3CFW(struct xgi_info * info, u8 index);
-
-extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data);
-extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data);
-extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data);
-extern u32 ReadRegDWord(struct xgi_info * info, u32 addr);
-extern u16 ReadRegWord(struct xgi_info * info, u32 addr);
-extern u8 ReadRegByte(struct xgi_info * info, u32 addr);
-
-extern void EnableProtect();
-extern void DisableProtect();
-#endif
-
-#define Out(port, data)         OUTB(port, data)
-#define bOut(port, data)        OUTB(port, data)
-#define wOut(port, data)        OUTW(port, data)
-#define dwOut(port, data)       OUTDW(port, data)
-
-#define Out3x5(index, data)     OUT3X5B(info, index, data)
-#define bOut3x5(index, data)    OUT3X5B(info, index, data)
-#define wOut3x5(index, data)    OUT3X5W(info, index, data)
-
-#define Out3c5(index, data)     OUT3C5B(info, index, data)
-#define bOut3c5(index, data)    OUT3C5B(info, index, data)
-#define wOut3c5(index, data)    OUT3C5W(info, index, data)
-
-#define Out3cf(index, data)     OUT3CFB(info, index, data)
-#define bOut3cf(index, data)    OUT3CFB(info, index, data)
-#define wOut3cf(index, data)    OUT3CFW(info, index, data)
-
-#define In(port)                INB(port)
-#define bIn(port)               INB(port)
-#define wIn(port)               INW(port)
-#define dwIn(port)              INDW(port)
-
-#define In3x5(index)            IN3X5B(info, index)
-#define bIn3x5(index)           IN3X5B(info, index)
-#define wIn3x5(index)           IN3X5W(info, index)
-
-#define In3c5(index)            IN3C5B(info, index)
-#define bIn3c5(index)           IN3C5B(info, index)
-#define wIn3c5(index)           IN3C5W(info, index)
-
-#define In3cf(index)            IN3CFB(info, index)
-#define bIn3cf(index)           IN3CFB(info, index)
-#define wIn3cf(index)           IN3CFW(info, index)
-
-#define dwWriteReg(addr, data)  WriteRegDWord(info, addr, data)
-#define wWriteReg(addr, data)   WriteRegWord(info, addr, data)
-#define bWriteReg(addr, data)   WriteRegByte(info, addr, data)
-#define dwReadReg(addr)         ReadRegDWord(info, addr)
-#define wReadReg(addr)          ReadRegWord(info, addr)
-#define bReadReg(addr)          ReadRegByte(info, addr)
 
 static inline void xgi_enable_mmio(struct xgi_info * info)
 {
        u8 protect = 0;
+       u8 temp;
 
        /* Unprotect registers */
-       outb(0x11, 0x3C4);
-       protect = inb(0x3C5);
-       outb(0x92, 0x3C5);
+       DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+       protect = DRM_READ8(info->mmio_map, 0x3C5);
+       DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
 
-       outb(0x3A, 0x3D4);
-       outb(inb(0x3D5) | 0x20, 0x3D5);
+       DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
+       temp = DRM_READ8(info->mmio_map, 0x3D5);
+       DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);
 
        /* Enable MMIO */
-       outb(0x39, 0x3D4);
-       outb(inb(0x3D5) | 0x01, 0x3D5);
+       DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+       temp = DRM_READ8(info->mmio_map, 0x3D5);
+       DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);
 
-       OUTB(0x3C4, 0x11);
-       OUTB(0x3C5, protect);
+       /* Protect registers */
+       OUT3C5B(info->mmio_map, 0x11, protect);
 }
 
 static inline void xgi_disable_mmio(struct xgi_info * info)
 {
        u8 protect = 0;
+       u8 temp;
 
-       /* unprotect registers */
-       OUTB(0x3C4, 0x11);
-       protect = INB(0x3C5);
-       OUTB(0x3C5, 0x92);
+       /* Unprotect registers */
+       DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+       protect = DRM_READ8(info->mmio_map, 0x3C5);
+       DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
 
        /* Disable MMIO access */
-       OUTB(0x3D4, 0x39);
-       OUTB(0x3D5, INB(0x3D5) & 0xFE);
+       DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+       temp = DRM_READ8(info->mmio_map, 0x3D5);
+       DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);
 
        /* Protect registers */
-       outb(0x11, 0x3C4);
-       outb(protect, 0x3C5);
+       OUT3C5B(info->mmio_map, 0x11, protect);
 }
 
 static inline void xgi_enable_ge(struct xgi_info * info)
@@ -300,36 +131,36 @@ static inline void xgi_enable_ge(struct xgi_info * info)
        int wait = 0;
 
        // Enable GE
-       OUTW(0x3C4, 0x9211);
+       DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211);
 
        // Save and close dynamic gating
-       bOld3cf2a = bIn3cf(0x2a);
-       bOut3cf(0x2a, bOld3cf2a & 0xfe);
+       bOld3cf2a = IN3CFB(info->mmio_map, 0x2a);
+       OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe);
 
        // Reset both 3D and 2D engine
-       bOut3x5(0x36, 0x84);
+       OUT3X5B(info->mmio_map, 0x36, 0x84);
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
-       bOut3x5(0x36, 0x94);
+       OUT3X5B(info->mmio_map, 0x36, 0x94);
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
-       bOut3x5(0x36, 0x84);
+       OUT3X5B(info->mmio_map, 0x36, 0x84);
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
        // Enable 2D engine only
-       bOut3x5(0x36, 0x80);
+       OUT3X5B(info->mmio_map, 0x36, 0x80);
 
        // Enable 2D+3D engine
-       bOut3x5(0x36, 0x84);
+       OUT3X5B(info->mmio_map, 0x36, 0x84);
 
        // Restore dynamic gating
-       bOut3cf(0x2a, bOld3cf2a);
+       OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a);
 }
 
 static inline void xgi_disable_ge(struct xgi_info * info)
@@ -337,50 +168,50 @@ static inline void xgi_disable_ge(struct xgi_info * info)
        int wait = 0;
 
        // Reset both 3D and 2D engine
-       bOut3x5(0x36, 0x84);
+       OUT3X5B(info->mmio_map, 0x36, 0x84);
 
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
-       bOut3x5(0x36, 0x94);
+       OUT3X5B(info->mmio_map, 0x36, 0x94);
 
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
-       bOut3x5(0x36, 0x84);
+       OUT3X5B(info->mmio_map, 0x36, 0x84);
 
        wait = 10;
        while (wait--) {
-               bIn(0x36);
+               DRM_READ8(info->mmio_map, 0x36);
        }
 
        // Disable 2D engine only
-       bOut3x5(0x36, 0);
+       OUT3X5B(info->mmio_map, 0x36, 0);
 }
 
 static inline void xgi_enable_dvi_interrupt(struct xgi_info * info)
 {
-       Out3cf(0x39, In3cf(0x39) & ~0x01);      //Set 3cf.39 bit 0 to 0
-       Out3cf(0x39, In3cf(0x39) | 0x01);       //Set 3cf.39 bit 0 to 1
-       Out3cf(0x39, In3cf(0x39) | 0x02);
+       OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x01);    //Set 3cf.39 bit 0 to 0
+       OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x01);     //Set 3cf.39 bit 0 to 1
+       OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) | 0x02);
 }
 static inline void xgi_disable_dvi_interrupt(struct xgi_info * info)
 {
-       Out3cf(0x39, In3cf(0x39) & ~0x02);
+       OUT3CFB(info->mmio_map, 0x39, IN3CFB(info->mmio_map, 0x39) & ~0x02);
 }
 
 static inline void xgi_enable_crt1_interrupt(struct xgi_info * info)
 {
-       Out3cf(0x3d, In3cf(0x3d) | 0x04);
-       Out3cf(0x3d, In3cf(0x3d) & ~0x04);
-       Out3cf(0x3d, In3cf(0x3d) | 0x08);
+       OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x04);
+       OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x04);
+       OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) | 0x08);
 }
 
 static inline void xgi_disable_crt1_interrupt(struct xgi_info * info)
 {
-       Out3cf(0x3d, In3cf(0x3d) & ~0x08);
+       OUT3CFB(info->mmio_map, 0x3d, IN3CFB(info->mmio_map, 0x3d) & ~0x08);
 }
 
 #endif
index 6711888..66cb4ef 100644 (file)
@@ -44,6 +44,15 @@ struct drm_xgi_sarea {
        unsigned int scrn_pitch;
 };
 
+
+struct xgi_bootstrap {
+       /**
+        * Size of PCI-e GART range in megabytes.
+        */
+       unsigned int gart_size;
+};
+
+
 enum xgi_mem_location {
        XGI_MEMLOC_NON_LOCAL = 0,
        XGI_MEMLOC_LOCAL = 1,
@@ -62,9 +71,9 @@ struct xgi_mem_alloc {
        __u32 hw_addr;
 
        /**
-        * Physical address of the memory from the processor's point of view.
+        * Offset of the allocation in the mapping.
         */
-       unsigned long bus_addr;
+       unsigned long offset;
 };
 
 enum xgi_batch_type {
@@ -95,38 +104,31 @@ struct xgi_state_info {
  * Ioctl definitions
  */
 
-#define XGI_IOCTL_MAGIC             'x'        /* use 'x' as magic number */
-
-#define XGI_IOCTL_BASE              0
-#define XGI_ESC_POST_VBIOS          (XGI_IOCTL_BASE + 0)
-
-#define XGI_ESC_FB_ALLOC            (XGI_IOCTL_BASE + 1)
-#define XGI_ESC_FB_FREE             (XGI_IOCTL_BASE + 2)
-#define XGI_ESC_PCIE_ALLOC          (XGI_IOCTL_BASE + 3)
-#define XGI_ESC_PCIE_FREE           (XGI_IOCTL_BASE + 4)
-#define XGI_ESC_SUBMIT_CMDLIST      (XGI_IOCTL_BASE + 5)
-#define XGI_ESC_GE_RESET            (XGI_IOCTL_BASE + 6)
-#define XGI_ESC_DUMP_REGISTER       (XGI_IOCTL_BASE + 7)
-#define XGI_ESC_DEBUG_INFO          (XGI_IOCTL_BASE + 8)
-#define XGI_ESC_TEST_RWINKERNEL     (XGI_IOCTL_BASE + 9)
-#define XGI_ESC_STATE_CHANGE        (XGI_IOCTL_BASE + 10)
-
-#define XGI_IOCTL_POST_VBIOS        _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS)
-
-#define XGI_IOCTL_FB_ALLOC          _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_alloc)
-#define XGI_IOCTL_FB_FREE           _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long)
-
-#define XGI_IOCTL_PCIE_ALLOC        _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_alloc)
-#define XGI_IOCTL_PCIE_FREE         _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long)
-
-#define XGI_IOCTL_GE_RESET          _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET)
-#define XGI_IOCTL_DUMP_REGISTER     _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER)
-#define XGI_IOCTL_DEBUG_INFO        _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO)
-
-#define XGI_IOCTL_SUBMIT_CMDLIST    _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info)
-#define XGI_IOCTL_TEST_RWINKERNEL   _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long)
-#define XGI_IOCTL_STATE_CHANGE      _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info)
-
-#define XGI_IOCTL_MAXNR          30
+#define DRM_XGI_BOOTSTRAP           0
+#define DRM_XGI_FB_ALLOC            1
+#define DRM_XGI_FB_FREE             2
+#define DRM_XGI_PCIE_ALLOC          3
+#define DRM_XGI_PCIE_FREE           4
+#define DRM_XGI_SUBMIT_CMDLIST      5
+#define DRM_XGI_GE_RESET            6
+#define DRM_XGI_DUMP_REGISTER       7
+#define DRM_XGI_DEBUG_INFO          8
+#define DRM_XGI_TEST_RWINKERNEL     9
+#define DRM_XGI_STATE_CHANGE        10
+
+#define XGI_IOCTL_BOOTSTRAP         DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
+
+#define XGI_IOCTL_FB_ALLOC          DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_FB_ALLOC, struct xgi_mem_alloc)
+#define XGI_IOCTL_FB_FREE           DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FB_FREE, __u32)
+
+#define XGI_IOCTL_PCIE_ALLOC        DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_PCIE_ALLOC, struct xgi_mem_alloc)
+#define XGI_IOCTL_PCIE_FREE         DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_PCIE_FREE, __u32)
+
+#define XGI_IOCTL_GE_RESET          DRM_IO(DRM_COMMAND_BASE + DRM_XGI_GE_RESET)
+#define XGI_IOCTL_DUMP_REGISTER     DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DUMP_REGISTER)
+#define XGI_IOCTL_DEBUG_INFO        DRM_IO(DRM_COMMAND_BASE + DRM_XGI_DEBUG_INFO)
+#define XGI_IOCTL_SUBMIT_CMDLIST    DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
+#define XGI_IOCTL_TEST_RWINKERNEL   DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_TEST_RWINKERNEL, __u32)
+#define XGI_IOCTL_STATE_CHANGE      DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
 
 #endif /* _XGI_DRM_H_ */