--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+\r
+#include "xgi_types.h"\r
+#include "xgi_linux.h"\r
+#include "xgi_drv.h"\r
+#include "xgi_regs.h"\r
+#include "xgi_misc.h"\r
+#include "xgi_cmdlist.h"\r
+\r
+\r
+\r
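+/*\r
+ * Canned "empty begin" batch: an invalid 3D begin header, a length dword\r
+ * (length = 4), and two padding dwords.\r
+ */\r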
+U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] =\r
+{\r
+ 0x10000000, // 3D Type Begin, Invalid\r
+ 0x80000004, // Length = 4;\r
+ 0x00000000,\r
+ 0x00000000\r
+};\r
+\r
+U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] =\r
+{\r
+ FLUSH_2D,\r
+ FLUSH_2D,\r
+ FLUSH_2D,\r
+ FLUSH_2D\r
+};\r
+\r
+xgi_cmdring_info_t s_cmdring;\r
+\r
+static void addFlush2D(xgi_info_t *info);\r
+static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo);\r
+static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter);\r
+static void xgi_cmdlist_reset(void);\r
+\r
+int xgi_cmdlist_initialize(xgi_info_t *info, U32 size)\r
+{\r
+ //xgi_mem_req_t mem_req;\r
+ xgi_mem_alloc_t mem_alloc;\r
+\r
+ //mem_req.size = size;\r
+\r
+ xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc);\r
+\r
+ if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0))\r
+ {\r
+ return -1;\r
+ }\r
+\r
+ s_cmdring._cmdRingSize = mem_alloc.size;\r
+ s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;\r
+ s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr;\r
+ s_cmdring._lastBatchStartAddr = 0;\r
+ s_cmdring._cmdRingOffset = 0;\r
+\r
+ return 1;\r
+}\r
+\r
+void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo)\r
+{\r
+ U32 beginPort;\r
+ /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/\r
+\r
+ /* Jong 05/25/2006 */\r
+ /* return; */\r
+\r
+ beginPort = getCurBatchBeginPort(pCmdInfo);\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); \r
+\r
+ /* Jong 05/25/2006 */\r
+ /* return; */\r
+\r
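+ /*\r
+ * Two submission paths: for the first batch after a reset\r
+ * (_lastBatchStartAddr == 0) the begin command is written directly to the\r
+ * engine ports in PCI trigger mode; later batches are instead linked from\r
+ * the tail of the previous batch and kicked via triggerHWCommandList().\r
+ */\r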
+ if (s_cmdring._lastBatchStartAddr == 0)\r
+ {\r
+ U32 portOffset;\r
+\r
+ /* Jong 06/13/2006; remove marked for system hang test */\r
+ /* xgi_waitfor_pci_idle(info); */\r
+\r
+ /* Jong 06132006; BASE_3D_ENG=0x2800 */\r
+ /* beginPort: 2D: 0x30 */\r
+ portOffset = BASE_3D_ENG + beginPort;\r
+\r
+ // Enable PCI Trigger Mode\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); \r
+\r
+ /* Jong 05/25/2006 */\r
+ /* return; */\r
+\r
+ /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */\r
+ XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); \r
+ XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); \r
+ XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); \r
+ XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", M2REG_PCI_TRIGGER_MODE_MASK); \r
+\r
+ /* Jong 06/14/2006; 0x400001a */\r
+ XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", \r
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); \r
+ dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,\r
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |\r
+ M2REG_CLEAR_COUNTERS_MASK |\r
+ 0x08 |\r
+ M2REG_PCI_TRIGGER_MODE_MASK);\r
+\r
+ /* Jong 05/25/2006 */\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); \r
+ /* return; */ /* OK */\r
+\r
+ /* Jong 06/14/2006; 0x400000a */\r
+ XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", \r
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); \r
+ dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,\r
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |\r
+ 0x08 |\r
+ M2REG_PCI_TRIGGER_MODE_MASK);\r
+\r
+ // Send PCI begin command\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); \r
+ /* return; */\r
+\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset);\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); \r
+\r
+ /* beginPort = 48; */\r
+ /* 0xc100000 */\r
+ dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID);\r
+ XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); \r
+ XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); \r
+ XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", pCmdInfo->_curDebugID); \r
+ XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); \r
+ XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); \r
+ /* return; */ /* OK */\r
+\r
+ /* 0x80000024 */\r
+ dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);\r
+ XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", BEGIN_LINK_ENABLE_MASK); \r
+ XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", pCmdInfo->_firstSize); \r
+ XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); \r
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); \r
+\r
+ /* 0x1010000 */\r
+ dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4));\r
+ XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n", pCmdInfo->_firstBeginAddr); \r
+ XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", (pCmdInfo->_firstBeginAddr >> 4)); \r
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); \r
+\r
+ /* Jong 06/13/2006 */\r
+ xgi_dump_register(info);\r
+\r
+ /* Jong 06/12/2006; system hang; marked for test */\r
+ dwWriteReg(portOffset+12, 0); \r
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); \r
+\r
+ /* Jong 06/13/2006; remove marked for system hang test */\r
+ /* xgi_waitfor_pci_idle(info); */\r
+ }\r
+ else\r
+ {\r
+ U32 *lastBatchVirtAddr;\r
+\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n");\r
+\r
+ /* Jong 05/25/2006 */\r
+ /* return; */\r
+\r
+ if (pCmdInfo->_firstBeginType == BTYPE_3D)\r
+ {\r
+ addFlush2D(info);\r
+ }\r
+\r
+ lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);\r
+\r
+ lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize;\r
+ lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4;\r
+ lastBatchVirtAddr[3] = 0;\r
+ //barrier();\r
+ lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID);\r
+\r
+ /* Jong 06/12/2006; system hang; marked for test */\r
+ triggerHWCommandList(info, pCmdInfo->_beginCount); \r
+\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); \r
+ }\r
+\r
+ s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr;\r
+ XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); \r
+}\r
+\r
+\r
+/*\r
+ state: 0 - console\r
+ 1 - graphic\r
+ 2 - fb\r
+ 3 - logout\r
+ 4 - reboot\r
+ 5 - shutdown\r
+*/\r
+void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo)\r
+{\r
+#define STATE_CONSOLE 0\r
+#define STATE_GRAPHIC 1\r
+#define STATE_FBTERM 2\r
+#define STATE_LOGOUT 3\r
+#define STATE_REBOOT 4\r
+#define STATE_SHUTDOWN 5\r
+\r
+ if ((pStateInfo->_fromState == STATE_GRAPHIC)\r
+ && (pStateInfo->_toState == STATE_CONSOLE))\r
+ {\r
+ XGI_INFO("[kd] I see, now is to leaveVT\n");\r
+ // stop to received batch\r
+ }\r
+ else if ((pStateInfo->_fromState == STATE_CONSOLE)\r
+ && (pStateInfo->_toState == STATE_GRAPHIC))\r
+ {\r
+ XGI_INFO("[kd] I see, now is to enterVT\n");\r
+ xgi_cmdlist_reset();\r
+ }\r
+ else if ((pStateInfo->_fromState == STATE_GRAPHIC)\r
+ && ( (pStateInfo->_toState == STATE_LOGOUT)\r
+ ||(pStateInfo->_toState == STATE_REBOOT)\r
+ ||(pStateInfo->_toState == STATE_SHUTDOWN)))\r
+ {\r
+ XGI_INFO("[kd] I see, not is to exit from X\n");\r
+ // stop to received batch\r
+ }\r
+ else\r
+ {\r
+ XGI_ERROR("[kd] Should not happen\n");\r
+ }\r
+\r
+}\r
+\r
+void xgi_cmdlist_reset(void)\r
+{\r
+ s_cmdring._lastBatchStartAddr = 0;\r
+ s_cmdring._cmdRingOffset = 0;\r
+}\r
+\r
+void xgi_cmdlist_cleanup(xgi_info_t *info)\r
+{\r
+ if (s_cmdring._cmdRingBuffer != 0)\r
+ {\r
+ xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);\r
+ s_cmdring._cmdRingBuffer = 0;\r
+ s_cmdring._cmdRingOffset = 0;\r
+ s_cmdring._cmdRingSize = 0;\r
+ }\r
+}\r
+\r
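+/*\r
+ * Kick the hardware command list: write the PCI trigger register once per\r
+ * pending batch, tagging each write with an incrementing trigger ID.\r
+ */\r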
+static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter)\r
+{\r
+ static U32 s_triggerID = 1;\r
+\r
+ //Fix me, currently we just trigger one time\r
+ while (triggerCounter--)\r
+ {\r
+ dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,\r
+ 0x05000000 + (0xffff & s_triggerID++));\r
+ // xgi_waitfor_pci_idle(info);\r
+ }\r
+}\r
+\r
+static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo)\r
+{\r
+ // Convert the batch type to begin port ID\r
+ switch(pCmdInfo->_firstBeginType)\r
+ {\r
+ case BTYPE_2D:\r
+ return 0x30;\r
+ case BTYPE_3D:\r
+ return 0x40;\r
+ case BTYPE_FLIP:\r
+ return 0x50;\r
+ case BTYPE_CTRL:\r
+ return 0x20;\r
+ default:\r
+ //ASSERT(0);\r
+ return 0xff;\r
+ }\r
+}\r
+\r
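+/*\r
+ * Append a small 2D-flush batch to the command ring and link the previous\r
+ * batch to it; called from the BTYPE_3D path above so the 2D engine is\r
+ * flushed before a 3D batch runs.\r
+ */\r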
+static void addFlush2D(xgi_info_t *info)\r
+{\r
+ U32 *flushBatchVirtAddr;\r
+ U32 flushBatchHWAddr;\r
+\r
+ U32 *lastBatchVirtAddr;\r
+\r
+ /* check that the ring has room for a new flush batch; wrap to the start if not */\r
+ if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize)\r
+ {\r
+ s_cmdring._cmdRingOffset = 0;\r
+ }\r
+\r
+ flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset;\r
+ flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr);\r
+\r
+ /* not using memcpy because the addresses may not be contiguous */\r
+ *(flushBatchVirtAddr + 0) = 0x10000000;\r
+ *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */\r
+ *(flushBatchVirtAddr + 2) = 0x00000000;\r
+ *(flushBatchVirtAddr + 3) = 0x00000000;\r
+ *(flushBatchVirtAddr + 4) = FLUSH_2D;\r
+ *(flushBatchVirtAddr + 5) = FLUSH_2D;\r
+ *(flushBatchVirtAddr + 6) = FLUSH_2D;\r
+ *(flushBatchVirtAddr + 7) = FLUSH_2D;\r
+\r
+ // ASSERT(s_cmdring._lastBatchStartAddr != NULL);\r
+ lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);\r
+\r
+ lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;\r
+ lastBatchVirtAddr[2] = flushBatchHWAddr >> 4;\r
+ lastBatchVirtAddr[3] = 0;\r
+\r
+ //barrier();\r
+\r
+ // BTYPE_CTRL & NO debugID\r
+ lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK);\r
+\r
+ triggerHWCommandList(info, 1);\r
+\r
+ s_cmdring._cmdRingOffset += 0x20;\r
+ s_cmdring._lastBatchStartAddr = flushBatchHWAddr;\r
+}\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#ifndef _XGI_CMDLIST_H_\r
+#define _XGI_CMDLIST_H_\r
+\r
+#define ONE_BIT_MASK 0x1\r
+#define TWENTY_BIT_MASK 0xfffff\r
+#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)\r
+#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK\r
+#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)\r
+#define BASE_3D_ENG 0x2800\r
+#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10\r
+#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)\r
+#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)\r
+#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)\r
+#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)\r
+#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14\r
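+\r
+/*\r
+ * A batch "begin" header, as assembled in xgi_cmdlist.c, is four dwords:\r
+ *\r
+ *   dword 0: (beginPort << 22) | BEGIN_VALID_MASK | debug ID\r
+ *   dword 1: BEGIN_LINK_ENABLE_MASK | batch size in dwords\r
+ *   dword 2: batch start hardware address >> 4\r
+ *   dword 3: 0\r
+ */\r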
+\r
+typedef enum\r
+{\r
+ FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK,\r
+ FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK,\r
+ FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK\r
+}FLUSH_CODE;\r
+\r
+typedef enum\r
+{\r
+ AGPCMDLIST_SCRATCH_SIZE = 0x100,\r
+ AGPCMDLIST_BEGIN_SIZE = 0x004,\r
+ AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004,\r
+ AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c,\r
+ AGPCMDLIST_FLUSH_CMD_LEN = 0x004,\r
+ AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE\r
+}CMD_SIZE;\r
+\r
+typedef struct xgi_cmdring_info_s\r
+{\r
+ U32 _cmdRingSize;\r
+ U32 _cmdRingBuffer;\r
+ U32 _cmdRingBusAddr;\r
+ U32 _lastBatchStartAddr;\r
+ U32 _cmdRingOffset;\r
+}xgi_cmdring_info_t;\r
+\r
+extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size);\r
+\r
+extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo);\r
+\r
+extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo);\r
+\r
+extern void xgi_cmdlist_cleanup(xgi_info_t *info);\r
+\r
+#endif /* _XGI_CMDLIST_H_ */\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+#include "xgi_types.h"\r
+#include "xgi_linux.h"\r
+#include "xgi_drv.h"\r
+#include "xgi_regs.h"\r
+#include "xgi_pcie.h"\r
+#include "xgi_misc.h"\r
+#include "xgi_cmdlist.h"\r
+\r
+/* for debug */\r
+static int xgi_temp = 1;\r
+/*\r
+ * global parameters\r
+ */\r
+static struct xgi_dev {\r
+ u16 vendor;\r
+ u16 device;\r
+ const char *name;\r
+} xgidev_list[] = {\r
+ {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"},\r
+ {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"},\r
+ {0, 0, NULL}\r
+};\r
+\r
+int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */\r
+\r
+static int xgi_num_devices = 0;\r
+\r
+xgi_info_t xgi_devices[XGI_MAX_DEVICES];\r
+\r
+#if defined(XGI_PM_SUPPORT_APM)\r
+static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 };\r
+#endif\r
+\r
+/* add one for the control device */\r
+xgi_info_t xgi_ctl_device;\r
+wait_queue_head_t xgi_ctl_waitqueue;\r
+\r
+#ifdef CONFIG_PROC_FS\r
+struct proc_dir_entry *proc_xgi;\r
+#endif\r
+\r
+#ifdef CONFIG_DEVFS_FS\r
+devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES];\r
+#endif\r
+\r
+struct list_head xgi_mempid_list;\r
+\r
+/* xgi_ functions.. do not take a state device parameter */\r
+static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info);\r
+static void xgi_proc_create(void);\r
+static void xgi_proc_remove_all(struct proc_dir_entry *);\r
+static void xgi_proc_remove(void);\r
+\r
+/* xgi_kern_ functions, interfaces used by linux kernel */\r
+int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *);\r
+\r
+unsigned int xgi_kern_poll(struct file *, poll_table *);\r
+int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);\r
+int xgi_kern_mmap(struct file *, struct vm_area_struct *);\r
+int xgi_kern_open(struct inode *, struct file *);\r
+int xgi_kern_release(struct inode *inode, struct file *filp);\r
+\r
+void xgi_kern_vma_open(struct vm_area_struct *vma);\r
+void xgi_kern_vma_release(struct vm_area_struct *vma);\r
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))\r
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,\r
+ unsigned long address, int *type);\r
+#else\r
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,\r
+ unsigned long address, int write_access);\r
+#endif\r
+\r
+int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *);\r
+int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *);\r
+int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *);\r
+int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *);\r
+\r
+int xgi_kern_ctl_open(struct inode *, struct file *);\r
+int xgi_kern_ctl_close(struct inode *, struct file *);\r
+unsigned int xgi_kern_ctl_poll(struct file *, poll_table *);\r
+\r
+void xgi_kern_isr_bh(unsigned long);\r
+irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *);\r
+\r
+static void xgi_lock_init(xgi_info_t *info);\r
+\r
+#if defined(XGI_PM_SUPPORT_ACPI)\r
+int xgi_kern_acpi_standby(struct pci_dev *, u32);\r
+int xgi_kern_acpi_resume(struct pci_dev *);\r
+#endif\r
+\r
+/*\r
+ * Verify that access to PCI config space wasn't disabled behind our back.\r
+ * Unfortunately, XFree86 enables/disables memory access in PCI config space\r
+ * at various times (such as restoring initial PCI config space settings\r
+ * during VT switches or when doing multicard), and all of our register\r
+ * accesses are garbage at that point.  Check whether access was disabled\r
+ * and re-enable it if so.\r
+ */\r
+#define XGI_CHECK_PCI_CONFIG(xgi) \\r
+ xgi_check_pci_config(xgi, __LINE__)\r
+\r
+static inline void xgi_check_pci_config(xgi_info_t *info, int line)\r
+{\r
+ unsigned short cmd, flag = 0;\r
+\r
+ // don't do this on the control device, only the actual devices\r
+ if (info->flags & XGI_FLAG_CONTROL)\r
+ return;\r
+\r
+ pci_read_config_word(info->dev, PCI_COMMAND, &cmd);\r
+ if (!(cmd & PCI_COMMAND_MASTER))\r
+ {\r
+ XGI_INFO("restoring bus mastering! (%d)\n", line);\r
+ cmd |= PCI_COMMAND_MASTER;\r
+ flag = 1;\r
+ }\r
+\r
+ if (!(cmd & PCI_COMMAND_MEMORY))\r
+ {\r
+ XGI_INFO("restoring MEM access! (%d)\n", line);\r
+ cmd |= PCI_COMMAND_MEMORY;\r
+ flag = 1;\r
+ }\r
+\r
+ if (flag)\r
+ pci_write_config_word(info->dev, PCI_COMMAND, cmd);\r
+}\r
+\r
+static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info)\r
+{\r
+ return 1;\r
+}\r
+\r
+/*\r
+ * struct pci_device_id {\r
+ * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID\r
+ * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID\r
+ * unsigned int class, class_mask; // (class,subclass,prog-if) triplet\r
+ * unsigned long driver_data; // Data private to the driver\r
+ * };\r
+ */\r
+\r
+static struct pci_device_id xgi_dev_table[] = {\r
+ {\r
+ .vendor = PCI_VENDOR_ID_XGI,\r
+ .device = PCI_ANY_ID,\r
+ .subvendor = PCI_ANY_ID,\r
+ .subdevice = PCI_ANY_ID,\r
+ .class = (PCI_CLASS_DISPLAY_VGA << 8),\r
+ .class_mask = ~0,\r
+ },\r
+ { }\r
+};\r
+\r
+/*\r
+ * #define MODULE_DEVICE_TABLE(type,name) \\r
+ * MODULE_GENERIC_TABLE(type##_device,name)\r
+ */\r
+MODULE_DEVICE_TABLE(pci, xgi_dev_table);\r
+\r
+/*\r
+ * struct pci_driver {\r
+ * struct list_head node;\r
+ * char *name;\r
+ * const struct pci_device_id *id_table; // NULL if wants all devices\r
+ * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted\r
+ * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver)\r
+ * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context\r
+ * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended\r
+ * int (*resume)(struct pci_dev *dev); // Device woken up\r
+ * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event\r
+ * };\r
+ */\r
+static struct pci_driver xgi_pci_driver = {\r
+ .name = "xgi",\r
+ .id_table = xgi_dev_table,\r
+ .probe = xgi_kern_probe,\r
+#if defined(XGI_PM_SUPPORT_ACPI)\r
+ .suspend = xgi_kern_acpi_standby,\r
+ .resume = xgi_kern_acpi_resume,\r
+#endif\r
+};\r
+\r
+/*\r
+ * find xgi devices and set initial state\r
+ */\r
+int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table)\r
+{\r
+ xgi_info_t *info;\r
+\r
+ if ((dev->vendor != PCI_VENDOR_ID_XGI)\r
+ || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8)))\r
+ {\r
+ return -1;\r
+ }\r
+\r
+ if (xgi_num_devices == XGI_MAX_DEVICES)\r
+ {\r
+ XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices);\r
+ return -1;\r
+ }\r
+\r
+ /* enable io, mem, and bus-mastering in pci config space */\r
+ if (pci_enable_device(dev) != 0)\r
+ {\r
+ XGI_INFO("pci_enable_device failed, aborting\n");\r
+ return -1;\r
+ }\r
+\r
+ XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices);\r
+\r
+ pci_set_master(dev);\r
+\r
+ info = &xgi_devices[xgi_num_devices];\r
+ info->dev = dev;\r
+ info->vendor_id = dev->vendor;\r
+ info->device_id = dev->device;\r
+ info->bus = dev->bus->number;\r
+ info->slot = PCI_SLOT((dev)->devfn);\r
+\r
+ xgi_lock_init(info);\r
+\r
+ info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1);\r
+ info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1);\r
+\r
+ /* reserve the MMIO register region */\r
+ if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi"))\r
+ {\r
+ XGI_ERROR("cannot reserve MMIO memory\n");\r
+ goto error_disable_dev;\r
+ }\r
+\r
+ XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base);\r
+ XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size);\r
+\r
+ info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base,\r
+ info->mmio.size);\r
+ if (!info->mmio.vbase)\r
+ {\r
+ release_mem_region(info->mmio.base, info->mmio.size);\r
+ XGI_ERROR("info->mmio.vbase failed\n");\r
+ goto error_disable_dev;\r
+ }\r
+ xgi_enable_mmio(info);\r
+\r
+ //xgi_enable_ge(info);\r
+\r
+ XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase);\r
+\r
+ info->fb.base = XGI_PCI_RESOURCE_START(dev, 0);\r
+ info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0);\r
+\r
+ XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base);\r
+ XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);\r
+\r
+ info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024;\r
+ XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);\r
+\r
+ /* check frame buffer region\r
+ if (!request_mem_region(info->fb.base, info->fb.size, "xgi"))\r
+ {\r
+ release_mem_region(info->mmio.base, info->mmio.size);\r
+ XGI_ERROR("cannot reserve frame buffer memory\n");\r
+ goto error_disable_dev;\r
+ }\r
+\r
+\r
+ info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base,\r
+ info->fb.size);\r
+\r
+ if (!info->fb.vbase)\r
+ {\r
+ release_mem_region(info->mmio.base, info->mmio.size);\r
+ release_mem_region(info->fb.base, info->fb.size);\r
+ XGI_ERROR("info->fb.vbase failed\n");\r
+ goto error_disable_dev;\r
+ }\r
+ */\r
+ info->fb.vbase = NULL;\r
+ XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase);\r
+\r
+ info->irq = dev->irq;\r
+\r
+ /* check common error condition */\r
+ if (info->irq == 0)\r
+ {\r
+ XGI_ERROR("Can't find an IRQ for your XGI card! \n");\r
+ goto error_zero_dev;\r
+ }\r
+ XGI_INFO("info->irq: %lx \n", info->irq);\r
+\r
+ //xgi_enable_dvi_interrupt(info);\r
+\r
+ /* sanity check the IO apertures */\r
+ if ((info->mmio.base == 0) || (info->mmio.size == 0)\r
+ || (info->fb.base == 0) || (info->fb.size == 0))\r
+ {\r
+ XGI_ERROR("The IO regions for your XGI card are invalid.\n");\r
+\r
+ if ((info->mmio.base == 0) || (info->mmio.size == 0))\r
+ {\r
+ XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n",\r
+ info->mmio.base,\r
+ info->mmio.size);\r
+ }\r
+\r
+ if ((info->fb.base == 0) || (info->fb.size == 0))\r
+ {\r
+ XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n",\r
+ info->fb.base,\r
+ info->fb.size);\r
+ }\r
+\r
+ goto error_zero_dev;\r
+ }\r
+\r
+ //xgi_num_devices++;\r
+\r
+ return 0;\r
+\r
+error_zero_dev:\r
+ release_mem_region(info->fb.base, info->fb.size);\r
+ release_mem_region(info->mmio.base, info->mmio.size);\r
+\r
+error_disable_dev:\r
+ pci_disable_device(dev);\r
+ return -1;\r
+\r
+}\r
+\r
+/*\r
+ * vma operations...\r
+ * this is only called when the vmas are duplicated. this\r
+ * appears to only happen when the process is cloned to create\r
+ * a new process, and not when the process is threaded.\r
+ *\r
+ * increment the usage count for the physical pages, so when\r
+ * this clone unmaps the mappings, the pages are not\r
+ * deallocated under the original process.\r
+ */\r
+struct vm_operations_struct xgi_vm_ops = {\r
+ .open = xgi_kern_vma_open,\r
+ .close = xgi_kern_vma_release,\r
+ .nopage = xgi_kern_vma_nopage,\r
+};\r
+\r
+void xgi_kern_vma_open(struct vm_area_struct *vma)\r
+{\r
+ XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n",\r
+ vma->vm_start,\r
+ vma->vm_end,\r
+ XGI_VMA_OFFSET(vma));\r
+\r
+ if (XGI_VMA_PRIVATE(vma))\r
+ {\r
+ xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);\r
+ XGI_ATOMIC_INC(block->use_count);\r
+ }\r
+}\r
+\r
+void xgi_kern_vma_release(struct vm_area_struct *vma)\r
+{\r
+ XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n",\r
+ vma->vm_start,\r
+ vma->vm_end,\r
+ XGI_VMA_OFFSET(vma));\r
+\r
+ if (XGI_VMA_PRIVATE(vma))\r
+ {\r
+ xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);\r
+ XGI_ATOMIC_DEC(block->use_count);\r
+\r
+ /*\r
+ * if use_count is down to 0, the kernel virtual mapping was freed\r
+ * but the underlying physical pages were not, we need to clear the\r
+ * bit and free the physical pages.\r
+ */\r
+ if (XGI_ATOMIC_READ(block->use_count) == 0)\r
+ {\r
+ // TODO: free the underlying physical pages here\r
+ XGI_VMA_PRIVATE(vma) = NULL;\r
+ }\r
+ }\r
+}\r
+\r
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))\r
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,\r
+ unsigned long address, int *type)\r
+{\r
+ xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);\r
+ struct page *page = NOPAGE_SIGBUS;\r
+ unsigned long offset = 0;\r
+ unsigned long page_addr = 0;\r
+/*\r
+ XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",\r
+ vma->vm_start,\r
+ vma->vm_end,\r
+ XGI_VMA_OFFSET(vma),\r
+ address);\r
+*/\r
+ offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);\r
+\r
+ offset = offset - block->bus_addr;\r
+\r
+ offset >>= PAGE_SHIFT;\r
+\r
+ page_addr = block->page_table[offset].virt_addr;\r
+\r
+ if (xgi_temp)\r
+ {\r
+ XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"\r
+ "block->page_count: 0x%lx block->page_order: 0x%lx"\r
+ "block->page_table[0x%lx].virt_addr: 0x%lx\n",\r
+ block->bus_addr, block->hw_addr,\r
+ block->page_count, block->page_order,\r
+ offset,\r
+ block->page_table[offset].virt_addr);\r
+ xgi_temp = 0;\r
+ }\r
+\r
+ if (!page_addr) goto out; /* hole or end-of-file */\r
+ page = virt_to_page(page_addr);\r
+\r
+ /* got it, now increment the count */\r
+ get_page(page);\r
+out:\r
+ return page;\r
+\r
+}\r
+#else\r
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,\r
+ unsigned long address, int write_access)\r
+{\r
+ xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);\r
+ struct page *page = NOPAGE_SIGBUS;\r
+ unsigned long offset = 0;\r
+ unsigned long page_addr = 0;\r
+/*\r
+ XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",\r
+ vma->vm_start,\r
+ vma->vm_end,\r
+ XGI_VMA_OFFSET(vma),\r
+ address);\r
+*/\r
+ offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);\r
+\r
+ offset = offset - block->bus_addr;\r
+\r
+ offset >>= PAGE_SHIFT;\r
+\r
+ page_addr = block->page_table[offset].virt_addr;\r
+\r
+ if (xgi_temp)\r
+ {\r
+ XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"\r
+ "block->page_count: 0x%lx block->page_order: 0x%lx"\r
+ "block->page_table[0x%lx].virt_addr: 0x%lx\n",\r
+ block->bus_addr, block->hw_addr,\r
+ block->page_count, block->page_order,\r
+ offset,\r
+ block->page_table[offset].virt_addr);\r
+ xgi_temp = 0;\r
+ }\r
+\r
+ if (!page_addr) goto out; /* hole or end-of-file */\r
+ page = virt_to_page(page_addr);\r
+\r
+ /* got it, now increment the count */\r
+ get_page(page);\r
+out:\r
+ return page;\r
+}\r
+#endif\r
+\r
+#if 0\r
+static struct file_operations xgi_fops = {\r
+ /* owner: THIS_MODULE, */\r
+ poll: xgi_kern_poll,\r
+ ioctl: xgi_kern_ioctl,\r
+ mmap: xgi_kern_mmap,\r
+ open: xgi_kern_open,\r
+ release: xgi_kern_release,\r
+};\r
+#endif\r
+\r
+static struct file_operations xgi_fops = {\r
+ .owner = THIS_MODULE,\r
+ .poll = xgi_kern_poll,\r
+ .ioctl = xgi_kern_ioctl,\r
+ .mmap = xgi_kern_mmap,\r
+ .open = xgi_kern_open,\r
+ .release = xgi_kern_release,\r
+};\r
+\r
+static xgi_file_private_t * xgi_alloc_file_private(void)\r
+{\r
+ xgi_file_private_t *fp;\r
+\r
+ XGI_KMALLOC(fp, sizeof(xgi_file_private_t));\r
+ if (!fp)\r
+ return NULL;\r
+\r
+ memset(fp, 0, sizeof(xgi_file_private_t));\r
+\r
+ /* initialize this file's event queue */\r
+ init_waitqueue_head(&fp->wait_queue);\r
+\r
+ xgi_init_lock(fp->fp_lock);\r
+\r
+ return fp;\r
+}\r
+\r
+static void xgi_free_file_private(xgi_file_private_t *fp)\r
+{\r
+ if (fp == NULL)\r
+ return;\r
+\r
+ XGI_KFREE(fp, sizeof(xgi_file_private_t));\r
+}\r
+\r
+int xgi_kern_open(struct inode *inode, struct file *filp)\r
+{\r
+ xgi_info_t *info = NULL;\r
+ int dev_num;\r
+ int result = 0, status;\r
+\r
+ /*\r
+ * the type and num values are only valid if we are not using devfs.\r
+ * However, since we use them to retrieve the device pointer, we\r
+ * don't need them with devfs as filp->private_data is already\r
+ * initialized\r
+ */\r
+ filp->private_data = xgi_alloc_file_private();\r
+ if (filp->private_data == NULL)\r
+ return -ENOMEM;\r
+\r
+ XGI_INFO("filp->private_data %p\n", filp->private_data);\r
+ /*\r
+ * for control device, just jump to its open routine\r
+ * after setting up the private data\r
+ */\r
+ if (XGI_IS_CONTROL_DEVICE(inode))\r
+ return xgi_kern_ctl_open(inode, filp);\r
+\r
+ /* what device are we talking about? */\r
+ dev_num = XGI_DEVICE_NUMBER(inode);\r
+ if (dev_num >= XGI_MAX_DEVICES)\r
+ {\r
+ xgi_free_file_private(filp->private_data);\r
+ filp->private_data = NULL;\r
+ return -ENODEV;\r
+ }\r
+\r
+ info = &xgi_devices[dev_num];\r
+\r
+ XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num);\r
+\r
+ xgi_down(info->info_sem);\r
+ XGI_CHECK_PCI_CONFIG(info);\r
+\r
+ XGI_INFO_FROM_FP(filp) = info;\r
+\r
+ /*\r
+ * map the memory and allocate isr on first open\r
+ */\r
+\r
+ if (!(info->flags & XGI_FLAG_OPEN))\r
+ {\r
+ XGI_INFO("info->flags & XGI_FLAG_OPEN \n");\r
+\r
+ if (info->device_id == 0)\r
+ {\r
+ XGI_INFO("open of nonexistent device %d\n", dev_num);\r
+ result = -ENXIO;\r
+ goto failed;\r
+ }\r
+\r
+ /* initialize struct irqaction */\r
+ status = request_irq(info->irq, xgi_kern_isr,\r
+ SA_INTERRUPT | SA_SHIRQ, "xgi",\r
+ (void *) info);\r
+ if (status != 0)\r
+ {\r
+ if (info->irq && (status == -EBUSY))\r
+ {\r
+ XGI_ERROR("Tried to get irq %d, but another driver",\r
+ (unsigned int) info->irq);\r
+ XGI_ERROR("has it and is not sharing it.\n");\r
+ }\r
+ XGI_ERROR("isr request failed 0x%x\n", status);\r
+ result = -EIO;\r
+ goto failed;\r
+ }\r
+\r
+ /*\r
+ * #define DECLARE_TASKLET(name, func, data) \\r
+ * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }\r
+ */\r
+ info->tasklet.func = xgi_kern_isr_bh;\r
+ info->tasklet.data = (unsigned long) info;\r
+ tasklet_enable(&info->tasklet);\r
+\r
+ /* Allocate 1 MB for the command buffer (the flush2D batch array) */\r
+ xgi_cmdlist_initialize(info, 0x100000);\r
+\r
+ info->flags |= XGI_FLAG_OPEN;\r
+ }\r
+\r
+ XGI_ATOMIC_INC(info->use_count);\r
+\r
+failed:\r
+ xgi_up(info->info_sem);\r
+\r
+ if ((result) && filp->private_data)\r
+ {\r
+ xgi_free_file_private(filp->private_data);\r
+ filp->private_data = NULL;\r
+ }\r
+\r
+ return result;\r
+}\r
+\r
+int xgi_kern_release(struct inode *inode, struct file *filp)\r
+{\r
+ xgi_info_t *info = XGI_INFO_FROM_FP(filp);\r
+\r
+ XGI_CHECK_PCI_CONFIG(info);\r
+\r
+ /*\r
+ * for control device, just jump to its open routine\r
+ * after setting up the private data\r
+ */\r
+ if (XGI_IS_CONTROL_DEVICE(inode))\r
+ return xgi_kern_ctl_close(inode, filp);\r
+\r
+ XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode));\r
+\r
+ xgi_down(info->info_sem);\r
+ if (XGI_ATOMIC_DEC_AND_TEST(info->use_count))\r
+ {\r
+\r
+ /*\r
+ * The usage count for this device has dropped to zero, it can be shut\r
+ * down safely; disable its interrupts.\r
+ */\r
+\r
+ /*\r
+ * Disable this device's tasklet to make sure that no bottom half will\r
+ * run with undefined device state.\r
+ */\r
+ tasklet_disable(&info->tasklet);\r
+\r
+ /*\r
+ * Free the IRQ, which may block until all pending interrupt processing\r
+ * has completed.\r
+ */\r
+ free_irq(info->irq, (void *)info);\r
+\r
+ xgi_cmdlist_cleanup(info);\r
+\r
+ /* leave INIT flag alone so we don't reinit every time */\r
+ info->flags &= ~XGI_FLAG_OPEN;\r
+ }\r
+\r
+ xgi_up(info->info_sem);\r
+\r
+ if (FILE_PRIVATE(filp))\r
+ {\r
+ xgi_free_file_private(FILE_PRIVATE(filp));\r
+ FILE_PRIVATE(filp) = NULL;\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma)\r
+{\r
+ //struct inode *inode = INODE_FROM_FP(filp);\r
+ xgi_info_t *info = XGI_INFO_FROM_FP(filp);\r
+ xgi_pcie_block_t *block;\r
+ int pages = 0;\r
+ unsigned long prot;\r
+\r
+ XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n",\r
+ vma->vm_start,\r
+ vma->vm_end,\r
+ XGI_VMA_OFFSET(vma));\r
+\r
+ XGI_CHECK_PCI_CONFIG(info);\r
+\r
+ if (XGI_MASK_OFFSET(vma->vm_start)\r
+ || XGI_MASK_OFFSET(vma->vm_end))\r
+ {\r
+ XGI_ERROR("VM: bad mmap range: %lx - %lx\n",\r
+ vma->vm_start, vma->vm_end);\r
+ return -ENXIO;\r
+ }\r
+\r
+ pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;\r
+\r
+ vma->vm_ops = &xgi_vm_ops;\r
+\r
+ /* XGI IO(reg) space */\r
+ if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))\r
+ {\r
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\r
+ if (XGI_REMAP_PAGE_RANGE(vma->vm_start,\r
+ XGI_VMA_OFFSET(vma),\r
+ vma->vm_end - vma->vm_start,\r
+ vma->vm_page_prot))\r
+ return -EAGAIN;\r
+\r
+ /* mark it as IO so that we don't dump it on core dump */\r
+ vma->vm_flags |= VM_IO;\r
+ XGI_INFO("VM: mmap io space \n");\r
+ }\r
+ /* XGI fb space */\r
+ /* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */\r
+ else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))\r
+ {\r
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\r
+ if (XGI_REMAP_PAGE_RANGE(vma->vm_start,\r
+ XGI_VMA_OFFSET(vma),\r
+ vma->vm_end - vma->vm_start,\r
+ vma->vm_page_prot))\r
+ return -EAGAIN;\r
+\r
+ // mark it as IO so that we don't dump it on core dump\r
+ vma->vm_flags |= VM_IO;\r
+ XGI_INFO("VM: mmap fb space \n");\r
+ }\r
+ /* PCIE allocator */\r
+ /* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */\r
+ else if (IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))\r
+ {\r
+ xgi_down(info->pcie_sem);\r
+\r
+ block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma));\r
+\r
+ if (block == NULL)\r
+ {\r
+ XGI_ERROR("couldn't find pre-allocated PCIE memory!\n");\r
+ xgi_up(info->pcie_sem);\r
+ return -EAGAIN;\r
+ }\r
+\r
+ if (block->page_count != pages)\r
+ {\r
+ XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n");\r
+ xgi_up(info->pcie_sem);\r
+ return -EAGAIN;\r
+ }\r
+\r
+ vma->vm_private_data = block;\r
+ XGI_ATOMIC_INC(block->use_count);\r
+ xgi_up(info->pcie_sem);\r
+\r
+ /*\r
+ * Prevent the swapper from swapping these pages out, and mark the\r
+ * memory as I/O so the buffers aren't dumped on core dumps.\r
+ */\r
+ vma->vm_flags |= (VM_LOCKED | VM_IO);\r
+\r
+ /* un-cached */\r
+ prot = pgprot_val(vma->vm_page_prot);\r
+ /* \r
+ if (boot_cpu_data.x86 > 3)\r
+ prot |= _PAGE_PCD | _PAGE_PWT;\r
+ */\r
+ vma->vm_page_prot = __pgprot(prot);\r
+\r
+ XGI_INFO("VM: mmap pcie space \n");\r
+ }\r
+#if 0\r
+ else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))\r
+ {\r
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);\r
+ if (XGI_REMAP_PAGE_RANGE(vma->vm_start,\r
+ XGI_VMA_OFFSET(vma),\r
+ vma->vm_end - vma->vm_start,\r
+ vma->vm_page_prot))\r
+ return -EAGAIN;\r
+\r
+ // mark it as IO so that we don't dump it on core dump\r
+ vma->vm_flags |= VM_IO;\r
+ XGI_INFO("VM: mmap fb space \n");\r
+ }\r
+#endif\r
+ else\r
+ {\r
+ vma->vm_flags |= (VM_IO | VM_LOCKED);\r
+ XGI_ERROR("VM: mmap wrong range \n");\r
+ }\r
+\r
+ vma->vm_file = filp;\r
+\r
+ return 0;\r
+}\r
+\r
+unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait)\r
+{\r
+ xgi_file_private_t *fp;\r
+ xgi_info_t *info;\r
+ unsigned int mask = 0;\r
+ unsigned long eflags;\r
+\r
+ info = XGI_INFO_FROM_FP(filp);\r
+\r
+ if (info->device_number == XGI_CONTROL_DEVICE_NUMBER)\r
+ return xgi_kern_ctl_poll(filp, wait);\r
+\r
+ fp = XGI_GET_FP(filp);\r
+\r
+ if (!(filp->f_flags & O_NONBLOCK))\r
+ {\r
+ /* add us to the list */\r
+ poll_wait(filp, &fp->wait_queue, wait);\r
+ }\r
+\r
+ xgi_lock_irqsave(fp->fp_lock, eflags);\r
+\r
+ /* wake the user on any event */\r
+ if (fp->num_events)\r
+ {\r
+ XGI_INFO("Hey, an event occured!\n");\r
+ /*\r
+ * trigger the client, when they grab the event,\r
+ * we'll decrement the event count\r
+ */\r
+ mask |= (POLLPRI|POLLIN);\r
+ }\r
+ xgi_unlock_irqsave(fp->fp_lock, eflags);\r
+\r
+ return mask;\r
+}\r
+\r
+int xgi_kern_ioctl(struct inode *inode, struct file *filp,\r
+ unsigned int cmd, unsigned long arg)\r
+{\r
+ xgi_info_t *info;\r
+ xgi_mem_alloc_t *alloc = NULL;\r
+\r
+ int status = 0;\r
+ void *arg_copy;\r
+ int arg_size;\r
+ int err = 0;\r
+\r
+ info = XGI_INFO_FROM_FP(filp);\r
+\r
+ XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd));\r
+ /*\r
+ * extract the type and number bitfields, and don't decode\r
+ * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()\r
+ */\r
+ if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY;\r
+ if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY;\r
+\r
+ /*\r
+ * the direction is a bitmask, and VERIFY_WRITE catches R/W\r
+ * transfers. `Type' is user-oriented, while\r
+ * access_ok is kernel-oriented, so the concept of "read" and\r
+ * "write" is reversed\r
+ */\r
+ if (_IOC_DIR(cmd) & _IOC_READ)\r
+ {\r
+ err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));\r
+ }\r
+ else if (_IOC_DIR(cmd) & _IOC_WRITE)\r
+ {\r
+ err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));\r
+ }\r
+ if (err) return -EFAULT;\r
+\r
+ XGI_CHECK_PCI_CONFIG(info);\r
+\r
+ arg_size = _IOC_SIZE(cmd);\r
+ XGI_KMALLOC(arg_copy, arg_size);\r
+ if (arg_copy == NULL)\r
+ {\r
+ XGI_ERROR("failed to allocate ioctl memory\n");\r
+ return -ENOMEM;\r
+ }\r
+\r
+ /* Jong 05/25/2006 */\r
+ if (copy_from_user(arg_copy, (void *)arg, arg_size))\r
+ {\r
+ XGI_ERROR("failed to copyin ioctl data\n");\r
+ XGI_KFREE(arg_copy, arg_size);\r
+ return -EFAULT;\r
+ }\r
+ XGI_INFO("Jong-copy_from_user-OK! \n");\r
+\r
+ alloc = (xgi_mem_alloc_t *)arg_copy;\r
+ XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size);\r
+\r
+ switch (_IOC_NR(cmd))\r
+ {\r
+ case XGI_ESC_DEVICE_INFO:\r
+ XGI_INFO("Jong-xgi_ioctl_get_device_info \n");\r
+ xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_POST_VBIOS:\r
+ XGI_INFO("Jong-xgi_ioctl_post_vbios \n");\r
+ break;\r
+ case XGI_ESC_FB_ALLOC:\r
+ XGI_INFO("Jong-xgi_ioctl_fb_alloc \n");\r
+ xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc);\r
+ break;\r
+ case XGI_ESC_FB_FREE:\r
+ XGI_INFO("Jong-xgi_ioctl_fb_free \n");\r
+ xgi_fb_free(info, *(unsigned long *) arg_copy);\r
+ break;\r
+ case XGI_ESC_MEM_COLLECT:\r
+ XGI_INFO("Jong-xgi_ioctl_mem_collect \n");\r
+ xgi_mem_collect(info, (unsigned int *) arg_copy);\r
+ break;\r
+ case XGI_ESC_PCIE_ALLOC:\r
+ XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n");\r
+ xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size,\r
+ ((xgi_mem_req_t *)arg_copy)->owner, alloc);\r
+ break;\r
+ case XGI_ESC_PCIE_FREE:\r
+ XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy));\r
+ xgi_pcie_free(info, *((unsigned long *) arg_copy));\r
+ break;\r
+ case XGI_ESC_PCIE_CHECK:\r
+ XGI_INFO("Jong-xgi_pcie_heap_check \n");\r
+ xgi_pcie_heap_check();\r
+ break;\r
+ case XGI_ESC_GET_SCREEN_INFO:\r
+ XGI_INFO("Jong-xgi_get_screen_info \n");\r
+ xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_PUT_SCREEN_INFO:\r
+ XGI_INFO("Jong-xgi_put_screen_info \n");\r
+ xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_MMIO_INFO:\r
+ XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n");\r
+ xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_GE_RESET:\r
+ XGI_INFO("Jong-xgi_ioctl_ge_reset \n");\r
+ xgi_ge_reset(info);\r
+ break;\r
+ case XGI_ESC_SAREA_INFO:\r
+ XGI_INFO("Jong-xgi_ioctl_sarea_info \n");\r
+ xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_DUMP_REGISTER:\r
+ XGI_INFO("Jong-xgi_ioctl_dump_register \n");\r
+ xgi_dump_register(info);\r
+ break;\r
+ case XGI_ESC_DEBUG_INFO:\r
+ XGI_INFO("Jong-xgi_ioctl_restore_registers \n");\r
+ xgi_restore_registers(info);\r
+ //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);\r
+ //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);\r
+ break;\r
+ case XGI_ESC_SUBMIT_CMDLIST:\r
+ XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n");\r
+ xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy);\r
+ break;\r
+ case XGI_ESC_TEST_RWINKERNEL:\r
+ XGI_INFO("Jong-xgi_test_rwinkernel \n");\r
+ xgi_test_rwinkernel(info, *(unsigned long*) arg_copy);\r
+ break;\r
+ case XGI_ESC_STATE_CHANGE:\r
+ XGI_INFO("Jong-xgi_state_change \n");\r
+ xgi_state_change(info, (xgi_state_info_t *) arg_copy);\r
+ break;\r
+ case XGI_ESC_CPUID:\r
+ XGI_INFO("Jong-XGI_ESC_CPUID \n");\r
+ xgi_get_cpu_id((struct cpu_info_s*) arg_copy);\r
+ break;\r
+ default:\r
+ XGI_INFO("Jong-xgi_ioctl_default \n");\r
+ status = -EINVAL;\r
+ break;\r
+ }\r
+\r
+ if (copy_to_user((void *)arg, arg_copy, arg_size))\r
+ {\r
+ XGI_ERROR("failed to copyout ioctl data\n");\r
+ status = -EFAULT;\r
+ }\r
+ else\r
+ XGI_INFO("Jong-copy_to_user-OK! \n");\r
+\r
+ XGI_KFREE(arg_copy, arg_size);\r
+ return status;\r
+}\r
+\r
+\r
+/*\r
+ * xgi control driver operations defined here\r
+ */\r
+int xgi_kern_ctl_open(struct inode *inode, struct file *filp)\r
+{\r
+ xgi_info_t *info = &xgi_ctl_device;\r
+\r
+ int rc = 0;\r
+\r
+ XGI_INFO("Jong-xgi_kern_ctl_open\n");\r
+\r
+ xgi_down(info->info_sem);\r
+ info->device_number = XGI_CONTROL_DEVICE_NUMBER;\r
+\r
+ /* save the xgi info in file->private_data */\r
+ filp->private_data = info;\r
+\r
+ if (XGI_ATOMIC_READ(info->use_count) == 0)\r
+ {\r
+ init_waitqueue_head(&xgi_ctl_waitqueue);\r
+ }\r
+\r
+ info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL;\r
+\r
+ XGI_ATOMIC_INC(info->use_count);\r
+ xgi_up(info->info_sem);\r
+\r
+ return rc;\r
+}\r
+\r
+int xgi_kern_ctl_close(struct inode *inode, struct file *filp)\r
+{\r
+ xgi_info_t *info = XGI_INFO_FROM_FP(filp);\r
+\r
+ XGI_INFO("Jong-xgi_kern_ctl_close\n");\r
+\r
+ xgi_down(info->info_sem);\r
+ if (XGI_ATOMIC_DEC_AND_TEST(info->use_count))\r
+ {\r
+ info->flags = 0;\r
+ }\r
+ xgi_up(info->info_sem);\r
+\r
+ if (FILE_PRIVATE(filp))\r
+ {\r
+ xgi_free_file_private(FILE_PRIVATE(filp));\r
+ FILE_PRIVATE(filp) = NULL;\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait)\r
+{\r
+ //xgi_info_t *info = XGI_INFO_FROM_FP(filp);;\r
+ unsigned int ret = 0;\r
+\r
+ if (!(filp->f_flags & O_NONBLOCK))\r
+ {\r
+ poll_wait(filp, &xgi_ctl_waitqueue, wait);\r
+ }\r
+\r
+ return ret;\r
+}\r
+\r
+/*\r
+ * xgi proc system\r
+ */\r
+static u8 xgi_find_pcie_capability(struct pci_dev *dev)\r
+{\r
+ u16 status;\r
+ u8 cap_ptr, cap_id;\r
+\r
+ pci_read_config_word(dev, PCI_STATUS, &status);\r
+ status &= PCI_STATUS_CAP_LIST;\r
+ if (!status)\r
+ return 0;\r
+\r
+ switch (dev->hdr_type)\r
+ {\r
+ case PCI_HEADER_TYPE_NORMAL:\r
+ case PCI_HEADER_TYPE_BRIDGE:\r
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);\r
+ break;\r
+ default:\r
+ return 0;\r
+ }\r
+\r
+ do\r
+ {\r
+ cap_ptr &= 0xFC;\r
+ pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);\r
+ if (cap_id == PCI_CAP_ID_EXP)\r
+ return cap_ptr; /* found the PCI Express capability */\r
+ pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);\r
+ } while (cap_ptr && cap_id != 0xFF);\r
+\r
+ return 0;\r
+}\r
+\r
+static struct pci_dev* xgi_get_pci_device(xgi_info_t *info)\r
+{\r
+ struct pci_dev *dev;\r
+\r
+ dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL);\r
+ while (dev)\r
+ {\r
+ if (XGI_PCI_SLOT_NUMBER(dev) == info->slot\r
+ && XGI_PCI_BUS_NUMBER(dev) == info->bus)\r
+ return dev;\r
+ dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev);\r
+ }\r
+\r
+ return NULL;\r
+}\r
+\r
+int xgi_kern_read_card_info(char *page, char **start, off_t off,\r
+ int count, int *eof, void *data)\r
+{\r
+ struct pci_dev *dev;\r
+ char *type;\r
+ int len = 0;\r
+\r
+ xgi_info_t *info;\r
+ info = (xgi_info_t *) data;\r
+\r
+ dev = xgi_get_pci_device(info);\r
+ if (!dev)\r
+ return 0;\r
+\r
+ type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI";\r
+ len += sprintf(page+len, "Card Type: \t %s\n", type);\r
+\r
+ XGI_PCI_DEV_PUT(dev);\r
+ return len;\r
+}\r
+\r
+int xgi_kern_read_version(char *page, char **start, off_t off,\r
+ int count, int *eof, void *data)\r
+{\r
+ int len = 0;\r
+\r
+ len += sprintf(page+len, "XGI version: %s\n", "1.0");\r
+ len += sprintf(page+len, "GCC version: %s\n", "3.0");\r
+\r
+ return len;\r
+}\r
+\r
+int xgi_kern_read_pcie_info(char *page, char **start, off_t off,\r
+ int count, int *eof, void *data)\r
+{\r
+ return 0;\r
+}\r
+\r
+int xgi_kern_read_status(char *page, char **start, off_t off,\r
+ int count, int *eof, void *data)\r
+{\r
+ return 0;\r
+}\r
+\r
+\r
+static void xgi_proc_create(void)\r
+{\r
+#ifdef CONFIG_PROC_FS\r
+\r
+ struct pci_dev *dev;\r
+ int i = 0;\r
+ char name[6];\r
+\r
+ struct proc_dir_entry *entry;\r
+ struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards;\r
+\r
+ xgi_info_t *info;\r
+ xgi_info_t *xgi_max_devices;\r
+\r
+ /* world readable directory */\r
+ int flags = S_IFDIR | S_IRUGO | S_IXUGO;\r
+\r
+ proc_xgi = create_proc_entry("xgi", flags, proc_root_driver);\r
+ if (!proc_xgi)\r
+ goto failed;\r
+\r
+ proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi);\r
+ if (!proc_xgi_cards)\r
+ goto failed;\r
+\r
+ proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi);\r
+ if (!proc_xgi_pcie)\r
+ goto failed;\r
+\r
+ /*\r
+ * Set the module owner to ensure that the reference\r
+ * count reflects accesses to the proc files.\r
+ */\r
+ proc_xgi->owner = THIS_MODULE;\r
+ proc_xgi_cards->owner = THIS_MODULE;\r
+ proc_xgi_pcie->owner = THIS_MODULE;\r
+\r
+ xgi_max_devices = xgi_devices + XGI_MAX_DEVICES;\r
+ for (info = xgi_devices; info < xgi_max_devices; info++)\r
+ {\r
+ if (info->device_id == 0)\r
+ break;\r
+\r
+ /* world readable file */\r
+ flags = S_IFREG | S_IRUGO;\r
+\r
+ dev = xgi_get_pci_device(info);\r
+ if (!dev)\r
+ break;\r
+\r
+ sprintf(name, "%d", i++);\r
+ entry = create_proc_entry(name, flags, proc_xgi_cards);\r
+ if (!entry)\r
+ {\r
+ XGI_PCI_DEV_PUT(dev);\r
+ goto failed;\r
+ }\r
+\r
+ entry->data = info;\r
+ entry->read_proc = xgi_kern_read_card_info;\r
+ entry->owner = THIS_MODULE;\r
+\r
+ if (xgi_find_pcie_capability(dev))\r
+ {\r
+ entry = create_proc_entry("status", flags, proc_xgi_pcie);\r
+ if (!entry)\r
+ {\r
+ XGI_PCI_DEV_PUT(dev);\r
+ goto failed;\r
+ }\r
+\r
+ entry->data = info;\r
+ entry->read_proc = xgi_kern_read_status;\r
+ entry->owner = THIS_MODULE;\r
+\r
+ entry = create_proc_entry("card", flags, proc_xgi_pcie);\r
+ if (!entry)\r
+ {\r
+ XGI_PCI_DEV_PUT(dev);\r
+ goto failed;\r
+ }\r
+\r
+ entry->data = info;\r
+ entry->read_proc = xgi_kern_read_pcie_info;\r
+ entry->owner = THIS_MODULE;\r
+ }\r
+\r
+ XGI_PCI_DEV_PUT(dev);\r
+ }\r
+\r
+ entry = create_proc_entry("version", flags, proc_xgi);\r
+ if (!entry)\r
+ goto failed;\r
+\r
+ entry->read_proc = xgi_kern_read_version;\r
+ entry->owner = THIS_MODULE;\r
+\r
+ entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie);\r
+ if (!entry)\r
+ goto failed;\r
+\r
+ entry->data = NULL;\r
+ entry->read_proc = xgi_kern_read_pcie_info;\r
+ entry->owner = THIS_MODULE;\r
+\r
+ return;\r
+\r
+failed:\r
+ XGI_ERROR("failed to create /proc entries!\n");\r
+ xgi_proc_remove_all(proc_xgi);\r
+#endif\r
+}\r
+\r
+#ifdef CONFIG_PROC_FS\r
+static void xgi_proc_remove_all(struct proc_dir_entry *entry)\r
+{\r
+ while (entry)\r
+ {\r
+ struct proc_dir_entry *next = entry->next;\r
+ if (entry->subdir)\r
+ xgi_proc_remove_all(entry->subdir);\r
+ remove_proc_entry(entry->name, entry->parent);\r
+ if (entry == proc_xgi)\r
+ break;\r
+ entry = next;\r
+ }\r
+}\r
+#endif\r
+\r
+static void xgi_proc_remove(void)\r
+{\r
+#ifdef CONFIG_PROC_FS\r
+ xgi_proc_remove_all(proc_xgi);\r
+#endif\r
+}\r
+\r
+/*\r
+ * The driver receives an interrupt; if someone is waiting on it, hand it\r
+ * off to the bottom half.\r
+ */\r
+irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs)\r
+{\r
+ xgi_info_t *info = (xgi_info_t *) dev_id;\r
+ u32 need_to_run_bottom_half = 0;\r
+\r
+ //XGI_INFO("xgi_kern_isr \n");\r
+\r
+ //XGI_CHECK_PCI_CONFIG(info);\r
+\r
+ //xgi_dvi_irq_handler(info);\r
+\r
+ if (need_to_run_bottom_half)\r
+ {\r
+ tasklet_schedule(&info->tasklet);\r
+ }\r
+\r
+ return IRQ_HANDLED;\r
+}\r
+\r
+void xgi_kern_isr_bh(unsigned long data)\r
+{\r
+ xgi_info_t *info = (xgi_info_t *) data;\r
+\r
+ XGI_INFO("xgi_kern_isr_bh \n");\r
+\r
+ //xgi_dvi_irq_handler(info);\r
+\r
+ XGI_CHECK_PCI_CONFIG(info);\r
+}\r
+\r
+static void xgi_lock_init(xgi_info_t *info)\r
+{\r
+ if (info == NULL) return;\r
+\r
+ spin_lock_init(&info->info_lock);\r
+\r
+ sema_init(&info->info_sem, 1);\r
+ sema_init(&info->fb_sem, 1);\r
+ sema_init(&info->pcie_sem, 1);\r
+\r
+ XGI_ATOMIC_SET(info->use_count, 0);\r
+}\r
+\r
+static void xgi_dev_init(xgi_info_t *info)\r
+{\r
+ struct pci_dev *pdev = NULL;\r
+ struct xgi_dev *dev;\r
+ int found = 0;\r
+ u16 pci_cmd;\r
+\r
+ XGI_INFO("Enter xgi_dev_init \n");\r
+\r
+ //XGI_PCI_FOR_EACH_DEV(pdev)\r
+ {\r
+ for (dev = xgidev_list; dev->vendor; dev++)\r
+ {\r
+ if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device))\r
+ {\r
+ XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor);\r
+ XGI_INFO("dev->device = pdev->device= %x \n", dev->device);\r
+\r
+ xgi_devices[found].device_id = pdev->device;\r
+\r
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id);\r
+\r
+ XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id);\r
+\r
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);\r
+\r
+ XGI_INFO("PCI_COMMAND = %x \n", pci_cmd);\r
+\r
+ break;\r
+ }\r
+ }\r
+ }\r
+}\r
+/*\r
+ * Export to Linux Kernel\r
+ */\r
+\r
+static int __init xgi_init_module(void)\r
+{\r
+ xgi_info_t *info = &xgi_devices[xgi_num_devices];\r
+ int i, result;\r
+\r
+ XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION);\r
+ //SET_MODULE_OWNER(&xgi_fops);\r
+\r
+ memset(xgi_devices, 0, sizeof(xgi_devices));\r
+\r
+ if (pci_register_driver(&xgi_pci_driver) < 0)\r
+ {\r
+ pci_unregister_driver(&xgi_pci_driver);\r
+ XGI_ERROR("no XGI graphics adapter found\n");\r
+ return -ENODEV;\r
+ }\r
+\r
+ XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base);\r
+ XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size);\r
+\r
+/* Jong 07/27/2006; test for ubuntu */\r
+/*\r
+#ifdef CONFIG_DEVFS_FS\r
+\r
+ XGI_INFO("Jong-Use devfs \n");\r
+ do\r
+ {\r
+ xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0);\r
+ if (xgi_devfs_handles[0] == NULL)\r
+ {\r
+ result = -ENOMEM;\r
+ XGI_ERROR("devfs register failed\n");\r
+ goto failed;\r
+ }\r
+ } while(0);\r
+#else */ /* no devfs, do it the "classic" way */\r
+\r
+\r
+ XGI_INFO("Jong-Use non-devfs \n");\r
+ /*\r
+ * Register your major, and accept a dynamic number. This is the\r
+ * first thing to do, in order to avoid releasing other module's\r
+ * fops in scull_cleanup_module()\r
+ */\r
+ result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops);\r
+ if (result < 0)\r
+ {\r
+ XGI_ERROR("register chrdev failed\n");\r
+ pci_unregister_driver(&xgi_pci_driver);\r
+ return result;\r
+ }\r
+ if (xgi_major == 0) xgi_major = result; /* dynamic */\r
+\r
+/* #endif */ /* CONFIG_DEVFS_FS */\r
+\r
+ XGI_INFO("Jong-major number %d\n", xgi_major);\r
+\r
+ /* instantiate tasklets */\r
+ for (i = 0; i < XGI_MAX_DEVICES; i++)\r
+ {\r
+ /*\r
+ * We keep one tasklet per card to avoid latency issues with more\r
+ * than one device; no two instances of a single tasklet are ever\r
+ * executed concurrently.\r
+ */\r
+ XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1);\r
+ }\r
+\r
+ /* init the xgi control device */\r
+ {\r
+ xgi_info_t *info_ctl = &xgi_ctl_device;\r
+ xgi_lock_init(info_ctl);\r
+ }\r
+\r
+ /* Init the resource manager */\r
+ INIT_LIST_HEAD(&xgi_mempid_list);\r
+ if (!xgi_fb_heap_init(info))\r
+ {\r
+ XGI_ERROR("xgi_fb_heap_init() failed\n");\r
+ result = -EIO;\r
+ goto failed;\r
+ }\r
+\r
+ /* Init the resource manager */\r
+ if (!xgi_pcie_heap_init(info))\r
+ {\r
+ XGI_ERROR("xgi_pcie_heap_init() failed\n");\r
+ result = -EIO;\r
+ goto failed;\r
+ }\r
+\r
+ /* create /proc/driver/xgi */\r
+ xgi_proc_create();\r
+\r
+#if defined(DEBUG)\r
+ inter_module_register("xgi_devices", THIS_MODULE, xgi_devices);\r
+#endif\r
+\r
+ return 0;\r
+\r
+failed:\r
+#ifdef CONFIG_DEVFS_FS\r
+ XGI_DEVFS_REMOVE_CONTROL();\r
+ XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);\r
+#endif\r
+\r
+ if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)\r
+ XGI_ERROR("unregister xgi chrdev failed\n");\r
+\r
+ for (i = 0; i < xgi_num_devices; i++)\r
+ {\r
+ if (xgi_devices[i].dev)\r
+ {\r
+ release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);\r
+ release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size);\r
+ }\r
+ }\r
+\r
+ pci_unregister_driver(&xgi_pci_driver);\r
+ return result;\r
+}\r
+\r
+void __exit xgi_exit_module(void)\r
+{\r
+ int i;\r
+ xgi_info_t *info, *max_devices;\r
+\r
+#ifdef CONFIG_DEVFS_FS\r
+ /*\r
+ XGI_DEVFS_REMOVE_CONTROL();\r
+ for (i = 0; i < XGI_MAX_DEVICES; i++)\r
+ XGI_DEVFS_REMOVE_DEVICE(i);\r
+ */\r
+ XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);\r
+#endif\r
+\r
+ if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)\r
+ XGI_ERROR("unregister xgi chrdev failed\n");\r
+\r
+ XGI_INFO("Jong-unregister xgi chrdev scceeded\n");\r
+ for (i = 0; i < XGI_MAX_DEVICES; i++)\r
+ {\r
+ if (xgi_devices[i].dev)\r
+ {\r
+ /* clean up the flush2D batch array */\r
+ xgi_cmdlist_cleanup(&xgi_devices[i]);\r
+\r
+ if(xgi_devices[i].fb.vbase != NULL)\r
+ {\r
+ iounmap((void *)xgi_devices[i].fb.vbase);\r
+ xgi_devices[i].fb.vbase = NULL;\r
+ }\r
+ if(xgi_devices[i].mmio.vbase != NULL)\r
+ {\r
+ iounmap((void *)xgi_devices[i].mmio.vbase);\r
+ xgi_devices[i].mmio.vbase = NULL;\r
+ }\r
+\r
+ //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);\r
+ //XGI_INFO("release frame buffer mem region scceeded\n");\r
+\r
+ release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size);\r
+ XGI_INFO("release MMIO mem region scceeded\n");\r
+\r
+ xgi_fb_heap_cleanup(&xgi_devices[i]);\r
+ XGI_INFO("xgi_fb_heap_cleanup scceeded\n");\r
+\r
+ xgi_pcie_heap_cleanup(&xgi_devices[i]);\r
+ XGI_INFO("xgi_pcie_heap_cleanup scceeded\n");\r
+\r
+ XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev);\r
+ }\r
+ }\r
+\r
+ pci_unregister_driver(&xgi_pci_driver);\r
+\r
+ /* remove /proc/driver/xgi */\r
+ xgi_proc_remove();\r
+\r
+#if defined(DEBUG)\r
+ inter_module_unregister("xgi_devices");\r
+#endif\r
+}\r
+\r
+module_init(xgi_init_module);\r
+module_exit(xgi_exit_module);\r
+\r
+#if defined(XGI_PM_SUPPORT_ACPI)\r
+int xgi_acpi_event(struct pci_dev *dev, u32 state)\r
+{\r
+ return 1;\r
+}\r
+\r
+int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state)\r
+{\r
+ return 1;\r
+}\r
+\r
+int xgi_kern_acpi_resume(struct pci_dev *dev)\r
+{\r
+ return 1;\r
+}\r
+#endif\r
+\r
+MODULE_AUTHOR("Andrea Zhang <andrea_zhang@macrosynergy.com>");\r
+MODULE_DESCRIPTION("xgi kernel driver for xgi cards");\r
+MODULE_LICENSE("GPL");\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#ifndef _XGI_DRV_H_\r
+#define _XGI_DRV_H_\r
+\r
+#define XGI_MAJOR_VERSION 0\r
+#define XGI_MINOR_VERSION 7\r
+#define XGI_PATCHLEVEL 5\r
+\r
+#define XGI_DRV_VERSION "0.7.5"\r
+\r
+#ifndef XGI_DRV_NAME\r
+#define XGI_DRV_NAME "xgi"\r
+#endif\r
+\r
+/*\r
+ * xgi reserved major device number, Set this to 0 to\r
+ * request dynamic major number allocation.\r
+ */\r
+#ifndef XGI_DEV_MAJOR\r
+#define XGI_DEV_MAJOR 0\r
+#endif\r
+\r
+#ifndef XGI_MAX_DEVICES\r
+#define XGI_MAX_DEVICES 1\r
+#endif\r
+\r
+/* Jong 06/06/2006 */\r
+/* #define XGI_DEBUG */\r
+\r
+#ifndef PCI_VENDOR_ID_XGI\r
+/*\r
+#define PCI_VENDOR_ID_XGI 0x1023\r
+*/\r
+#define PCI_VENDOR_ID_XGI 0x18CA\r
+\r
+#endif\r
+\r
+#ifndef PCI_DEVICE_ID_XP5\r
+#define PCI_DEVICE_ID_XP5 0x2200\r
+#endif\r
+\r
+#ifndef PCI_DEVICE_ID_XG47\r
+#define PCI_DEVICE_ID_XG47 0x0047\r
+#endif\r
+\r
+/* Macros to make printk easier */\r
+#define XGI_ERROR(fmt, arg...) \\r
+ printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)\r
+\r
+#define XGI_MEM_ERROR(area, fmt, arg...) \\r
+ printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)\r
+\r
+/* #define XGI_DEBUG */ \r
+\r
+#ifdef XGI_DEBUG\r
+#define XGI_INFO(fmt, arg...) \\r
+ printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg)\r
+/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */\r
+#else\r
+#define XGI_INFO(fmt, arg...) do { } while (0)\r
+#endif\r
+\r
+/* device name length; must be at least 8 */\r
+#define XGI_DEVICE_NAME_LENGTH 40\r
+\r
+/* need a fake device number for control device; just to flag it for msgs */\r
+#define XGI_CONTROL_DEVICE_NUMBER 100\r
+\r
+typedef struct {\r
+ U32 base; // pcie base is different from fb base\r
+ U32 size;\r
+ U8 *vbase;\r
+} xgi_aperture_t;\r
+\r
+typedef struct xgi_screen_info_s {\r
+ U32 scrn_start;\r
+ U32 scrn_xres;\r
+ U32 scrn_yres;\r
+ U32 scrn_bpp;\r
+ U32 scrn_pitch;\r
+} xgi_screen_info_t;\r
+\r
+typedef struct xgi_sarea_info_s {\r
+ U32 bus_addr;\r
+ U32 size;\r
+} xgi_sarea_info_t;\r
+\r
+typedef struct xgi_info_s {\r
+ struct pci_dev *dev;\r
+ int flags;\r
+ int device_number;\r
+ int bus; /* PCI config info */\r
+ int slot;\r
+ int vendor_id;\r
+ U32 device_id;\r
+ U8 revision_id;\r
+\r
+ /* physical characteristics */\r
+ xgi_aperture_t mmio;\r
+ xgi_aperture_t fb;\r
+ xgi_aperture_t pcie;\r
+ xgi_screen_info_t scrn_info;\r
+ xgi_sarea_info_t sarea_info;\r
+\r
+ /* look up table parameters */\r
+ U32 *lut_base;\r
+ U32 lutPageSize;\r
+ U32 lutPageOrder;\r
+ U32 isLUTInLFB;\r
+ U32 sdfbPageSize;\r
+\r
+ U32 pcie_config;\r
+ U32 pcie_status;\r
+ U32 irq;\r
+\r
+ atomic_t use_count;\r
+\r
+ /* keep track of any pending bottom halfes */\r
+ struct tasklet_struct tasklet;\r
+\r
+ spinlock_t info_lock;\r
+\r
+ struct semaphore info_sem;\r
+ struct semaphore fb_sem;\r
+ struct semaphore pcie_sem;\r
+} xgi_info_t;\r
+\r
+typedef struct xgi_ioctl_post_vbios {\r
+ U32 bus;\r
+ U32 slot;\r
+} xgi_ioctl_post_vbios_t;\r
+\r
+typedef enum xgi_mem_location_s\r
+{\r
+ NON_LOCAL = 0,\r
+ LOCAL = 1,\r
+ INVALID = 0x7fffffff\r
+} xgi_mem_location_t;\r
+\r
+enum PcieOwner\r
+{\r
+ PCIE_2D = 0,\r
+ /*\r
+ PCIE_3D must not start at 1; owner 1 is used when\r
+ 2D allocates PCIE memory.\r
+ */\r
+ PCIE_3D = 11, /* vertex buffer */\r
+ PCIE_3D_CMDLIST = 12,\r
+ PCIE_3D_SCRATCHPAD = 13,\r
+ PCIE_3D_TEXTURE = 14,\r
+ PCIE_INVALID = 0x7fffffff\r
+};\r
+\r
+typedef struct xgi_mem_req_s {\r
+ xgi_mem_location_t location;\r
+ unsigned long size;\r
+ unsigned long is_front;\r
+ enum PcieOwner owner;\r
+ unsigned long pid;\r
+} xgi_mem_req_t;\r
+\r
+typedef struct xgi_mem_alloc_s {\r
+ xgi_mem_location_t location;\r
+ unsigned long size;\r
+ unsigned long bus_addr;\r
+ unsigned long hw_addr;\r
+ unsigned long pid;\r
+} xgi_mem_alloc_t;\r
+\r
+typedef struct xgi_chip_info_s {\r
+ U32 device_id;\r
+ char device_name[32];\r
+ U32 vendor_id;\r
+ U32 curr_display_mode; // Single, DualView (Contained), MHS\r
+ U32 fb_size;\r
+ U32 sarea_bus_addr;\r
+ U32 sarea_size;\r
+} xgi_chip_info_t;\r
+\r
+typedef struct xgi_opengl_cmd_s {\r
+ U32 cmd;\r
+} xgi_opengl_cmd_t;\r
+\r
+typedef struct xgi_mmio_info_s {\r
+ xgi_opengl_cmd_t cmd_head;\r
+ void *mmioBase;\r
+ int size;\r
+} xgi_mmio_info_t;\r
+\r
+typedef enum {\r
+ BTYPE_2D = 0,\r
+ BTYPE_3D = 1,\r
+ BTYPE_FLIP = 2,\r
+ BTYPE_CTRL = 3,\r
+ BTYPE_NONE = 0x7fffffff\r
+}BATCH_TYPE;\r
+\r
+typedef struct xgi_cmd_info_s {\r
+ BATCH_TYPE _firstBeginType;\r
+ U32 _firstBeginAddr;\r
+ U32 _firstSize;\r
+ U32 _curDebugID;\r
+ U32 _lastBeginAddr;\r
+ U32 _beginCount;\r
+} xgi_cmd_info_t;\r
+\r
+typedef struct xgi_state_info_s {\r
+ U32 _fromState;\r
+ U32 _toState;\r
+} xgi_state_info_t;\r
+\r
+typedef struct cpu_info_s {\r
+ U32 _eax;\r
+ U32 _ebx;\r
+ U32 _ecx;\r
+ U32 _edx;\r
+} cpu_info_t;\r
+\r
+typedef struct xgi_mem_pid_s {\r
+ struct list_head list;\r
+ xgi_mem_location_t location;\r
+ unsigned long bus_addr;\r
+ unsigned long pid;\r
+} xgi_mem_pid_t;\r
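+\r
+/*\r
+ * One xgi_mem_pid_t is queued on xgi_mempid_list for every frame buffer or\r
+ * PCIE allocation, recording the bus address and the owning process id,\r
+ * presumably so stale allocations can be reclaimed when the owner exits.\r
+ */\r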
+\r
+/*\r
+ * Ioctl definitions\r
+ */\r
+\r
+#define XGI_IOCTL_MAGIC 'x' /* use 'x' as magic number */\r
+\r
+#define XGI_IOCTL_BASE 0\r
+#define XGI_ESC_DEVICE_INFO (XGI_IOCTL_BASE + 0)\r
+#define XGI_ESC_POST_VBIOS (XGI_IOCTL_BASE + 1)\r
+\r
+#define XGI_ESC_FB_INIT (XGI_IOCTL_BASE + 2)\r
+#define XGI_ESC_FB_ALLOC (XGI_IOCTL_BASE + 3)\r
+#define XGI_ESC_FB_FREE (XGI_IOCTL_BASE + 4)\r
+#define XGI_ESC_PCIE_INIT (XGI_IOCTL_BASE + 5)\r
+#define XGI_ESC_PCIE_ALLOC (XGI_IOCTL_BASE + 6)\r
+#define XGI_ESC_PCIE_FREE (XGI_IOCTL_BASE + 7)\r
+#define XGI_ESC_SUBMIT_CMDLIST (XGI_IOCTL_BASE + 8)\r
+#define XGI_ESC_PUT_SCREEN_INFO (XGI_IOCTL_BASE + 9)\r
+#define XGI_ESC_GET_SCREEN_INFO (XGI_IOCTL_BASE + 10)\r
+#define XGI_ESC_GE_RESET (XGI_IOCTL_BASE + 11)\r
+#define XGI_ESC_SAREA_INFO (XGI_IOCTL_BASE + 12)\r
+#define XGI_ESC_DUMP_REGISTER (XGI_IOCTL_BASE + 13)\r
+#define XGI_ESC_DEBUG_INFO (XGI_IOCTL_BASE + 14)\r
+#define XGI_ESC_TEST_RWINKERNEL (XGI_IOCTL_BASE + 16)\r
+#define XGI_ESC_STATE_CHANGE (XGI_IOCTL_BASE + 17)\r
+#define XGI_ESC_MMIO_INFO (XGI_IOCTL_BASE + 18)\r
+#define XGI_ESC_PCIE_CHECK (XGI_IOCTL_BASE + 19)\r
+#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20)\r
+#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21)\r
+\r
+#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t)\r
+#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS)\r
+\r
+#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT)\r
+#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t)\r
+#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long)\r
+\r
+#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT)\r
+#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t)\r
+#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long)\r
+\r
+#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t)\r
+#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t)\r
+\r
+#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET)\r
+#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t)\r
+#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER)\r
+#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO)\r
+#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t)\r
+\r
+#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t)\r
+#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long)\r
+#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t)\r
+\r
+#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK)\r
+#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t)\r
+#define XGI_IOCTL_MAXNR 30\r
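+\r
+/*\r
+ * Minimal user-space usage sketch (illustrative only, not part of this\r
+ * header). The device node path "/dev/xgi" is an assumption based on the\r
+ * "xgi" name passed to the character device registration; adjust it to\r
+ * whatever node is actually created on the target system.\r
+ *\r
+ * #include <fcntl.h>\r
+ * #include <sys/ioctl.h>\r
+ *\r
+ * int fd = open("/dev/xgi", O_RDWR);\r
+ * if (fd >= 0) {\r
+ * xgi_chip_info_t chip;\r
+ * xgi_screen_info_t scrn;\r
+ * ioctl(fd, XGI_IOCTL_DEVICE_INFO, &chip); // fills device id/name/fb size\r
+ * ioctl(fd, XGI_IOCTL_GET_SCREEN_INFO, &scrn); // reads current screen info\r
+ * close(fd);\r
+ * }\r
+ */\r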
+\r
+/*\r
+ * flags\r
+ */\r
+#define XGI_FLAG_OPEN 0x0001\r
+#define XGI_FLAG_NEEDS_POSTING 0x0002\r
+#define XGI_FLAG_WAS_POSTED 0x0004\r
+#define XGI_FLAG_CONTROL 0x0010\r
+#define XGI_FLAG_MAP_REGS_EARLY 0x0200\r
+\r
+/* mmap(2) offsets */\r
+\r
+#define IS_IO_OFFSET(info, offset, length) \\r
+ (((offset) >= (info)->mmio.base) \\r
+ && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size))\r
+\r
+/* Jong 06/14/2006 */\r
+/* (info)->fb.base is a base address in physical (bus) address space */\r
+/* Is the offset defined in physical (bus) address space or in HW address space? */\r
+/* Jong 06/15/2006; use HW address space */\r
+#define IS_FB_OFFSET(info, offset, length) \\r
+ (((offset) >= 0) \\r
+ && (((offset) + (length)) <= (info)->fb.size))\r
+#if 0\r
+#define IS_FB_OFFSET(info, offset, length) \\r
+ (((offset) >= (info)->fb.base) \\r
+ && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size))\r
+#endif\r
+\r
+#define IS_PCIE_OFFSET(info, offset, length) \\r
+ (((offset) >= (info)->pcie.base) \\r
+ && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))\r
+\r
+extern int xgi_fb_heap_init(xgi_info_t *info);\r
+extern void xgi_fb_heap_cleanup(xgi_info_t *info);\r
+\r
+extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc);\r
+extern void xgi_fb_free(xgi_info_t *info, unsigned long offset);\r
+extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt);\r
+\r
+extern int xgi_pcie_heap_init(xgi_info_t *info);\r
+extern void xgi_pcie_heap_cleanup(xgi_info_t *info);\r
+\r
+extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc);\r
+extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset);\r
+extern void xgi_pcie_heap_check(void);\r
+extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address);\r
+extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address);\r
+\r
+extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req);\r
+extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req);\r
+\r
+extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address);\r
+\r
+#endif\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#include "xgi_types.h"\r
+#include "xgi_linux.h"\r
+#include "xgi_drv.h"\r
+#include "xgi_fb.h"\r
+\r
+#define XGI_FB_HEAP_START 0x1000000\r
+\r
+static xgi_mem_heap_t *xgi_fb_heap;\r
+static kmem_cache_t *xgi_fb_cache_block = NULL;\r
+extern struct list_head xgi_mempid_list;\r
+\r
+static xgi_mem_block_t *xgi_mem_new_node(void);\r
+static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size);\r
+static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset);\r
+\r
+void xgi_fb_alloc(xgi_info_t *info,\r
+ xgi_mem_req_t *req,\r
+ xgi_mem_alloc_t *alloc)\r
+{\r
+ xgi_mem_block_t *block;\r
+ xgi_mem_pid_t *mempid_block;\r
+\r
+ if (req->is_front)\r
+ {\r
+ alloc->location = LOCAL;\r
+ alloc->bus_addr = info->fb.base;\r
+ alloc->hw_addr = 0;\r
+ XGI_INFO("Video RAM allocation on front buffer successfully! \n");\r
+ }\r
+ else\r
+ {\r
+ xgi_down(info->fb_sem);\r
+ block = xgi_mem_alloc(info, req->size);\r
+ xgi_up(info->fb_sem);\r
+\r
+ if (block == NULL)\r
+ {\r
+ alloc->location = LOCAL;\r
+ alloc->size = 0;\r
+ alloc->bus_addr = 0;\r
+ alloc->hw_addr = 0;\r
+ XGI_ERROR("Video RAM allocation failed\n");\r
+ }\r
+ else\r
+ {\r
+ XGI_INFO("Video RAM allocation succeeded: 0x%p\n",\r
+ (char *) block->offset);\r
+ alloc->location = LOCAL;\r
+ alloc->size = block->size;\r
+ alloc->bus_addr = info->fb.base + block->offset;\r
+ alloc->hw_addr = block->offset;\r
+\r
+ /* manage mempid */\r
+ mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);\r
+ if (!mempid_block)\r
+ {\r
+ XGI_ERROR("mempid_block alloc failed\n");\r
+ }\r
+ else\r
+ {\r
+ mempid_block->location = LOCAL;\r
+ mempid_block->bus_addr = alloc->bus_addr;\r
+ mempid_block->pid = alloc->pid;\r
+\r
+ XGI_INFO("Memory ProcessID added one fb block, pid: %ld\n", mempid_block->pid);\r
+ list_add(&mempid_block->list, &xgi_mempid_list);\r
+ }\r
+ }\r
+ }\r
+}\r
+\r
+void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr)\r
+{\r
+ xgi_mem_block_t *block;\r
+ unsigned long offset = bus_addr - info->fb.base;\r
+ xgi_mem_pid_t *mempid_block;\r
+ xgi_mem_pid_t *mempid_freeblock = NULL;\r
+ struct list_head *mempid_list;\r
+\r
+ if (offset < 0)\r
+ {\r
+ XGI_INFO("free onscreen frame buffer successfully !\n");\r
+ }\r
+ else\r
+ {\r
+ xgi_down(info->fb_sem);\r
+ block = xgi_mem_free(info, offset);\r
+ xgi_up(info->fb_sem);\r
+\r
+ if (block == NULL)\r
+ {\r
+ XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset);\r
+ }\r
+\r
+ /* manage mempid */\r
+ mempid_list = xgi_mempid_list.next;\r
+ while (mempid_list != &xgi_mempid_list)\r
+ {\r
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);\r
+ if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr)\r
+ {\r
+ mempid_freeblock = mempid_block;\r
+ break;\r
+ }\r
+ mempid_list = mempid_list->next;\r
+ }\r
+ if (mempid_freeblock)\r
+ {\r
+ list_del(&mempid_freeblock->list);\r
+ XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! \n", mempid_freeblock->pid);\r
+ kfree(mempid_freeblock);\r
+ }\r
+ }\r
+}\r
+\r
+int xgi_fb_heap_init(xgi_info_t *info)\r
+{\r
+ xgi_mem_block_t *block;\r
+\r
+ xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL);\r
+ if (!xgi_fb_heap)\r
+ {\r
+ XGI_ERROR("xgi_fb_heap alloc failed\n");\r
+ return 0;\r
+ }\r
+\r
+ INIT_LIST_HEAD(&xgi_fb_heap->free_list);\r
+ INIT_LIST_HEAD(&xgi_fb_heap->used_list);\r
+ INIT_LIST_HEAD(&xgi_fb_heap->sort_list);\r
+\r
+ xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t),\r
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);\r
+\r
+ if (NULL == xgi_fb_cache_block)\r
+ {\r
+ XGI_ERROR("Fail to creat xgi_fb_block\n");\r
+ goto fail1;\r
+ }\r
+\r
+ block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL);\r
+ if (!block)\r
+ {\r
+ XGI_ERROR("kmem_cache_alloc failed\n");\r
+ goto fail2;\r
+ }\r
+\r
+ block->offset = XGI_FB_HEAP_START;\r
+ block->size = info->fb.size - XGI_FB_HEAP_START;\r
+\r
+ list_add(&block->list, &xgi_fb_heap->free_list);\r
+\r
+ xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START;\r
+\r
+ XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size);\r
+ XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);\r
+\r
+ return 1;\r
+\r
+fail2:\r
+ if (xgi_fb_cache_block)\r
+ {\r
+ kmem_cache_destroy(xgi_fb_cache_block);\r
+ xgi_fb_cache_block = NULL;\r
+ }\r
+fail1:\r
+ if(xgi_fb_heap)\r
+ {\r
+ kfree(xgi_fb_heap);\r
+ xgi_fb_heap = NULL;\r
+ }\r
+ return 0;\r
+}\r
+\r
+void xgi_fb_heap_cleanup(xgi_info_t *info)\r
+{\r
+ struct list_head *free_list, *temp;\r
+ xgi_mem_block_t *block;\r
+ int i;\r
+\r
+ if (xgi_fb_heap)\r
+ {\r
+ free_list = &xgi_fb_heap->free_list;\r
+ for (i = 0; i < 3; i++, free_list++)\r
+ {\r
+ temp = free_list->next;\r
+ while (temp != free_list)\r
+ {\r
+ block = list_entry(temp, struct xgi_mem_block_s, list);\r
+ temp = temp->next;\r
+\r
+ XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n",\r
+ i, block->offset, block->size);\r
+ //XGI_INFO("No. %d free block: 0x%p \n", i, block);\r
+ kmem_cache_free(xgi_fb_cache_block, block);\r
+ block = NULL;\r
+ }\r
+ }\r
+ XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap);\r
+ kfree(xgi_fb_heap);\r
+ xgi_fb_heap = NULL;\r
+ }\r
+\r
+ if (xgi_fb_cache_block)\r
+ {\r
+ kmem_cache_destroy(xgi_fb_cache_block);\r
+ xgi_fb_cache_block = NULL;\r
+ }\r
+}\r
+\r
+static xgi_mem_block_t * xgi_mem_new_node(void)\r
+{\r
+ xgi_mem_block_t *block;\r
+\r
+ block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL);\r
+ if (!block)\r
+ {\r
+ XGI_ERROR("kmem_cache_alloc failed\n");\r
+ return NULL;\r
+ }\r
+\r
+ return block;\r
+}\r
+\r
+#if 0\r
+static void xgi_mem_insert_node_after(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *current,\r
+ xgi_mem_block_t *block);\r
+static void xgi_mem_insert_node_before(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *current,\r
+ xgi_mem_block_t *block);\r
+static void xgi_mem_insert_node_head(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block);\r
+static void xgi_mem_insert_node_tail(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block);\r
+static void xgi_mem_delete_node(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block);\r
+/*\r
+ * insert node:block after node:current\r
+ */\r
+static void xgi_mem_insert_node_after(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *current,\r
+ xgi_mem_block_t *block)\r
+{\r
+ block->prev = current;\r
+ block->next = current->next;\r
+ current->next = block;\r
+\r
+ if (current == list->tail)\r
+ {\r
+ list->tail = block;\r
+ }\r
+ else\r
+ {\r
+ block->next->prev = block;\r
+ }\r
+}\r
+\r
+/*\r
+ * insert node:block before node:current\r
+ */\r
+static void xgi_mem_insert_node_before(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *current,\r
+ xgi_mem_block_t *block)\r
+{\r
+ block->prev = current->prev;\r
+ block->next = current;\r
+ current->prev = block;\r
+ if (current == list->head)\r
+ {\r
+ list->head = block;\r
+ }\r
+ else\r
+ {\r
+ block->prev->next = block;\r
+ }\r
+}\r
+void xgi_mem_insert_node_head(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block)\r
+{\r
+ block->next = list->head;\r
+ block->prev = NULL;\r
+\r
+ if (NULL == list->head)\r
+ {\r
+ list->tail = block;\r
+ }\r
+ else\r
+ {\r
+ list->head->prev = block;\r
+ }\r
+ list->head = block;\r
+}\r
+\r
+static void xgi_mem_insert_node_tail(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block)\r
+\r
+{\r
+ block->next = NULL;\r
+ block->prev = list->tail;\r
+ if (NULL == list->tail)\r
+ {\r
+ list->head = block;\r
+ }\r
+ else\r
+ {\r
+ list->tail->next = block;\r
+ }\r
+ list->tail = block;\r
+}\r
+\r
+static void xgi_mem_delete_node(xgi_mem_list_t *list,\r
+ xgi_mem_block_t *block)\r
+{\r
+ if (block == list->head)\r
+ {\r
+ list->head = block->next;\r
+ }\r
+ if (block == list->tail)\r
+ {\r
+ list->tail = block->prev;\r
+ }\r
+\r
+ if (block->prev)\r
+ {\r
+ block->prev->next = block->next;\r
+ }\r
+ if (block->next)\r
+ {\r
+ block->next->prev = block->prev;\r
+ }\r
+\r
+ block->next = block->prev = NULL;\r
+}\r
+#endif\r
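+\r
+/*\r
+ * First-fit allocator over the frame buffer heap: the request is rounded up\r
+ * to whole pages, the free list is scanned for the first block that is large\r
+ * enough, and that block is either handed out whole (exact fit) or split,\r
+ * with the remainder left on the free list. The chosen block is moved to the\r
+ * used list and returned.\r
+ */\r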
+static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize)\r
+{\r
+ struct list_head *free_list;\r
+ xgi_mem_block_t *block, *free_block, *used_block;\r
+\r
+ unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;\r
+\r
+ XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size);\r
+\r
+ if (size == 0)\r
+ {\r
+ XGI_ERROR("size == 0\n");\r
+ return (NULL);\r
+ }\r
+ XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);\r
+ if (size > xgi_fb_heap->max_freesize)\r
+ {\r
+ XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",\r
+ size, xgi_fb_heap->max_freesize);\r
+ return (NULL);\r
+ }\r
+\r
+ free_list = xgi_fb_heap->free_list.next;\r
+\r
+ while (free_list != &xgi_fb_heap->free_list)\r
+ {\r
+ XGI_INFO("free_list: 0x%px \n", free_list);\r
+ block = list_entry(free_list, struct xgi_mem_block_s, list);\r
+ if (size <= block->size)\r
+ {\r
+ break;\r
+ }\r
+ free_list = free_list->next;\r
+ }\r
+\r
+ if (free_list == &xgi_fb_heap->free_list)\r
+ {\r
+ XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024);\r
+ return (NULL);\r
+ }\r
+\r
+ free_block = block;\r
+ XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",\r
+ size, free_block->offset, free_block->size);\r
+\r
+ if (size == free_block->size)\r
+ {\r
+ used_block = free_block;\r
+ XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block);\r
+ list_del(&free_block->list);\r
+ }\r
+ else\r
+ {\r
+ used_block = xgi_mem_new_node();\r
+\r
+ if (used_block == NULL) return (NULL);\r
+\r
+ if (used_block == free_block)\r
+ {\r
+ XGI_ERROR("used_block == free_block = 0x%p\n", used_block);\r
+ }\r
+\r
+ used_block->offset = free_block->offset;\r
+ used_block->size = size;\r
+\r
+ free_block->offset += size;\r
+ free_block->size -= size;\r
+ }\r
+\r
+ xgi_fb_heap->max_freesize -= size;\r
+\r
+ list_add(&used_block->list, &xgi_fb_heap->used_list);\r
+\r
+ return (used_block);\r
+}\r
+\r
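+/*\r
+ * Free by offset: the block is looked up on the used list, removed, and\r
+ * merged with any free neighbours. If free blocks sit immediately below and\r
+ * above it, all three collapse into one; with a single neighbour the block\r
+ * is folded into it; otherwise it goes back on the free list unchanged.\r
+ */\r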
+static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset)\r
+{\r
+ struct list_head *free_list, *used_list;\r
+ xgi_mem_block_t *used_block = NULL, *block = NULL;\r
+ xgi_mem_block_t *prev, *next;\r
+\r
+ unsigned long upper;\r
+ unsigned long lower;\r
+\r
+ used_list = xgi_fb_heap->used_list.next;\r
+ while (used_list != &xgi_fb_heap->used_list)\r
+ {\r
+ block = list_entry(used_list, struct xgi_mem_block_s, list);\r
+ if (block->offset == offset)\r
+ {\r
+ break;\r
+ }\r
+ used_list = used_list->next;\r
+ }\r
+\r
+ if (used_list == &xgi_fb_heap->used_list)\r
+ {\r
+ XGI_ERROR("can't find block: 0x%lx to free!\n", offset);\r
+ return (NULL);\r
+ }\r
+\r
+ used_block = block;\r
+ XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",\r
+ used_block, used_block->offset, used_block->size);\r
+\r
+ xgi_fb_heap->max_freesize += used_block->size;\r
+\r
+ prev = next = NULL;\r
+ upper = used_block->offset + used_block->size;\r
+ lower = used_block->offset;\r
+\r
+ free_list = xgi_fb_heap->free_list.next;\r
+ while (free_list != &xgi_fb_heap->free_list)\r
+ {\r
+ block = list_entry(free_list, struct xgi_mem_block_s, list);\r
+\r
+ if (block->offset == upper)\r
+ {\r
+ next = block;\r
+ }\r
+ else if ((block->offset + block->size) == lower)\r
+ {\r
+ prev = block;\r
+ }\r
+ free_list = free_list->next;\r
+ }\r
+\r
+ XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);\r
+ list_del(&used_block->list);\r
+\r
+ if (prev && next)\r
+ {\r
+ prev->size += (used_block->size + next->size);\r
+ list_del(&next->list);\r
+ XGI_INFO("free node 0x%p\n", next);\r
+ kmem_cache_free(xgi_fb_cache_block, next);\r
+ kmem_cache_free(xgi_fb_cache_block, used_block);\r
+\r
+ next = NULL;\r
+ used_block = NULL;\r
+ return (prev);\r
+ }\r
+\r
+ if (prev)\r
+ {\r
+ prev->size += used_block->size;\r
+ XGI_INFO("free node 0x%p\n", used_block);\r
+ kmem_cache_free(xgi_fb_cache_block, used_block);\r
+ used_block = NULL;\r
+ return (prev);\r
+ }\r
+\r
+ if (next)\r
+ {\r
+ next->size += used_block->size;\r
+ next->offset = used_block->offset;\r
+ XGI_INFO("free node 0x%p\n", used_block);\r
+ kmem_cache_free(xgi_fb_cache_block, used_block);\r
+ used_block = NULL;\r
+ return (next);\r
+ }\r
+\r
+ list_add(&used_block->list, &xgi_fb_heap->free_list);\r
+ XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",\r
+ used_block, used_block->offset, used_block->size);\r
+\r
+ return (used_block);\r
+}\r
+\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#ifndef _XGI_FB_H_\r
+#define _XGI_FB_H_\r
+\r
+typedef struct xgi_mem_block_s {\r
+ struct list_head list;\r
+ unsigned long offset;\r
+ unsigned long size;\r
+ atomic_t use_count;\r
+} xgi_mem_block_t;\r
+\r
+typedef struct xgi_mem_heap_s {\r
+ struct list_head free_list;\r
+ struct list_head used_list;\r
+ struct list_head sort_list;\r
+ unsigned long max_freesize;\r
+ spinlock_t lock;\r
+} xgi_mem_heap_t;\r
+\r
+#if 0\r
+typedef struct xgi_mem_block_s {\r
+ struct xgi_mem_block_s *next;\r
+ struct xgi_mem_block_s *prev;\r
+ unsigned long offset;\r
+ unsigned long size;\r
+ atomic_t use_count;\r
+} xgi_mem_block_t;\r
+\r
+typedef struct xgi_mem_list_s {\r
+ xgi_mem_block_t *head;\r
+ xgi_mem_block_t *tail;\r
+} xgi_mem_list_t;\r
+\r
+typedef struct xgi_mem_heap_s {\r
+ xgi_mem_list_t *free_list;\r
+ xgi_mem_list_t *used_list;\r
+ xgi_mem_list_t *sort_list;\r
+ unsigned long max_freesize;\r
+ spinlock_t lock;\r
+} xgi_mem_heap_t;\r
+#endif\r
+\r
+#endif\r
+\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+\r
+#ifndef _XGI_LINUX_H_\r
+#define _XGI_LINUX_H_\r
+\r
+#include <linux/config.h>\r
+\r
+#ifndef LINUX_VERSION_CODE\r
+#include <linux/version.h>\r
+#endif\r
+\r
+#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */\r
+#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))\r
+#endif\r
+\r
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)\r
+# error "This driver does not support pre-2.4 kernels!"\r
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)\r
+#define KERNEL_2_4\r
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)\r
+# error "This driver does not support 2.5 kernels!"\r
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)\r
+#define KERNEL_2_6\r
+#else\r
+# error "This driver does not support development kernels!"\r
+#endif\r
+\r
+#if defined (CONFIG_SMP) && !defined (__SMP__)\r
+#define __SMP__\r
+#endif\r
+\r
+#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)\r
+#define MODVERSIONS\r
+#endif\r
+\r
+#if defined (MODVERSIONS) && !defined (KERNEL_2_6)\r
+#include <linux/modversions.h>\r
+#endif\r
+\r
+#include <linux/kernel.h> /* printk */\r
+#include <linux/module.h>\r
+\r
+#include <linux/init.h> /* module_init, module_exit */\r
+#include <linux/types.h> /* pic_t, size_t, __u32, etc */\r
+#include <linux/errno.h> /* error codes */\r
+#include <linux/list.h> /* circular linked list */\r
+#include <linux/stddef.h> /* NULL, offsetof */\r
+#include <linux/wait.h> /* wait queues */\r
+\r
+#include <linux/slab.h> /* kmalloc, kfree, etc */\r
+#include <linux/vmalloc.h> /* vmalloc, vfree, etc */\r
+\r
+#include <linux/poll.h> /* poll_wait */\r
+#include <linux/delay.h> /* mdelay, udelay */\r
+#include <asm/msr.h> /* rdtsc rdtscl */\r
+\r
+#include <linux/sched.h> /* suser(), capable() replacement\r
+ for_each_task, for_each_process */\r
+#ifdef for_each_process\r
+#define XGI_SCAN_PROCESS(p) for_each_process(p)\r
+#else\r
+#define XGI_SCAN_PROCESS(p) for_each_task(p)\r
+#endif\r
+\r
+#ifdef KERNEL_2_6\r
+#include <linux/moduleparam.h> /* module_param() */\r
+#include <linux/smp_lock.h> /* kernel_locked */\r
+#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */\r
+#include <asm/kmap_types.h> /* page table entry lookup */\r
+#endif\r
+\r
+#include <linux/pci.h> /* pci_find_class, etc */\r
+#include <linux/interrupt.h> /* tasklets, interrupt helpers */\r
+#include <linux/timer.h>\r
+\r
+#include <asm/system.h> /* cli, sli, save_flags */\r
+#include <asm/io.h> /* ioremap, virt_to_phys */\r
+#include <asm/uaccess.h> /* access_ok */\r
+#include <asm/page.h> /* PAGE_OFFSET */\r
+#include <asm/pgtable.h> /* pte bit definitions */\r
+\r
+#include <linux/spinlock.h>\r
+#include <asm/semaphore.h>\r
+#include <linux/highmem.h>\r
+\r
+#ifdef CONFIG_PROC_FS\r
+#include <linux/proc_fs.h>\r
+#endif\r
+\r
+#ifdef CONFIG_DEVFS_FS\r
+#include <linux/devfs_fs_kernel.h>\r
+#endif\r
+\r
+#ifdef CONFIG_KMOD\r
+#include <linux/kmod.h>\r
+#endif\r
+\r
+#ifdef CONFIG_PM\r
+#include <linux/pm.h>\r
+#endif\r
+\r
+#ifdef CONFIG_MTRR\r
+#include <asm/mtrr.h>\r
+#endif\r
+\r
+#ifdef CONFIG_KDB\r
+#include <linux/kdb.h>\r
+#include <asm/kdb.h>\r
+#endif\r
+\r
+#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)\r
+#define AGPGART\r
+#include <linux/agp_backend.h>\r
+#include <linux/agpgart.h>\r
+#endif\r
+\r
+#ifndef MAX_ORDER\r
+#ifdef KERNEL_2_4\r
+#define MAX_ORDER 10\r
+#endif\r
+#ifdef KERNEL_2_6\r
+#define MAX_ORDER 11\r
+#endif\r
+#endif\r
+\r
+#ifndef module_init\r
+#define module_init(x) int init_module(void) { return x(); }\r
+#define module_exit(x) void cleanup_module(void) { x(); }\r
+#endif\r
+\r
+#ifndef minor\r
+#define minor(x) MINOR(x)\r
+#endif\r
+\r
+#ifndef IRQ_HANDLED\r
+typedef void irqreturn_t;\r
+#define IRQ_NONE\r
+#define IRQ_HANDLED\r
+#define IRQ_RETVAL(x)\r
+#endif\r
+\r
+#if !defined (list_for_each)\r
+#define list_for_each(pos, head) \\r
+ for (pos = (head)->next, prefetch(pos->next); pos != (head); \\r
+ pos = pos->next, prefetch(pos->next))\r
+#endif\r
+\r
+#ifdef KERNEL_2_4\r
+#define XGI_PCI_FOR_EACH_DEV(dev) pci_for_each_dev(dev)\r
+#endif\r
+#ifdef KERNEL_2_6\r
+extern struct list_head pci_devices; /* list of all devices */\r
+#define XGI_PCI_FOR_EACH_DEV(dev) \\r
+ for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next))\r
+#endif\r
+\r
+/*\r
+ * the following macro causes problems when used in the same module\r
+ * as module_param(); undef it so we don't accidentally mix the two\r
+ */\r
+#if defined (KERNEL_2_6)\r
+#undef MODULE_PARM\r
+#endif\r
+\r
+#ifdef EXPORT_NO_SYMBOLS\r
+EXPORT_NO_SYMBOLS;\r
+#endif\r
+\r
+#if defined (KERNEL_2_4)\r
+#define XGI_IS_SUSER() suser()\r
+#define XGI_PCI_DEVICE_NAME(dev) ((dev)->name)\r
+#define XGI_NUM_CPUS() smp_num_cpus\r
+#define XGI_CLI() __cli()\r
+#define XGI_SAVE_FLAGS(eflags) __save_flags(eflags)\r
+#define XGI_RESTORE_FLAGS(eflags) __restore_flags(eflags)\r
+#define XGI_MAY_SLEEP() (!in_interrupt())\r
+#define XGI_MODULE_PARAMETER(x) MODULE_PARM(x, "i")\r
+#endif\r
+\r
+#if defined (KERNEL_2_6)\r
+#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN)\r
+#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name)\r
+#define XGI_NUM_CPUS() num_online_cpus()\r
+#define XGI_CLI() local_irq_disable()\r
+#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags)\r
+#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags)\r
+#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic())\r
+#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0)\r
+#endif\r
+\r
+/* Earlier 2.4.x kernels don't have pci_disable_device() */\r
+#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT\r
+#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev)\r
+#else\r
+#define XGI_PCI_DISABLE_DEVICE(dev)\r
+#endif\r
+\r
+/* common defines */\r
+#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)\r
+#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)\r
+\r
+#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))\r
+#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)\r
+#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data)\r
+\r
+#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev)\r
+#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255)\r
+\r
+#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start)\r
+#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1)\r
+\r
+#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number\r
+#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn)\r
+\r
+#ifdef XGI_PCI_GET_CLASS_PRESENT\r
+#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev)\r
+#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from)\r
+#define XGI_PCI_GET_SLOT(bus,devfn) pci_get_slot(pci_find_bus(0,bus),devfn)\r
+#define XGI_PCI_GET_CLASS(class,from) pci_get_class(class,from)\r
+#else\r
+#define XGI_PCI_DEV_PUT(dev)\r
+#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from)\r
+#define XGI_PCI_GET_SLOT(bus,devfn) pci_find_slot(bus,devfn)\r
+#define XGI_PCI_GET_CLASS(class,from) pci_find_class(class,from)\r
+#endif\r
+\r
+/*\r
+ * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver\r
+ * model is not sufficient for full acpi support. it may work in some cases,\r
+ * but not enough for us to officially support this configuration.\r
+ */\r
+#if defined(CONFIG_ACPI) && defined(KERNEL_2_6)\r
+#define XGI_PM_SUPPORT_ACPI\r
+#endif\r
+\r
+#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)\r
+#define XGI_PM_SUPPORT_APM\r
+#endif\r
+\r
+\r
+#if defined(CONFIG_DEVFS_FS)\r
+#if defined(KERNEL_2_6)\r
+typedef void* devfs_handle_t;\r
+#define XGI_DEVFS_REGISTER(_name, _minor) \\r
+ ({ \\r
+ devfs_handle_t __handle = NULL; \\r
+ if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \\r
+ S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \\r
+ { \\r
+ __handle = (void *) 1; /* XXX Fix me! (boolean) */ \\r
+ } \\r
+ __handle; \\r
+ })\r
+/*\r
+#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i)\r
+*/\r
+#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl")\r
+#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi")\r
+#else // defined(KERNEL_2_4)\r
+#define XGI_DEVFS_REGISTER(_name, _minor) \\r
+ ({ \\r
+ devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \\r
+ XGI_DEV_MAJOR, _minor, \\r
+ S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \\r
+ __handle; \\r
+ })\r
+\r
+#define XGI_DEVFS_REMOVE_DEVICE(i) \\r
+ ({ \\r
+ if (xgi_devfs_handles[i] != NULL) \\r
+ { \\r
+ devfs_unregister(xgi_devfs_handles[i]); \\r
+ } \\r
+ })\r
+#define XGI_DEVFS_REMOVE_CONTROL() \\r
+ ({ \\r
+ if (xgi_devfs_handles[0] != NULL) \\r
+ { \\r
+ devfs_unregister(xgi_devfs_handles[0]); \\r
+ } \\r
+ })\r
+#endif /* defined(KERNEL_2_4) */\r
+#endif /* defined(CONFIG_DEVFS_FS) */\r
+\r
+#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6)\r
+#define XGI_REGISTER_CHRDEV(x...) devfs_register_chrdev(x)\r
+#define XGI_UNREGISTER_CHRDEV(x...) devfs_unregister_chrdev(x)\r
+#else\r
+#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x)\r
+#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x)\r
+#endif\r
+\r
+#if defined(XGI_REMAP_PFN_RANGE_PRESENT)\r
+#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \\r
+ remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)\r
+#elif defined(XGI_REMAP_PAGE_RANGE_5)\r
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)\r
+#elif defined(XGI_REMAP_PAGE_RANGE_4)\r
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x)\r
+#else\r
+#warning "xgi_configure.sh failed, assuming remap_page_range(5)!"\r
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)\r
+#endif\r
+\r
+#if defined(pmd_offset_map)\r
+#define XGI_PMD_OFFSET(address, pg_dir, pg_mid_dir) \\r
+ { \\r
+ pg_mid_dir = pmd_offset_map(pg_dir, address); \\r
+ }\r
+#define XGI_PMD_UNMAP(pg_mid_dir) \\r
+ { \\r
+ pmd_unmap(pg_mid_dir); \\r
+ }\r
+#else\r
+#define XGI_PMD_OFFSET(address, pg_dir, pg_mid_dir) \\r
+ { \\r
+ pg_mid_dir = pmd_offset(pg_dir, address); \\r
+ }\r
+#define XGI_PMD_UNMAP(pg_mid_dir)\r
+#endif\r
+\r
+#define XGI_PMD_PRESENT(pg_mid_dir) \\r
+ ({ \\r
+ if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \\r
+ { \\r
+ XGI_PMD_UNMAP(pg_mid_dir); \\r
+ pg_mid_dir = NULL; \\r
+ } \\r
+ pg_mid_dir != NULL; \\r
+ })\r
+\r
+#if defined(pte_offset_atomic)\r
+#define XGI_PTE_OFFSET(address, pg_mid_dir, pte) \\r
+ { \\r
+ pte = pte_offset_atomic(pg_mid_dir, address); \\r
+ XGI_PMD_UNMAP(pg_mid_dir); \\r
+ }\r
+#define XGI_PTE_UNMAP(pte) \\r
+ { \\r
+ pte_kunmap(pte); \\r
+ }\r
+#elif defined(pte_offset)\r
+#define XGI_PTE_OFFSET(address, pg_mid_dir, pte) \\r
+ { \\r
+ pte = pte_offset(pg_mid_dir, address); \\r
+ XGI_PMD_UNMAP(pg_mid_dir); \\r
+ }\r
+#define XGI_PTE_UNMAP(pte)\r
+#else\r
+#define XGI_PTE_OFFSET(address, pg_mid_dir, pte) \\r
+ { \\r
+ pte = pte_offset_map(pg_mid_dir, address); \\r
+ XGI_PMD_UNMAP(pg_mid_dir); \\r
+ }\r
+#define XGI_PTE_UNMAP(pte) \\r
+ { \\r
+ pte_unmap(pte); \\r
+ }\r
+#endif\r
+\r
+#define XGI_PTE_PRESENT(pte) \\r
+ ({ \\r
+ if (pte) \\r
+ { \\r
+ if (!pte_present(*pte)) \\r
+ { \\r
+ XGI_PTE_UNMAP(pte); pte = NULL; \\r
+ } \\r
+ } \\r
+ pte != NULL; \\r
+ })\r
+\r
+#define XGI_PTE_VALUE(pte) \\r
+ ({ \\r
+ unsigned long __pte_value = pte_val(*pte); \\r
+ XGI_PTE_UNMAP(pte); \\r
+ __pte_value; \\r
+ })\r
+\r
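+/* Note: despite its name, XGI_PAGE_ALIGN yields the number of pages needed\r
+ * to cover addr (round-up divide), not a page-aligned address, while\r
+ * XGI_MASK_OFFSET yields the offset within a page. */\r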
+#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE)\r
+#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1))\r
+\r
+#if !defined (pgprot_noncached)\r
+static inline pgprot_t pgprot_noncached(pgprot_t old_prot)\r
+ {\r
+ pgprot_t new_prot = old_prot;\r
+ if (boot_cpu_data.x86 > 3)\r
+ new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);\r
+ return new_prot;\r
+ }\r
+#endif\r
+\r
+#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined)\r
+/* Added define for write combining page, only valid if pat enabled. */\r
+#define _PAGE_WRTCOMB _PAGE_PWT\r
+#define __PAGE_KERNEL_WRTCOMB \\r
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED)\r
+#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB)\r
+\r
+static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)\r
+ {\r
+ pgprot_t new_prot = old_prot;\r
+ if (boot_cpu_data.x86 > 3)\r
+ {\r
+ pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);\r
+ new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);\r
+ }\r
+ return new_prot;\r
+ }\r
+#endif\r
+\r
+#if !defined(page_to_pfn)\r
+#define page_to_pfn(page) ((page) - mem_map)\r
+#endif\r
+\r
+#define XGI_VMALLOC(ptr, size) \\r
+ { \\r
+ (ptr) = vmalloc_32(size); \\r
+ }\r
+\r
+#define XGI_VFREE(ptr, size) \\r
+ { \\r
+ vfree((void *) (ptr)); \\r
+ }\r
+\r
+#define XGI_IOREMAP(ptr, physaddr, size) \\r
+ { \\r
+ (ptr) = ioremap(physaddr, size); \\r
+ }\r
+\r
+#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \\r
+ { \\r
+ (ptr) = ioremap_nocache(physaddr, size); \\r
+ }\r
+\r
+#define XGI_IOUNMAP(ptr, size) \\r
+ { \\r
+ iounmap(ptr); \\r
+ }\r
+\r
+/*\r
+ * XGI_KMALLOC uses GFP_KERNEL and may sleep, so call it from process\r
+ * context only; XGI_KMALLOC_ATOMIC uses GFP_ATOMIC and never sleeps.\r
+ */\r
+#define XGI_KMALLOC(ptr, size) \\r
+ { \\r
+ (ptr) = kmalloc(size, GFP_KERNEL); \\r
+ }\r
+\r
+#define XGI_KMALLOC_ATOMIC(ptr, size) \\r
+ { \\r
+ (ptr) = kmalloc(size, GFP_ATOMIC); \\r
+ }\r
+\r
+#define XGI_KFREE(ptr, size) \\r
+ { \\r
+ kfree((void *) (ptr)); \\r
+ }\r
+\r
+#define XGI_GET_FREE_PAGES(ptr, order) \\r
+ { \\r
+ (ptr) = __get_free_pages(GFP_KERNEL, order); \\r
+ }\r
+\r
+#define XGI_FREE_PAGES(ptr, order) \\r
+ { \\r
+ free_pages(ptr, order); \\r
+ }\r
+\r
+typedef struct xgi_pte_s {\r
+ unsigned long phys_addr;\r
+ unsigned long virt_addr;\r
+} xgi_pte_t;\r
+\r
+/*\r
+ * AMD Athlon processors expose a subtle bug in the Linux\r
+ * kernel, that may lead to AGP memory corruption. Recent\r
+ * kernel versions had a workaround for this problem, but\r
+ * 2.4.20 is the first kernel to address it properly. The\r
+ * page_attr API provides the means to solve the problem.\r
+ */\r
+#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT)\r
+static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr)\r
+ {\r
+ struct page *page = virt_to_page(__va(page_ptr->phys_addr));\r
+ change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);\r
+ }\r
+static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr)\r
+ {\r
+ struct page *page = virt_to_page(__va(page_ptr->phys_addr));\r
+ change_page_attr(page, 1, PAGE_KERNEL);\r
+ }\r
+#else\r
+#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list)\r
+#define XGI_SET_PAGE_ATTRIB_CACHED(page_list)\r
+#endif\r
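+\r
+/*\r
+ * Usage sketch (illustrative only): a page allocation path might pair these\r
+ * helpers as follows; the variable names are hypothetical.\r
+ *\r
+ * xgi_pte_t pte;\r
+ * unsigned long virt;\r
+ *\r
+ * XGI_GET_FREE_PAGES(virt, 0); // one kernel page\r
+ * pte.virt_addr = virt;\r
+ * pte.phys_addr = virt_to_phys((void *) virt);\r
+ * XGI_SET_PAGE_ATTRIB_UNCACHED(&pte); // before handing the page to the GPU\r
+ * ...\r
+ * XGI_SET_PAGE_ATTRIB_CACHED(&pte); // restore before freeing\r
+ * XGI_FREE_PAGES(virt, 0);\r
+ */\r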
+\r
+#ifdef KERNEL_2_4\r
+#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count)\r
+#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count)\r
+#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count)\r
+#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v)\r
+\r
+#define XGILockPage(page) set_bit(PG_locked, &(page)->flags)\r
+#define XGIUnlockPage(page) clear_bit(PG_locked, &(page)->flags)\r
+#endif\r
+\r
+#ifdef KERNEL_2_6\r
+/* add for SUSE 9, Jill*/\r
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4)\r
+#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count)\r
+#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count)\r
+#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count)\r
+#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v)\r
+#else\r
+#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count)\r
+#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count)\r
+#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count)\r
+#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v)\r
+#endif\r
+#define XGILockPage(page) SetPageLocked(page)\r
+#define XGIUnlockPage(page) ClearPageLocked(page)\r
+#endif\r
+\r
+\r
+/*\r
+ * hide a pointer to struct xgi_info_t in a file-private info\r
+ */\r
+\r
+typedef struct\r
+{\r
+ void *info;\r
+ U32 num_events;\r
+ spinlock_t fp_lock;\r
+ wait_queue_head_t wait_queue;\r
+} xgi_file_private_t;\r
+\r
+#define FILE_PRIVATE(filp) ((filp)->private_data)\r
+\r
+#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp))\r
+\r
+/* for the card devices */\r
+#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info)\r
+\r
+#ifdef KERNEL_2_0\r
+#define INODE_FROM_FP(filp) ((filp)->f_inode)\r
+#else\r
+#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode)\r
+#endif\r
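+\r
+/*\r
+ * Usage sketch (illustrative, not the driver's actual open handler): the\r
+ * per-open private data is allocated at open time, and every later file\r
+ * operation reaches its xgi_info_t through XGI_INFO_FROM_FP(filp).\r
+ *\r
+ * xgi_file_private_t *fp;\r
+ * XGI_KMALLOC(fp, sizeof(xgi_file_private_t));\r
+ * if (fp == NULL)\r
+ * return -ENOMEM;\r
+ * memset(fp, 0, sizeof(xgi_file_private_t));\r
+ * fp->info = info; // the device this open() resolved to\r
+ * spin_lock_init(&fp->fp_lock);\r
+ * init_waitqueue_head(&fp->wait_queue);\r
+ * FILE_PRIVATE(filp) = fp;\r
+ */\r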
+\r
+#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val))\r
+#define XGI_ATOMIC_INC(data) atomic_inc(&(data))\r
+#define XGI_ATOMIC_DEC(data) atomic_dec(&(data))\r
+#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data))\r
+#define XGI_ATOMIC_READ(data) atomic_read(&(data))\r
+\r
+/*\r
+ * lock-related functions that should only be called from this file\r
+ */\r
+#define xgi_init_lock(lock) spin_lock_init(&lock)\r
+#define xgi_lock(lock) spin_lock(&lock)\r
+#define xgi_unlock(lock) spin_unlock(&lock)\r
+#define xgi_down(lock) down(&lock)\r
+#define xgi_up(lock) up(&lock)\r
+\r
+#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags)\r
+#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags)\r
+\r
+#endif\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#include "xgi_types.h"\r
+#include "xgi_linux.h"\r
+#include "xgi_drv.h"\r
+#include "xgi_regs.h"\r
+#include "xgi_pcie.h"\r
+\r
+void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req)\r
+{\r
+ req->device_id = info->device_id;\r
+ req->device_name[0] = 'x';\r
+ req->device_name[1] = 'g';\r
+ req->device_name[2] = '4';\r
+ req->device_name[3] = '7';\r
+ req->vendor_id = info->vendor_id;\r
+ req->curr_display_mode = 0;\r
+ req->fb_size = info->fb.size;\r
+ req->sarea_bus_addr = info->sarea_info.bus_addr;\r
+ req->sarea_size = info->sarea_info.size;\r
+}\r
+\r
+void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req)\r
+{\r
+ req->mmioBase = (void *)info->mmio.base;\r
+ req->size = info->mmio.size;\r
+}\r
+\r
+void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req)\r
+{\r
+ info->scrn_info.scrn_start = req->scrn_start;\r
+ info->scrn_info.scrn_xres = req->scrn_xres;\r
+ info->scrn_info.scrn_yres = req->scrn_yres;\r
+ info->scrn_info.scrn_bpp = req->scrn_bpp;\r
+ info->scrn_info.scrn_pitch = req->scrn_pitch;\r
+\r
+ XGI_INFO("info->scrn_info.scrn_start: 0x%lx"\r
+ "info->scrn_info.scrn_xres: 0x%lx"\r
+ "info->scrn_info.scrn_yres: 0x%lx"\r
+ "info->scrn_info.scrn_bpp: 0x%lx"\r
+ "info->scrn_info.scrn_pitch: 0x%lx\n",\r
+ info->scrn_info.scrn_start,\r
+ info->scrn_info.scrn_xres,\r
+ info->scrn_info.scrn_yres,\r
+ info->scrn_info.scrn_bpp,\r
+ info->scrn_info.scrn_pitch);\r
+}\r
+\r
+void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req)\r
+{\r
+ req->scrn_start = info->scrn_info.scrn_start;\r
+ req->scrn_xres = info->scrn_info.scrn_xres;\r
+ req->scrn_yres = info->scrn_info.scrn_yres;\r
+ req->scrn_bpp = info->scrn_info.scrn_bpp;\r
+ req->scrn_pitch = info->scrn_info.scrn_pitch;\r
+\r
+ XGI_INFO("req->scrn_start: 0x%lx"\r
+ "req->scrn_xres: 0x%lx"\r
+ "req->scrn_yres: 0x%lx"\r
+ "req->scrn_bpp: 0x%lx"\r
+ "req->scrn_pitch: 0x%lx\n",\r
+ req->scrn_start,\r
+ req->scrn_xres,\r
+ req->scrn_yres,\r
+ req->scrn_bpp,\r
+ req->scrn_pitch);\r
+}\r
+\r
+void xgi_ge_reset(xgi_info_t *info)\r
+{\r
+ xgi_disable_ge(info);\r
+ xgi_enable_ge(info);\r
+}\r
+\r
+void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req)\r
+{\r
+ info->sarea_info.bus_addr = req->bus_addr;\r
+ info->sarea_info.size = req->size;\r
+ XGI_INFO("info->sarea_info.bus_addr: 0x%lx"\r
+ "info->sarea_info.size: 0x%lx\n",\r
+ info->sarea_info.bus_addr,\r
+ info->sarea_info.size);\r
+}\r
+\r
+/*\r
+ * irq functions\r
+ */\r
+#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff\r
+\r
+static U32 s_invalid_begin = 0;\r
+\r
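+/*\r
+ * GE interrupt handling: the 3D engine status block lives at MMIO + 0x2800,\r
+ * with the interrupt status in dword index 4. A stall interrupt (bit 0x1000)\r
+ * is acknowledged immediately; when auto reset is enabled, the handler probes\r
+ * the read-back/RO/RW channels through ports 0x235c/0x2360 to rule out a\r
+ * transient bus condition, and only after three or more stall interrupts\r
+ * arrive closer together than STALL_INTERRUPT_RESET_THRESHOLD TSC ticks does\r
+ * it reset the GE (via 0xb057, falling back to CRTC register 3x5.36). An\r
+ * "invalid begin" interrupt (bit 0x1) is simply counted and acknowledged.\r
+ */\r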
+BOOL xgi_ge_irq_handler(xgi_info_t *info)\r
+{\r
+ volatile U8 *mmio_vbase = info->mmio.vbase;\r
+ volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800);\r
+ U32 int_status = ge_3d_status[4]; // interrupt status\r
+ U32 auto_reset_count = 0;\r
+ BOOL is_support_auto_reset = FALSE;\r
+\r
+ // Check GE on/off\r
+ if (0 == (0xffffc0f0 & int_status))\r
+ {\r
+ U32 old_ge_status = ge_3d_status[0x00];\r
+ U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a];\r
+ if (0 != (0x1000 & int_status))\r
+ {\r
+ // We got GE stall interrupt.\r
+ ge_3d_status[0x04] = int_status | 0x04000000;\r
+\r
+ if (TRUE == is_support_auto_reset)\r
+ {\r
+ BOOL is_wrong_signal = FALSE;\r
+ static U32 last_int_tick_low, last_int_tick_high;\r
+ static U32 new_int_tick_low, new_int_tick_high;\r
+ static U32 continue_int_count = 0;\r
+ // OE II is busy.\r
+ while (old_ge_status & 0x001c0000)\r
+ {\r
+ U16 check;\r
+ // Check Read back status\r
+ *(mmio_vbase + 0x235c) = 0x80;\r
+ check = *((volatile U16*)(mmio_vbase + 0x2360));\r
+ if ((check & 0x3f) != ((check & 0x3f00) >> 8))\r
+ {\r
+ is_wrong_signal = TRUE;\r
+ break;\r
+ }\r
+ // Check RO channel\r
+ *(mmio_vbase + 0x235c) = 0x83;\r
+ check = *((volatile U16*)(mmio_vbase + 0x2360));\r
+ if ((check & 0x0f) != ((check & 0xf0) >> 4))\r
+ {\r
+ is_wrong_signal = TRUE;\r
+ break;\r
+ }\r
+ // Check RW channel\r
+ *(mmio_vbase + 0x235c) = 0x88;\r
+ check = *((volatile U16*)(mmio_vbase + 0x2360));\r
+ if ((check & 0x0f) != ((check & 0xf0) >> 4))\r
+ {\r
+ is_wrong_signal = TRUE;\r
+ break;\r
+ }\r
+ // Check RO channel outstanding\r
+ *(mmio_vbase + 0x235c) = 0x8f;\r
+ check = *((volatile U16*)(mmio_vbase + 0x2360));\r
+ if (0 != (check & 0x3ff))\r
+ {\r
+ is_wrong_signal = TRUE;\r
+ break;\r
+ }\r
+ // Check RW channel outstanding\r
+ *(mmio_vbase + 0x235c) = 0x90;\r
+ check = *((volatile U16*)(mmio_vbase + 0x2360));\r
+ if (0 != (check & 0x3ff))\r
+ {\r
+ is_wrong_signal = TRUE;\r
+ break;\r
+ }\r
+ // No pending PCIE request. GE stall.\r
+ break;\r
+ }\r
+\r
+ if (is_wrong_signal)\r
+ {\r
+ // Nothing but skip.\r
+ }\r
+ else if (0 == continue_int_count++)\r
+ {\r
+ rdtsc(last_int_tick_low, last_int_tick_high);\r
+ }\r
+ else\r
+ {\r
+ rdtscl(new_int_tick_low);\r
+ if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD)\r
+ {\r
+ continue_int_count = 0;\r
+ }\r
+ else if (continoue_int_count >= 3)\r
+ {\r
+ continue_int_count = 0;\r
+\r
+ // GE Hung up, need reset.\r
+ XGI_INFO("Reset GE!\n");\r
+\r
+ *(mmio_vbase + 0xb057) = 8;\r
+ int time_out = 0xffff;\r
+ while (0 != (ge_3d_status[0x00] & 0xf0000000))\r
+ {\r
+ while (0 != ((--time_out) & 0xfff));\r
+ if (0 == time_out)\r
+ {\r
+ XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]);\r
+ *(mmio_vbase + 0xb057) = 0;\r
+ // Have to use 3x5.36 to reset.\r
+ // Save and close dynamic gating\r
+ U8 old_3ce = *(mmio_vbase + 0x3ce);\r
+ *(mmio_vbase + 0x3ce) = 0x2a;\r
+ U8 old_3cf = *(mmio_vbase + 0x3cf);\r
+ *(mmio_vbase + 0x3cf) = old_3cf & 0xfe;\r
+ // Reset GE\r
+ U8 old_index = *(mmio_vbase + 0x3d4);\r
+ *(mmio_vbase + 0x3d4) = 0x36;\r
+ U8 old_36 = *(mmio_vbase + 0x3d5);\r
+ *(mmio_vbase + 0x3d5) = old_36 | 0x10;\r
+ while (0 != ((--time_out) & 0xfff));\r
+ *(mmio_vbase + 0x3d5) = old_36;\r
+ *(mmio_vbase + 0x3d4) = old_index;\r
+ // Restore dynamic gating\r
+ *(mmio_vbase + 0x3cf) = old_3cf;\r
+ *(mmio_vbase + 0x3ce) = old_3ce;\r
+ break;\r
+ }\r
+ }\r
+ *(mmio_vbase + 0xb057) = 0;\r
+\r
+ // Increase Reset counter\r
+ auto_reset_count++;\r
+ }\r
+ }\r
+ }\r
+ return TRUE;\r
+ }\r
+ else if (0 != (0x1 & int_status))\r
+ {\r
+ s_invalid_begin++;\r
+ ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000;\r
+ return TRUE;\r
+ }\r
+ }\r
+ return FALSE;\r
+}\r
+\r
+BOOL xgi_crt_irq_handler(xgi_info_t *info)\r
+{\r
+ BOOL ret = FALSE;\r
+ U8 *mmio_vbase = info->mmio.vbase;\r
+ U32 device_status = 0;\r
+ U32 hw_status = 0;\r
+ U8 save_3ce = bReadReg(0x3ce);\r
+\r
+\r
+ if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened\r
+ {\r
+ U8 op3cf_3d;\r
+ U8 op3cf_37;\r
+\r
+ // What happened?\r
+ op3cf_37 = bIn3cf(0x37);\r
+\r
+#if 0\r
+ if (op3cf_37 & 0x04)\r
+ device_status |= GDEVST_CONNECT;\r
+ else\r
+ device_status &= ~GDEVST_CONNECT;\r
+\r
+ device_status |= GDEVST_DEVICE_CHANGED;\r
+ hw_status |= HWST_DEVICE_CHANGED;\r
+#endif\r
+ // Clear CRT interrupt\r
+ op3cf_3d = bIn3cf(0x3d);\r
+ bOut3cf(0x3d, (op3cf_3d | 0x04));\r
+ bOut3cf(0x3d, (op3cf_3d & ~0x04));\r
+ ret = TRUE;\r
+ }\r
+ bWriteReg(0x3ce, save_3ce);\r
+\r
+ return (ret);\r
+}\r
+\r
+BOOL xgi_dvi_irq_handler(xgi_info_t *info)\r
+{\r
+ BOOL ret = FALSE;\r
+ U8 *mmio_vbase = info->mmio.vbase;\r
+ U32 device_status = 0;\r
+ U32 hw_status = 0;\r
+ U8 save_3ce = bReadReg(0x3ce);\r
+\r
+ if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened\r
+ {\r
+ U8 op3cf_39;\r
+ U8 op3cf_37;\r
+ U8 op3x5_5a;\r
+ U8 save_3x4 = bReadReg(0x3d4);\r
+\r
+ // What happened?\r
+ op3cf_37 = bIn3cf(0x37);\r
+#if 0\r
+ //Also update our internal flag\r
+ if (op3cf_37 & 0x10) // Second Monitor plugged In\r
+ {\r
+ device_status |= GDEVST_CONNECT;\r
+ //Because currently we cannot determine whether DVI digital
+ //or DVI analog is connected from the DVI interrupt alone,
+ //we should still call the BIOS to check it when the utility asks us
+ device_status &= ~GDEVST_CHECKED;\r
+ }\r
+ else\r
+ {\r
+ device_status &= ~GDEVST_CONNECT;\r
+ }\r
+#endif\r
+ //Notify BIOS that DVI plug/unplug happened\r
+ op3x5_5a = bIn3x5(0x5a);\r
+ bOut3x5(0x5a, op3x5_5a & 0xf7);\r
+\r
+ bWriteReg(0x3d4, save_3x4);\r
+\r
+ //device_status |= GDEVST_DEVICE_CHANGED;\r
+ //hw_status |= HWST_DEVICE_CHANGED;\r
+\r
+ // Clear DVI interrupt\r
+ op3cf_39 = bIn3cf(0x39);\r
+ bOut3cf(0x39, (op3cf_39 & ~0x01)); //Set 3cf.39 bit 0 to 0
+ bOut3cf(0x39, (op3cf_39 | 0x01 )); //Set 3cf.39 bit 0 to 1
+\r
+ ret = TRUE;\r
+ }\r
+ bWriteReg(0x3ce, save_3ce);\r
+\r
+ return (ret);\r
+}\r
+\r
+void xgi_dump_register(xgi_info_t *info)\r
+{\r
+ int i, j;\r
+ unsigned char temp;\r
+\r
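+ /*
+ * Dump the indexed VGA register banks (3C5/3D5/3CF) and several MMIO
+ * ranges as 16-column hex tables: the first printed row is the column
+ * index, and each following row is labelled with the high nibble of
+ * the register index.
+ */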
+ // 0x3C5\r
+ printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5);\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bIn3c5(i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ // 0x3D5\r
+ printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bIn3x5(i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ // 0x3CF\r
+ printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bIn3cf(i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x5; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bReadReg(0xB000 + i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ printk("\r\n==================0x%x===============\r\n", 0x2200);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0xB; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bReadReg(0x2200 + i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ printk("\r\n==================0x%x===============\r\n", 0x2300);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x7; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bReadReg(0x2300 + i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ printk("\r\n==================0x%x===============\r\n", 0x2400);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bReadReg(0x2400 + i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+\r
+ printk("\r\n==================0x%x===============\r\n", 0x2800);\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ if(i == 0)\r
+ {\r
+ printk("%5x", i);\r
+ }\r
+ else\r
+ {\r
+ printk("%3x", i);\r
+ }\r
+ }\r
+ printk("\r\n");\r
+\r
+ for(i=0; i<0x10; i++)\r
+ {\r
+ printk("%1x ", i);\r
+\r
+ for(j=0; j<0x10; j++)\r
+ {\r
+ temp = bReadReg(0x2800 + i*0x10 + j);\r
+ printk("%3x", temp);\r
+ }\r
+ printk("\r\n");\r
+ }\r
+}\r
+\r
+void xgi_restore_registers(xgi_info_t *info)\r
+{\r
+ bOut3x5(0x13, 0);\r
+ bOut3x5(0x8b, 2);\r
+}\r
+\r
+void xgi_waitfor_pci_idle(xgi_info_t *info)\r
+{\r
+#define WHOLD_GE_STATUS 0x2800\r
+#define IDLE_MASK (~0x90200000)
+\r
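+ /*
+ * Poll the GE status register until it reads idle (no bits set outside
+ * the masked-off pattern) five times in a row; any busy reading resets
+ * the count. Note that this loop has no timeout.
+ */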
+ int idleCount = 0;\r
+ while(idleCount < 5)\r
+ {\r
+ if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK)\r
+ {\r
+ idleCount = 0;\r
+ }\r
+ else\r
+ {\r
+ idleCount ++;\r
+ }\r
+ }\r
+}\r
+\r
+int xgi_get_cpu_id(struct cpu_info_s *arg)\r
+{\r
+ int op = arg->_eax;\r
+ __asm__("cpuid"\r
+ : "=a" (arg->_eax),\r
+ "=b" (arg->_ebx),\r
+ "=c" (arg->_ecx),\r
+ "=d" (arg->_edx)\r
+ : "0" (op));\r
+\r
+ XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n",\r
+ op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx);\r
+ return 0;
+}
+\r
+/*memory collect function*/\r
+extern struct list_head xgi_mempid_list;\r
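+/*
+ * Walk the global per-process allocation list and reclaim blocks whose
+ * owning process has exited: LOCAL blocks are returned to the frame
+ * buffer heap, NON_LOCAL blocks to the PCIE heap, and vertex-buffer
+ * entries (bus_addr == 0xFFFFFFFF) are simply unlinked. *pcnt returns
+ * the number of still-active processes holding a vertex-buffer entry.
+ */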
+void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt)\r
+{\r
+ xgi_mem_pid_t *mempid_block;\r
+ struct list_head *mempid_list;\r
+ struct task_struct *p,*find;\r
+ unsigned int cnt = 0;\r
+\r
+ mempid_list = xgi_mempid_list.next;\r
+\r
+ while (mempid_list != &xgi_mempid_list)\r
+ {\r
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);\r
+ mempid_list = mempid_list->next;\r
+\r
+ find = NULL;\r
+ XGI_SCAN_PROCESS(p)\r
+ {\r
+ if (p->pid == mempid_block->pid)\r
+ {\r
+ XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr);\r
+ find = p;\r
+ if (mempid_block->bus_addr == 0xFFFFFFFF)\r
+ ++cnt;\r
+ break;\r
+ }\r
+ }\r
+ if (!find)\r
+ {\r
+ if (mempid_block->location == LOCAL)\r
+ {\r
+ XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr);\r
+ xgi_fb_free(info, mempid_block->bus_addr);\r
+ }\r
+ else if (mempid_block->bus_addr != 0xFFFFFFFF)\r
+ {\r
+ XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr);\r
+ xgi_pcie_free(info, mempid_block->bus_addr);\r
+ }\r
+ else\r
+ {\r
+ /*only delete the memory block*/\r
+ list_del(&mempid_block->list);\r
+ XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid);\r
+ kfree(mempid_block);\r
+ }\r
+ }\r
+ }\r
+ *pcnt = cnt;\r
+}\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+\r
+#ifndef _XGI_MISC_H_\r
+#define _XGI_MISC_H_\r
+\r
+extern void xgi_dump_register(xgi_info_t *info);\r
+extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req);\r
+extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req);\r
+extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req);\r
+extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req);\r
+extern void xgi_ge_reset(xgi_info_t *info);\r
+extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req);\r
+extern int xgi_get_cpu_id(struct cpu_info_s *arg);\r
+\r
+extern void xgi_restore_registers(xgi_info_t *info);\r
+extern BOOL xgi_ge_irq_handler(xgi_info_t *info);\r
+extern BOOL xgi_crt_irq_handler(xgi_info_t *info);\r
+extern BOOL xgi_dvi_irq_handler(xgi_info_t *info);\r
+extern void xgi_waitfor_pci_idle(xgi_info_t *info);\r
+\r
+\r
+#endif\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#include "xgi_types.h"\r
+#include "xgi_linux.h"\r
+#include "xgi_drv.h"\r
+#include "xgi_regs.h"\r
+#include "xgi_pcie.h"\r
+#include "xgi_misc.h"\r
+\r
+static xgi_pcie_heap_t *xgi_pcie_heap = NULL;\r
+static kmem_cache_t *xgi_pcie_cache_block = NULL;\r
+static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL;\r
+static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL;\r
+static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL;\r
+extern struct list_head xgi_mempid_list;\r
+\r
+static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)\r
+{\r
+ struct page *page;\r
+ unsigned long page_addr = 0;\r
+ unsigned long page_count = 0;\r
+ int i;\r
+\r
+ page_count = (1 << page_order);\r
+ page_addr = __get_free_pages(GFP_KERNEL, page_order);\r
+\r
+ if (page_addr == 0UL)\r
+ {\r
+ XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",\r
+ page_count);\r
+ return 0;\r
+ }\r
+\r
+ page = virt_to_page(page_addr);\r
+\r
+ for (i = 0; i < page_count; i++, page++)\r
+ {\r
+ XGI_INC_PAGE_COUNT(page);\r
+ XGILockPage(page);\r
+ }\r
+\r
+ XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",\r
+ page_count, page_order, page_addr);\r
+ return page_addr;\r
+}\r
+\r
+static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)\r
+{\r
+ struct page *page;\r
+ unsigned long page_count = 0;\r
+ int i;\r
+\r
+ page_count = (1 << page_order);\r
+ page = virt_to_page(page_addr);\r
+\r
+ for (i = 0; i < page_count; i++, page++)\r
+ {\r
+ XGI_DEC_PAGE_COUNT(page);\r
+ XGIUnlockPage(page);\r
+ }\r
+\r
+ free_pages(page_addr, page_order);\r
+}\r
+\r
+static int xgi_pcie_lut_init(xgi_info_t *info)\r
+{\r
+ unsigned char *page_addr = NULL;\r
+ unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;\r
+ unsigned long count = 0;\r
+ u8 temp = 0;\r
+\r
+ /* Jong 06/06/2006 */\r
+ unsigned long pcie_aperture_size;\r
+\r
+ info->pcie.size = 128 * 1024 * 1024;\r
+\r
+ /* Get current FB aperture size */\r
+ temp = In3x5(0x27);\r
+ XGI_INFO("In3x5(0x27): 0x%x \n", temp);\r
+\r
+ if (temp & 0x01) /* 256MB; Jong 06/05/2006; 0x10000000 */\r
+ {\r
+ /* Jong 06/06/2006; allocate memory */\r
+ pcie_aperture_size=256 * 1024 * 1024;\r
+ /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */\r
+ }\r
+ else /* 128MB; Jong 06/05/2006; 0x08000000 */\r
+ {\r
+ /* Jong 06/06/2006; allocate memory */\r
+ pcie_aperture_size=128 * 1024 * 1024;\r
+ /* info->pcie.base = 128 * 1024 * 1024; */\r
+ }\r
+\r
+ /* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */\r
+ /* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */\r
+ /* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */\r
+ /* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */\r
+ info->pcie.base=pcie_aperture_size; /* works */\r
+ /* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */\r
+ /* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */\r
+\r
+ XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);\r
+\r
+\r
+ /* Get current lookup table page size */\r
+ temp = bReadReg(0xB00C);\r
+ if (temp & 0x04) /* 8KB */\r
+ {\r
+ info->lutPageSize = 8 * 1024;\r
+ }\r
+ else /* 4KB */\r
+ {\r
+ info->lutPageSize = 4 * 1024;\r
+ }\r
+\r
+ XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);\r
+\r
+#if 0\r
+ /* Get current lookup table location */\r
+ temp = bReadReg(0xB00C);\r
+ if (temp & 0x02) /* LFB */\r
+ {\r
+ info->isLUTInLFB = TRUE;\r
+ /* Current we only support lookup table in LFB */\r
+ temp &= 0xFD;\r
+ bWriteReg(0xB00C, temp);\r
+ info->isLUTInLFB = FALSE;\r
+ }\r
+ else /* SFB */\r
+ {\r
+ info->isLUTInLFB = FALSE;\r
+ }\r
+\r
+ XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);\r
+\r
+ /* Get current SDFB page size */\r
+ temp = bReadReg(0xB00C);\r
+ if (temp & 0x08) /* 8MB */\r
+ {\r
+ info->sdfbPageSize = 8 * 1024 * 1024;\r
+ }\r
+ else /* 4MB */\r
+ {\r
+ info->sdfbPageSize = 4 * 1024 * 1024;\r
+ }\r
+#endif\r
+ pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;\r
+\r
+ /*\r
+ * Allocate memory for PCIE GART table;\r
+ */\r
+ lutEntryNum = pciePageCount;\r
+ lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;\r
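+ /*
+ * The sizing above keeps one 4-byte LUT entry per PCIE aperture page,
+ * so the table needs lutEntryNum * 4 bytes rounded up to whole pages.
+ * For a 128MB aperture with 4KB pages that is 32768 entries = 128KB,
+ * i.e. 32 pages of LUT.
+ */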
+\r
+ /* get page_order based on page_count */
+ count = lutPageCount;\r
+ for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder);\r
+\r
+ if ((lutPageCount << 1) == (1 << lutPageOrder))\r
+ {\r
+ lutPageOrder -= 1;\r
+ }\r
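+ /*
+ * The loop above computes the smallest order with 2^order >= lutPageCount
+ * (a ceil(log2)): e.g. lutPageCount = 3 gives order 2 (4 pages), while an
+ * exact power of two such as 4 is corrected back down to order 2.
+ */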
+\r
+ XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",\r
+ lutEntryNum, lutPageCount, lutPageOrder);\r
+\r
+ info->lutPageOrder = lutPageOrder;\r
+ page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);\r
+\r
+ if (!page_addr)\r
+ {\r
+ XGI_ERROR("cannot allocate PCIE lut page!\n");\r
+ goto fail;\r
+ }\r
+ info->lut_base = (unsigned long *)page_addr;\r
+\r
+ XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",\r
+ page_addr, virt_to_phys(page_addr));\r
+\r
+ XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",\r
+ info->lut_base, __pa(info->lut_base), info->lutPageOrder);\r
+\r
+ /*\r
+ * clean all PCIE GART Entry\r
+ */\r
+ memset(page_addr, 0, PAGE_SIZE << lutPageOrder);\r
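+ /*
+ * The GART table lives in cacheable system memory, so flush the CPU
+ * caches (wbinvd on x86, a memory barrier elsewhere) before pointing
+ * the hardware at it; otherwise the GPU could see stale entries.
+ */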
+\r
+#if defined(__i386__) || defined(__x86_64__)\r
+ asm volatile ( "wbinvd" ::: "memory" );\r
+#else\r
+ mb();\r
+#endif\r
+\r
+ /* Set GART in SFB */\r
+ bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);\r
+ /* Set GART base address to HW */\r
+ dwWriteReg(0xB034, __pa(info->lut_base));\r
+\r
+ return 1;\r
+fail:\r
+ return 0;\r
+}\r
+\r
+static void xgi_pcie_lut_cleanup(xgi_info_t *info)\r
+{\r
+ if (info->lut_base)\r
+ {\r
+ XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",\r
+ info->lut_base, info->lutPageOrder);\r
+ xgi_pcie_lut_free((unsigned long)info->lut_base, info->lutPageOrder);\r
+ info->lut_base = NULL;\r
+ }\r
+}\r
+\r
+static xgi_pcie_block_t *xgi_pcie_new_node(void)\r
+{\r
+ xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL);\r
+ if (block == NULL)\r
+ {\r
+ return NULL;\r
+ }\r
+\r
+ block->offset = 0; /* block's offset in pcie memory, begin from 0 */\r
+ block->size = 0; /* The block size. */\r
+ block->bus_addr = 0; /* CPU access address/bus address */\r
+ block->hw_addr = 0; /* GE access address */\r
+ block->page_count = 0;\r
+ block->page_order = 0;\r
+ block->page_block = NULL;\r
+ block->page_table = NULL;\r
+ block->owner = PCIE_INVALID;\r
+\r
+ return block;\r
+}\r
+\r
+static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block)\r
+{\r
+ struct page *page;\r
+ xgi_page_block_t *page_block = block->page_block;\r
+ xgi_page_block_t *free_block;\r
+ unsigned long page_count = 0;\r
+ int i;\r
+\r
+ //XGI_INFO("block->page_block: 0x%p \n", block->page_block);\r
+ while (page_block)\r
+ {\r
+ page_count = page_block->page_count;\r
+\r
+ page = virt_to_page(page_block->virt_addr);\r
+ for (i = 0; i < page_count; i++, page++)\r
+ {\r
+ XGI_DEC_PAGE_COUNT(page);\r
+ XGIUnlockPage(page);\r
+ }\r
+ free_pages(page_block->virt_addr, page_block->page_order);\r
+\r
+ page_block->phys_addr = 0;\r
+ page_block->virt_addr = 0;\r
+ page_block->page_count = 0;\r
+ page_block->page_order = 0;\r
+\r
+ free_block = page_block;\r
+ page_block = page_block->next;\r
+ //XGI_INFO("free free_block: 0x%p \n", free_block);\r
+ kfree(free_block);\r
+ free_block = NULL;\r
+ }\r
+\r
+ if (block->page_table)\r
+ {\r
+ //XGI_INFO("free block->page_table: 0x%p \n", block->page_table);\r
+ kfree(block->page_table);\r
+ block->page_table = NULL;\r
+ }\r
+}\r
+\r
+int xgi_pcie_heap_init(xgi_info_t *info)\r
+{\r
+ xgi_pcie_block_t *block;\r
+\r
+ if (!xgi_pcie_lut_init(info))\r
+ {\r
+ XGI_ERROR("xgi_pcie_lut_init failed\n");\r
+ return 0;\r
+ }\r
+\r
+ xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL);\r
+ if(!xgi_pcie_heap)\r
+ {\r
+ XGI_ERROR("xgi_pcie_heap alloc failed\n");\r
+ goto fail1;\r
+ }\r
+ INIT_LIST_HEAD(&xgi_pcie_heap->free_list);\r
+ INIT_LIST_HEAD(&xgi_pcie_heap->used_list);\r
+ INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);\r
+\r
+ xgi_pcie_heap->max_freesize = info->pcie.size;\r
+\r
+ xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t),\r
+ 0, SLAB_HWCACHE_ALIGN, NULL, NULL);\r
+\r
+ if (NULL == xgi_pcie_cache_block)\r
+ {\r
+ XGI_ERROR("Fail to creat xgi_pcie_block\n");\r
+ goto fail2;\r
+ }\r
+\r
+ block = (xgi_pcie_block_t *)xgi_pcie_new_node();\r
+ if (!block)\r
+ {\r
+ XGI_ERROR("xgi_pcie_new_node failed\n");\r
+ goto fail3;\r
+ }\r
+\r
+ block->offset = 0; /* block's offset in pcie memory, begin from 0 */\r
+ block->size = info->pcie.size;\r
+\r
+ list_add(&block->list, &xgi_pcie_heap->free_list);\r
+\r
+ XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size);\r
+ return 1;\r
+fail3:\r
+ if (xgi_pcie_cache_block)\r
+ {\r
+ kmem_cache_destroy(xgi_pcie_cache_block);\r
+ xgi_pcie_cache_block = NULL;\r
+ }\r
+\r
+fail2:\r
+ if(xgi_pcie_heap)\r
+ {\r
+ kfree(xgi_pcie_heap);\r
+ xgi_pcie_heap = NULL;\r
+ }\r
+fail1:\r
+ xgi_pcie_lut_cleanup(info);\r
+ return 0;\r
+}\r
+\r
+void xgi_pcie_heap_check(void)\r
+{\r
+ struct list_head *useList, *temp;\r
+ xgi_pcie_block_t *block;\r
+ unsigned int ownerIndex;\r
+ char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"};\r
+\r
+ if (xgi_pcie_heap)\r
+ {\r
+ useList = &xgi_pcie_heap->used_list;\r
+ temp = useList->next;\r
+ XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize);\r
+ while (temp != useList)\r
+ {\r
+ block = list_entry(temp, struct xgi_pcie_block_s, list);\r
+ if (block->owner == PCIE_2D)\r
+ ownerIndex = 0;\r
+ else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D)\r
+ ownerIndex = 5;\r
+ else\r
+ ownerIndex = block->owner - PCIE_3D + 1;\r
+ XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n",\r
+ ownerStr[ownerIndex], block->offset, block->size);\r
+ temp = temp->next;\r
+ }\r
+\r
+ }\r
+}\r
+\r
+\r
+void xgi_pcie_heap_cleanup(xgi_info_t *info)\r
+{\r
+ struct list_head *free_list, *temp;\r
+ xgi_pcie_block_t *block;\r
+ int j;\r
+\r
+ xgi_pcie_lut_cleanup(info);\r
+ XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");\r
+\r
+ if (xgi_pcie_heap)\r
+ {\r
+ free_list = &xgi_pcie_heap->free_list;\r
+ for (j = 0; j < 3; j++, free_list++)\r
+ {\r
+ temp = free_list->next;\r
+\r
+ while (temp != free_list)\r
+ {\r
+ block = list_entry(temp, struct xgi_pcie_block_s, list);\r
+ XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n",\r
+ j, block->offset, block->size);\r
+ xgi_pcie_block_stuff_free(block);\r
+ block->bus_addr = 0;\r
+ block->hw_addr = 0;\r
+\r
+ temp = temp->next;\r
+ //XGI_INFO("No. %d free block: 0x%p \n", j, block);\r
+ kmem_cache_free(xgi_pcie_cache_block, block);\r
+ block = NULL;\r
+ }\r
+ }\r
+\r
+ XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);\r
+ kfree(xgi_pcie_heap);\r
+ xgi_pcie_heap = NULL;\r
+ }\r
+\r
+ if (xgi_pcie_cache_block)\r
+ {\r
+ kmem_cache_destroy(xgi_pcie_cache_block);\r
+ xgi_pcie_cache_block = NULL;\r
+ }\r
+}\r
+\r
+\r
+static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info,\r
+ unsigned long originalSize,\r
+ enum PcieOwner owner)\r
+{\r
+ struct list_head *free_list;\r
+ xgi_pcie_block_t *block, *used_block, *free_block;\r
+ xgi_page_block_t *page_block, *prev_page_block;\r
+ struct page *page;\r
+ unsigned long page_order = 0, count = 0, index =0;\r
+ unsigned long page_addr = 0;\r
+ unsigned long *lut_addr = NULL;\r
+ unsigned long lut_id = 0;\r
+ unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;\r
+ int i, j, page_count = 0;\r
+ int temp = 0;\r
+\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");\r
+ XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size);\r
+\r
+ if (owner == PCIE_3D)\r
+ {\r
+ if (xgi_pcie_vertex_block)\r
+ {\r
+ XGI_INFO("PCIE Vertex has been created, return directly.\n");\r
+ return xgi_pcie_vertex_block;\r
+ }\r
+ }\r
+\r
+ if (owner == PCIE_3D_CMDLIST)\r
+ {\r
+ if (xgi_pcie_cmdlist_block)\r
+ {\r
+ XGI_INFO("PCIE Cmdlist has been created, return directly.\n");\r
+ return xgi_pcie_cmdlist_block;\r
+ }\r
+ }\r
+\r
+ if (owner == PCIE_3D_SCRATCHPAD)\r
+ {\r
+ if (xgi_pcie_scratchpad_block)\r
+ {\r
+ XGI_INFO("PCIE Scratchpad has been created, return directly.\n");\r
+ return xgi_pcie_scratchpad_block;\r
+ }\r
+ }\r
+\r
+ if (size == 0)\r
+ {\r
+ XGI_ERROR("size == 0 \n");\r
+ return (NULL);\r
+ }\r
+\r
+ XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);\r
+ if (size > xgi_pcie_heap->max_freesize)\r
+ {\r
+ XGI_ERROR("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",\r
+ size, xgi_pcie_heap->max_freesize);\r
+ return (NULL);\r
+ }\r
+\r
+ /* Jong 05/30/2006; find the first free block that has enough space */
+ free_list = xgi_pcie_heap->free_list.next;\r
+ while (free_list != &xgi_pcie_heap->free_list)\r
+ {\r
+ //XGI_INFO("free_list: 0x%px \n", free_list);\r
+ block = list_entry(free_list, struct xgi_pcie_block_s, list);\r
+ if (size <= block->size)\r
+ {\r
+ break;\r
+ }\r
+ free_list = free_list->next;\r
+ }\r
+\r
+ if (free_list == &xgi_pcie_heap->free_list)\r
+ {\r
+ XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024);\r
+ return (NULL);\r
+ }\r
+\r
+ free_block = block;\r
+ XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",\r
+ size, free_block->offset, free_block->size);\r
+\r
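+ /*
+ * First-fit allocation: if the request exactly matches the free block,
+ * that block is moved to the used list as-is; otherwise a new node is
+ * carved off the front of the free block and the free block shrinks
+ * accordingly.
+ */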
+ if (size == free_block->size)\r
+ {\r
+ used_block = free_block;\r
+ XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block);\r
+ list_del(&free_block->list);\r
+ }\r
+ else\r
+ {\r
+ used_block = xgi_pcie_new_node();\r
+ if (used_block == NULL)\r
+ {\r
+ return NULL;\r
+ }\r
+\r
+ if (used_block == free_block)\r
+ {\r
+ XGI_ERROR("used_block == free_block = 0x%p\n", used_block);\r
+ }\r
+\r
+ used_block->offset = free_block->offset;\r
+ used_block->size = size;\r
+\r
+ free_block->offset += size;\r
+ free_block->size -= size;\r
+ }\r
+\r
+ xgi_pcie_heap->max_freesize -= size;\r
+\r
+ used_block->bus_addr = info->pcie.base + used_block->offset;\r
+ used_block->hw_addr = info->pcie.base + used_block->offset;\r
+ used_block->page_count = page_count = size / PAGE_SIZE;\r
+\r
+ /* get page_order base on page_count */\r
+ for (used_block->page_order = 0; page_count; page_count >>= 1)\r
+ {\r
+ ++used_block->page_order;\r
+ }\r
+\r
+ if ((used_block->page_count << 1) == (1 << used_block->page_order))\r
+ {\r
+ used_block->page_order--;\r
+ }\r
+ XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",\r
+ used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, used_block->page_order);\r
+\r
+ used_block->page_block = NULL;\r
+ //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL);\r
+ //if (!used_block->page_block) return NULL;\r
+ //used_block->page_block->next = NULL;\r
+\r
+ used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL);\r
+ if (used_block->page_table == NULL)\r
+ {\r
+ goto fail;\r
+ }\r
+\r
+ lut_id = (used_block->offset >> PAGE_SHIFT);\r
+ lut_addr = info->lut_base;\r
+ lut_addr += lut_id;\r
+ XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);\r
+\r
+ /* alloc free pages from system */\r
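+ /*
+ * Pages are allocated in chunks of at most (1 << XGI_PCIE_ALLOC_MAX_ORDER)
+ * pages; each chunk is tracked by an xgi_page_block_t, and every page is
+ * locked and has its physical address written into both the block's page
+ * table and the hardware GART LUT.
+ */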
+ page_count = used_block->page_count;\r
+ page_block = used_block->page_block;\r
+ prev_page_block = used_block->page_block;\r
+ for (i = 0; page_count > 0; i++)\r
+ {\r
+ /* if the remaining size is bigger than the maximum chunk ((1 << XGI_PCIE_ALLOC_MAX_ORDER) pages), it is split */
+ if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER))\r
+ {\r
+ page_order = XGI_PCIE_ALLOC_MAX_ORDER;\r
+ }\r
+ else\r
+ {\r
+ count = page_count;\r
+ for (page_order = 0; count; count >>= 1, ++page_order);\r
+\r
+ if ((page_count << 1) == (1 << page_order))\r
+ {\r
+ page_order -= 1;\r
+ }\r
+ }\r
+\r
+ count = (1 << page_order);\r
+ page_addr = __get_free_pages(GFP_KERNEL, page_order);\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr);\r
+\r
+ if (!page_addr)\r
+ {\r
+ XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n",\r
+ i, count);\r
+ goto fail;\r
+ }\r
+\r
+ /* Jong 05/30/2006; test */\r
+ memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order);\r
+ /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */\r
+\r
+ if (page_block == NULL)\r
+ {\r
+ page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL);\r
+ if (!page_block)\r
+ {\r
+ XGI_ERROR("Can't get memory for page_block! \n");\r
+ goto fail;\r
+ }\r
+ }\r
+\r
+ if (prev_page_block == NULL)\r
+ {\r
+ used_block->page_block = page_block;\r
+ prev_page_block = page_block;\r
+ }\r
+ else\r
+ {\r
+ prev_page_block->next = page_block;\r
+ prev_page_block = page_block;\r
+ }\r
+\r
+ page_block->next = NULL;\r
+ page_block->phys_addr = __pa(page_addr);\r
+ page_block->virt_addr = page_addr;\r
+ page_block->page_count = count;\r
+ page_block->page_order = page_order;\r
+\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr);\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr);\r
+\r
+ page = virt_to_page(page_addr);\r
+\r
+ //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"\r
+ // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",\r
+ // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);\r
+\r
+ for (j = 0 ; j < count; j++, page++, lut_addr++)\r
+ {\r
+ used_block->page_table[index + j].phys_addr = __pa(page_address(page));\r
+ used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page);\r
+\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr);\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr);\r
+\r
+ *lut_addr = __pa(page_address(page));\r
+ XGI_INC_PAGE_COUNT(page);\r
+ XGILockPage(page);\r
+\r
+ if (temp)\r
+ {\r
+ XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",\r
+ __pa(page_address(page)), lut_addr, j, *lut_addr);\r
+ temp--;\r
+ }\r
+ }\r
+\r
+ page_block = page_block->next;\r
+ page_count -= count;\r
+ index += count;\r
+ temp = 0;\r
+ }\r
+\r
+ used_block->owner = owner;\r
+ list_add(&used_block->list, &xgi_pcie_heap->used_list);\r
+\r
+#if defined(__i386__) || defined(__x86_64__)\r
+ asm volatile ( "wbinvd" ::: "memory" );\r
+#else\r
+ mb();\r
+#endif\r
+\r
+ /* Flush GART Table */\r
+ bWriteReg(0xB03F, 0x40);\r
+ bWriteReg(0xB03F, 0x00);\r
+\r
+ if (owner == PCIE_3D)\r
+ {\r
+ xgi_pcie_vertex_block = used_block;\r
+ }\r
+\r
+ if (owner == PCIE_3D_CMDLIST)\r
+ {\r
+ xgi_pcie_cmdlist_block = used_block;\r
+ }\r
+\r
+ if (owner == PCIE_3D_SCRATCHPAD)\r
+ {\r
+ xgi_pcie_scratchpad_block = used_block;\r
+ }\r
+\r
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");\r
+ return (used_block);\r
+\r
+fail:\r
+ xgi_pcie_block_stuff_free(used_block);\r
+ kmem_cache_free(xgi_pcie_cache_block, used_block);\r
+ return NULL;\r
+}\r
+\r
+static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset)\r
+{\r
+ struct list_head *free_list, *used_list;\r
+ xgi_pcie_block_t *used_block, *block = NULL;\r
+ xgi_pcie_block_t *prev, *next;\r
+ unsigned long upper, lower;\r
+\r
+ used_list = xgi_pcie_heap->used_list.next;\r
+ while (used_list != &xgi_pcie_heap->used_list)\r
+ {\r
+ block = list_entry(used_list, struct xgi_pcie_block_s, list);\r
+ if (block->offset == offset)\r
+ {\r
+ break;\r
+ }\r
+ used_list = used_list->next;\r
+ }\r
+\r
+ if (used_list == &xgi_pcie_heap->used_list)\r
+ {\r
+ XGI_ERROR("can't find block: 0x%lx to free!\n", offset);\r
+ return (NULL);\r
+ }\r
+\r
+ used_block = block;\r
+ XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",\r
+ used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr);\r
+\r
+ xgi_pcie_block_stuff_free(used_block);\r
+\r
+ /* update xgi_pcie_heap */\r
+ xgi_pcie_heap->max_freesize += used_block->size;\r
+\r
+ prev = next = NULL;\r
+ upper = used_block->offset + used_block->size;\r
+ lower = used_block->offset;\r
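+ /*
+ * Coalesce with neighbours: scan the free list for a block starting at
+ * 'upper' (the right neighbour) and one ending at 'lower' (the left
+ * neighbour), then merge the freed range into whichever of them exist.
+ */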
+\r
+ free_list = xgi_pcie_heap->free_list.next;\r
+\r
+ while (free_list != &xgi_pcie_heap->free_list)\r
+ {\r
+ block = list_entry(free_list, struct xgi_pcie_block_s, list);\r
+ if (block->offset == upper)\r
+ {\r
+ next = block;\r
+ }\r
+ else if ((block->offset + block->size) == lower)\r
+ {\r
+ prev = block;\r
+ }\r
+ free_list = free_list->next;\r
+ }\r
+\r
+ XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);\r
+ list_del(&used_block->list);\r
+\r
+ if (prev && next)\r
+ {\r
+ prev->size += (used_block->size + next->size);\r
+ list_del(&next->list);\r
+ XGI_INFO("free node 0x%p\n", next);\r
+ kmem_cache_free(xgi_pcie_cache_block, next);\r
+ kmem_cache_free(xgi_pcie_cache_block, used_block);\r
+ next = NULL;\r
+ used_block = NULL;\r
+ return (prev);\r
+ }\r
+\r
+ if (prev)\r
+ {\r
+ prev->size += used_block->size;\r
+ XGI_INFO("free node 0x%p\n", used_block);\r
+ kmem_cache_free(xgi_pcie_cache_block, used_block);\r
+ used_block = NULL;\r
+ return (prev);\r
+ }\r
+\r
+ if (next)\r
+ {\r
+ next->size += used_block->size;\r
+ next->offset = used_block->offset;\r
+ XGI_INFO("free node 0x%p\n", used_block);\r
+ kmem_cache_free(xgi_pcie_cache_block, used_block);\r
+ used_block = NULL;\r
+ return (next);\r
+ }\r
+\r
+ used_block->bus_addr = 0;\r
+ used_block->hw_addr = 0;\r
+ used_block->page_count = 0;\r
+ used_block->page_order = 0;\r
+ list_add(&used_block->list, &xgi_pcie_heap->free_list);\r
+ XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",\r
+ used_block, used_block->offset, used_block->size);\r
+ return (used_block);\r
+}\r
+\r
+void xgi_pcie_alloc(xgi_info_t *info, unsigned long size,\r
+ enum PcieOwner owner, xgi_mem_alloc_t *alloc)\r
+{\r
+ xgi_pcie_block_t *block;\r
+ xgi_mem_pid_t *mempid_block;\r
+\r
+ xgi_down(info->pcie_sem);\r
+ block = xgi_pcie_mem_alloc(info, size, owner);\r
+ xgi_up(info->pcie_sem);\r
+\r
+ if (block == NULL)\r
+ {\r
+ alloc->location = INVALID;\r
+ alloc->size = 0;\r
+ alloc->bus_addr = 0;\r
+ alloc->hw_addr = 0;\r
+ XGI_ERROR("PCIE RAM allocation failed\n");\r
+ }\r
+ else\r
+ {\r
+ XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",\r
+ block->offset, block->bus_addr);\r
+ alloc->location = NON_LOCAL;\r
+ alloc->size = block->size;\r
+ alloc->bus_addr = block->bus_addr;\r
+ alloc->hw_addr = block->hw_addr;\r
+\r
+ /*
+ manage mempid; handle PCIE_3D and PCIE_3D_TEXTURE.
+ A PCIE_3D request means an OpenGL process was created.
+ A PCIE_3D_TEXTURE request means the texture could not be allocated from fb.
+ */
+ if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE)\r
+ {\r
+ mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
+ if (!mempid_block)
+ {
+ XGI_ERROR("mempid_block alloc failed\n");
+ }
+ else
+ {
+ mempid_block->location = NON_LOCAL;
+ if (owner == PCIE_3D)
+ mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/
+ else
+ mempid_block->bus_addr = alloc->bus_addr;
+ mempid_block->pid = alloc->pid;
+
+ XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! \n", mempid_block->pid);
+ list_add(&mempid_block->list, &xgi_mempid_list);
+ }
+ }\r
+ }\r
+}\r
+\r
+void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr)\r
+{\r
+ xgi_pcie_block_t *block;\r
+ unsigned long offset = bus_addr - info->pcie.base;\r
+ xgi_mem_pid_t *mempid_block;\r
+ xgi_mem_pid_t *mempid_freeblock = NULL;\r
+ struct list_head *mempid_list;\r
+ char isvertex = 0;\r
+ int processcnt;\r
+\r
+ if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr)\r
+ isvertex = 1;\r
+\r
+ if (isvertex)\r
+ {\r
+ /*check is there any other process using vertex*/\r
+ processcnt = 0;\r
+ mempid_list = xgi_mempid_list.next;\r
+ while (mempid_list != &xgi_mempid_list)\r
+ {\r
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);\r
+ if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF)\r
+ {\r
+ ++processcnt;\r
+ }\r
+ mempid_list = mempid_list->next;\r
+ }\r
+ if (processcnt > 1)\r
+ {\r
+ return;\r
+ }\r
+ }\r
+\r
+ xgi_down(info->pcie_sem);\r
+ block = xgi_pcie_mem_free(info, offset);\r
+ xgi_up(info->pcie_sem);\r
+\r
+ if (block == NULL)\r
+ {\r
+ XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);\r
+ }\r
+\r
+ if (isvertex)\r
+ xgi_pcie_vertex_block = NULL;\r
+\r
+ /* manage mempid */\r
+ mempid_list = xgi_mempid_list.next;\r
+ while (mempid_list != &xgi_mempid_list)\r
+ {\r
+ mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);\r
+ if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr)))\r
+ {\r
+ mempid_freeblock = mempid_block;\r
+ break;\r
+ }\r
+ mempid_list = mempid_list->next;\r
+ }\r
+ if (mempid_freeblock)\r
+ {\r
+ list_del(&mempid_freeblock->list);\r
+ XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid);\r
+ kfree(mempid_freeblock);\r
+ }\r
+}\r
+\r
+/*\r
+ * given a bus address, find the PCIE memory block that contains it;
+ * the bus address is used as the key.
+ */\r
+void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address)\r
+{\r
+ struct list_head *used_list;\r
+ xgi_pcie_block_t *block;\r
+ int i;\r
+\r
+ used_list = xgi_pcie_heap->used_list.next;\r
+\r
+ while (used_list != &xgi_pcie_heap->used_list)\r
+ {\r
+ block = list_entry(used_list, struct xgi_pcie_block_s, list);\r
+\r
+ if (block->bus_addr == address)\r
+ {\r
+ return block;\r
+ }\r
+\r
+ if (block->page_table)\r
+ {\r
+ for (i = 0; i < block->page_count; i++)\r
+ {\r
+ unsigned long offset = block->bus_addr;\r
+ if ( (address >= offset) && (address < (offset + PAGE_SIZE)))\r
+ {\r
+ return block;\r
+ }\r
+ }\r
+ }\r
+ used_list = used_list->next;\r
+ }\r
+\r
+ XGI_ERROR("could not find map for vm 0x%lx\n", address);\r
+\r
+ return NULL;\r
+}\r
+\r
+/*\r
+ address -- GE HW address\r
+ return -- CPU virtual address\r
+\r
+ note: the returned CPU virtual address is only contiguous within the page
+ that contains the given address, not across the whole block
+*/\r
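+/*
+ * Translation sketch: for a block with hardware address H and per-page
+ * table pt[], an address A inside the block maps to
+ * pt[(A - H) >> PAGE_SHIFT].virt_addr + (A & (PAGE_SIZE - 1)),
+ * i.e. pick the backing page, then add the offset within that page.
+ */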
+void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address)\r
+{\r
+ struct list_head *used_list;\r
+ xgi_pcie_block_t *block;\r
+ unsigned long offset_in_page;\r
+ unsigned long loc_in_pagetable;\r
+ void * ret;\r
+\r
+ XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n");\r
+\r
+ used_list = xgi_pcie_heap->used_list.next;\r
+ XGI_INFO("Jong_05292006-used_list=%ul\n", used_list);\r
+\r
+ offset_in_page = address & (PAGE_SIZE-1);\r
+ XGI_INFO("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", address, PAGE_SIZE-1, offset_in_page);\r
+\r
+ while (used_list != &xgi_pcie_heap->used_list)\r
+ {\r
+ block = list_entry(used_list, struct xgi_pcie_block_s, list);\r
+ XGI_INFO("Jong_05292006-block=0x%px\n", block);\r
+ XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", block->hw_addr);\r
+ XGI_INFO("Jong_05292006- block->size=%ul\n", block->size);\r
+\r
+ if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size)))\r
+ {\r
+ loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT;\r
+ ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page);\r
+\r
+ XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT);\r
+ XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", loc_in_pagetable);\r
+ XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", block->page_table[loc_in_pagetable].virt_addr);\r
+ XGI_INFO("Jong_05292006-offset_in_page=%d\n", offset_in_page);\r
+ XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", ret);\r
+\r
+ return ret ;\r
+ }\r
+ else\r
+ {\r
+ XGI_INFO("Jong_05292006-used_list = used_list->next;\n");\r
+ used_list = used_list->next;\r
+ }\r
+ }\r
+\r
+ XGI_ERROR("could not find map for vm 0x%lx\n", address);\r
+ return NULL;\r
+}\r
+\r
+\r
+void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)\r
+{\r
+\r
+}\r
+\r
+void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)\r
+{\r
+}\r
+\r
+/*\r
+ address -- GE hw address\r
+*/\r
+void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address)\r
+{\r
+ unsigned long * virtaddr = 0;\r
+ if (address == 0)\r
+ {\r
+ XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n");\r
+ return;\r
+ }\r
+\r
+ virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address);\r
+\r
+ XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address);\r
+ XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr);\r
+ XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr);\r
+ if (virtaddr != NULL)\r
+ {\r
+ *virtaddr = 0x00f00fff;\r
+ }\r
+\r
+ XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr);\r
+}\r
+\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#ifndef _XGI_PCIE_H_\r
+#define _XGI_PCIE_H_\r
+\r
+#ifndef XGI_PCIE_ALLOC_MAX_ORDER\r
+#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */\r
+#endif\r
+\r
+typedef struct xgi_page_block_s {\r
+ struct xgi_page_block_s *next;\r
+ unsigned long phys_addr;\r
+ unsigned long virt_addr;\r
+ unsigned long page_count;\r
+ unsigned long page_order;\r
+} xgi_page_block_t;\r
+\r
+typedef struct xgi_pcie_block_s {\r
+ struct list_head list;\r
+ unsigned long offset; /* block's offset in pcie memory, begin from 0 */\r
+ unsigned long size; /* The block size. */\r
+ unsigned long bus_addr; /* CPU access address/bus address */\r
+ unsigned long hw_addr; /* GE access address */\r
+\r
+ unsigned long page_count;\r
+ unsigned long page_order;\r
+ xgi_page_block_t *page_block;\r
+ xgi_pte_t *page_table; /* list of physical pages allocated */\r
+\r
+ atomic_t use_count;\r
+ enum PcieOwner owner;\r
+ unsigned long processID;\r
+} xgi_pcie_block_t;\r
+\r
+typedef struct xgi_pcie_list_s {\r
+ xgi_pcie_block_t *head;\r
+ xgi_pcie_block_t *tail;\r
+} xgi_pcie_list_t;\r
+\r
+typedef struct xgi_pcie_heap_s {\r
+ struct list_head free_list;\r
+ struct list_head used_list;\r
+ struct list_head sort_list;\r
+ unsigned long max_freesize;\r
+} xgi_pcie_heap_t;\r
+\r
+#endif\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+\r
+#ifndef _XGI_REGS_H_\r
+#define _XGI_REGS_H_\r
+\r
+#ifndef XGI_MMIO\r
+ #define XGI_MMIO 1\r
+#endif\r
+\r
+#if XGI_MMIO\r
+#define OUTB(port, value) writeb(value, info->mmio.vbase + port)\r
+#define INB(port) readb(info->mmio.vbase + port)\r
+#define OUTW(port, value) writew(value, info->mmio.vbase + port)\r
+#define INW(port) readw(info->mmio.vbase + port)\r
+#define OUTDW(port, value) writel(value, info->mmio.vbase + port)\r
+#define INDW(port) readl(info->mmio.vbase + port)\r
+#else\r
+#define OUTB(port, value) outb(value, port)\r
+#define INB(port) inb(port)\r
+#define OUTW(port, value) outw(value, port)\r
+#define INW(port) inw(port)\r
+#define OUTDW(port, value) outl(value, port)\r
+#define INDW(port) inl(port)\r
+#endif\r
+\r
+/* Hardware access functions */\r
+static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data)\r
+{\r
+ OUTB(0x3C4, index);\r
+ OUTB(0x3C5, data);\r
+}\r
+\r
+static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data)\r
+{\r
+ OUTB(0x3D4, index);\r
+ OUTB(0x3D5, data);\r
+}\r
+\r
+static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data)\r
+{\r
+ OUTB(0x3CE, index);\r
+ OUTB(0x3CF, data);\r
+}\r
+\r
+static inline u8 IN3C5B(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3C4, index);\r
+ data = INB(0x3C5);\r
+ return data;\r
+}\r
+\r
+static inline u8 IN3X5B(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3D4, index);\r
+ data = INB(0x3D5);\r
+ return data;\r
+}\r
+\r
+static inline u8 IN3CFB(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3CE, index);\r
+ data = INB(0x3CF);\r
+ return data;\r
+}\r
+\r
+static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data)\r
+{\r
+ OUTB(0x3C4, index);\r
+ OUTB(0x3C5, data);\r
+}\r
+\r
+static inline void OUT3X5W(xgi_info_t *info, u8 index, u16 data)\r
+{\r
+ OUTB(0x3D4, index);\r
+ OUTB(0x3D5, data);\r
+}\r
+\r
+static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data)\r
+{\r
+ OUTB(0x3CE, index);\r
+ OUTB(0x3CF, data);\r
+}\r
+\r
+static inline u8 IN3C5W(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3C4, index);\r
+ data = INB(0x3C5);\r
+ return data;\r
+}\r
+\r
+static inline u8 IN3X5W(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3D4, index);\r
+ data = INB(0x3D5);\r
+ return data;\r
+}\r
+\r
+static inline u8 IN3CFW(xgi_info_t *info, u8 index)\r
+{\r
+ volatile u8 data=0;\r
+ OUTB(0x3CE, index);\r
+ data = INB(0x3CF);\r
+ return data;\r
+}\r
+\r
+static inline u8 readAttr(xgi_info_t *info, u8 index)\r
+{\r
+ INB(0x3DA); /* flip-flop to index */\r
+ OUTB(0x3C0, index);\r
+ return INB(0x3C1);\r
+}\r
+\r
+static inline void writeAttr(xgi_info_t *info, u8 index, u8 value)\r
+{\r
+ INB(0x3DA); /* flip-flop to index */\r
+ OUTB(0x3C0, index);\r
+ OUTB(0x3C0, value);\r
+}\r
+\r
+/*\r
+ * Graphic engine register (2d/3d) accessing interface
+ */\r
+static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data)\r
+{\r
+ /* Jong 05/25/2006 */\r
+ XGI_INFO("Jong-WriteRegDWord()-Begin \n");\r
+ XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase);\r
+ XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr);\r
+ XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); \r
+ /* return; */\r
+\r
+ *(volatile u32*)(info->mmio.vbase + addr) = (data);\r
+ XGI_INFO("Jong-WriteRegDWord()-End \n"); \r
+}\r
+\r
+static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data)\r
+{\r
+ *(volatile u16*)(info->mmio.vbase + addr) = (data);\r
+}\r
+\r
+static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data)\r
+{\r
+ *(volatile u8*)(info->mmio.vbase + addr) = (data);\r
+}\r
+\r
+static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr)\r
+{\r
+ volatile u32 data;\r
+ data = *(volatile u32*)(info->mmio.vbase + addr);\r
+ return data;\r
+}\r
+\r
+static inline u16 ReadRegWord(xgi_info_t *info, u32 addr)\r
+{\r
+ volatile u16 data;\r
+ data = *(volatile u16*)(info->mmio.vbase + addr);\r
+ return data;\r
+}\r
+\r
+static inline u8 ReadRegByte(xgi_info_t *info, u32 addr)\r
+{\r
+ volatile u8 data;\r
+ data = *(volatile u8*)(info->mmio.vbase + addr);\r
+ return data;\r
+}\r
+#if 0\r
+extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data);\r
+extern void OUT3X5B(xgi_info_t *info, u8 index, u8 data);\r
+extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data);\r
+extern u8 IN3C5B(xgi_info_t *info, u8 index);\r
+extern u8 IN3X5B(xgi_info_t *info, u8 index);\r
+extern u8 IN3CFB(xgi_info_t *info, u8 index);\r
+extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data);\r
+extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data);\r
+extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data);\r
+extern u8 IN3C5W(xgi_info_t *info, u8 index);\r
+extern u8 IN3X5W(xgi_info_t *info, u8 index);\r
+extern u8 IN3CFW(xgi_info_t *info, u8 index);\r
+\r
+extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data);\r
+extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data);\r
+extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data);\r
+extern u32 ReadRegDWord(xgi_info_t *info, u32 addr);\r
+extern u16 ReadRegWord(xgi_info_t *info, u32 addr);\r
+extern u8 ReadRegByte(xgi_info_t *info, u32 addr);\r
+\r
+extern void EnableProtect();\r
+extern void DisableProtect();\r
+#endif\r
+\r
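+/*
+ * Shorthand for the standard VGA indexed register pairs:
+ * 3c5 = sequencer (index 0x3C4 / data 0x3C5),
+ * 3x5 = CRTC (index 0x3D4 / data 0x3D5),
+ * 3cf = graphics controller (index 0x3CE / data 0x3CF).
+ * All of these macros expect a local 'info' pointer to be in scope.
+ */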
+#define Out(port, data) OUTB(port, data)\r
+#define bOut(port, data) OUTB(port, data)\r
+#define wOut(port, data) OUTW(port, data)\r
+#define dwOut(port, data) OUTDW(port, data)\r
+\r
+#define Out3x5(index, data) OUT3X5B(info, index, data)\r
+#define bOut3x5(index, data) OUT3X5B(info, index, data)\r
+#define wOut3x5(index, data) OUT3X5W(info, index, data)\r
+\r
+#define Out3c5(index, data) OUT3C5B(info, index, data)\r
+#define bOut3c5(index, data) OUT3C5B(info, index, data)\r
+#define wOut3c5(index, data) OUT3C5W(info, index, data)\r
+\r
+#define Out3cf(index, data) OUT3CFB(info, index, data)\r
+#define bOut3cf(index, data) OUT3CFB(info, index, data)\r
+#define wOut3cf(index, data) OUT3CFW(info, index, data)\r
+\r
+#define In(port) INB(port)\r
+#define bIn(port) INB(port)\r
+#define wIn(port) INW(port)\r
+#define dwIn(port) INDW(port)\r
+\r
+#define In3x5(index) IN3X5B(info, index)\r
+#define bIn3x5(index) IN3X5B(info, index)\r
+#define wIn3x5(index) IN3X5W(info, index)\r
+\r
+#define In3c5(index) IN3C5B(info, index)\r
+#define bIn3c5(index) IN3C5B(info, index)\r
+#define wIn3c5(index) IN3C5W(info, index)\r
+\r
+#define In3cf(index) IN3CFB(info, index)\r
+#define bIn3cf(index) IN3CFB(info, index)\r
+#define wIn3cf(index) IN3CFW(info, index)\r
+\r
+#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data)\r
+#define wWriteReg(addr, data) WriteRegWord(info, addr, data)\r
+#define bWriteReg(addr, data) WriteRegByte(info, addr, data)\r
+#define dwReadReg(addr) ReadRegDWord(info, addr)\r
+#define wReadReg(addr) ReadRegWord(info, addr)\r
+#define bReadReg(addr) ReadRegByte(info, addr)\r
+\r
+static inline void xgi_protect_all(xgi_info_t *info)\r
+{\r
+ OUTB(0x3C4, 0x11);\r
+ OUTB(0x3C5, 0x92);\r
+}\r
+\r
+static inline void xgi_unprotect_all(xgi_info_t *info)\r
+{\r
+ OUTB(0x3C4, 0x11);\r
+ OUTB(0x3C5, 0x92);\r
+}\r
+\r
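+/*
+ * Presumably uses direct port I/O (outb/inb) here because the MMIO window
+ * is not usable until 3x5.39 bit 0 has been set; the register-protect value
+ * is saved first and restored once MMIO is enabled.
+ */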
+static inline void xgi_enable_mmio(xgi_info_t *info)\r
+{\r
+ u8 protect = 0;\r
+\r
+ /* Unprotect registers */\r
+ outb(0x11, 0x3C4);\r
+ protect = inb(0x3C5);\r
+ outb(0x92, 0x3C5);\r
+\r
+ outb(0x3A, 0x3D4);\r
+ outb(inb(0x3D5) | 0x20, 0x3D5);\r
+\r
+ /* Enable MMIO */\r
+ outb(0x39, 0x3D4);\r
+ outb(inb(0x3D5) | 0x01, 0x3D5);\r
+\r
+ OUTB(0x3C4, 0x11);\r
+ OUTB(0x3C5, protect);\r
+}\r
+\r
+static inline void xgi_disable_mmio(xgi_info_t *info)\r
+{\r
+ u8 protect = 0;\r
+\r
+ /* unprotect registers */\r
+ OUTB(0x3C4, 0x11);\r
+ protect = INB(0x3C5);\r
+ OUTB(0x3C5, 0x92);\r
+\r
+ /* Disable MMIO access */\r
+ OUTB(0x3D4, 0x39);\r
+ OUTB(0x3D5, INB(0x3D5) & 0xFE);\r
+\r
+ /* Protect registers */\r
+ outb(0x11, 0x3C4);\r
+ outb(protect, 0x3C5);\r
+}\r
+\r
+static inline void xgi_enable_ge(xgi_info_t *info)\r
+{\r
+ unsigned char bOld3cf2a = 0;\r
+ int wait = 0;\r
+\r
+ // Enable GE\r
+ OUTW(0x3C4, 0x9211);\r
+\r
+ // Save and close dynamic gating\r
+ bOld3cf2a = bIn3cf(0x2a);\r
+ bOut3cf(0x2a, bOld3cf2a & 0xfe);\r
+\r
+ // Reset both 3D and 2D engine\r
+ bOut3x5(0x36, 0x84);\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+ bOut3x5(0x36, 0x94);\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+ bOut3x5(0x36, 0x84);\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+ // Enable 2D engine only\r
+ bOut3x5(0x36, 0x80);\r
+\r
+ // Enable 2D+3D engine\r
+ bOut3x5(0x36, 0x84);\r
+\r
+ // Restore dynamic gating\r
+ bOut3cf(0x2a, bOld3cf2a);\r
+}\r
+\r
+static inline void xgi_disable_ge(xgi_info_t *info)\r
+{\r
+ int wait = 0;\r
+\r
+ // Reset both 3D and 2D engine\r
+ bOut3x5(0x36, 0x84);\r
+\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+ bOut3x5(0x36, 0x94);\r
+\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+ bOut3x5(0x36, 0x84);\r
+\r
+ wait = 10;\r
+ while (wait--)\r
+ {\r
+ bIn(0x36);\r
+ }\r
+\r
+ // Disable 2D engine only\r
+ bOut3x5(0x36, 0);\r
+}\r
+\r
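+/*
+ * For both DVI and CRT1, the clear/set toggle on the status bit below
+ * appears to acknowledge any pending event before the enable bit
+ * (0x02 for DVI in 3cf.39, 0x08 for CRT1 in 3cf.3d) is set or cleared.
+ */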
+static inline void xgi_enable_dvi_interrupt(xgi_info_t *info)\r
+{\r
+ Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0\r
+ Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1\r
+ Out3cf(0x39, In3cf(0x39) | 0x02);\r
+}\r
+static inline void xgi_disable_dvi_interrupt(xgi_info_t *info)\r
+{\r
+ Out3cf(0x39,In3cf(0x39) & ~0x02);\r
+}\r
+\r
+static inline void xgi_enable_crt1_interrupt(xgi_info_t *info)\r
+{\r
+ Out3cf(0x3d,In3cf(0x3d) | 0x04);\r
+ Out3cf(0x3d,In3cf(0x3d) & ~0x04);\r
+ Out3cf(0x3d,In3cf(0x3d) | 0x08);\r
+}\r
+\r
+static inline void xgi_disable_crt1_interrupt(xgi_info_t *info)\r
+{\r
+ Out3cf(0x3d,In3cf(0x3d) & ~0x08);\r
+}\r
+\r
+#endif\r
+\r
--- /dev/null
+\r
+/****************************************************************************\r
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan. \r
+ * *\r
+ * All Rights Reserved. *\r
+ * *\r
+ * Permission is hereby granted, free of charge, to any person obtaining\r
+ * a copy of this software and associated documentation files (the \r
+ * "Software"), to deal in the Software without restriction, including \r
+ * without limitation on the rights to use, copy, modify, merge, \r
+ * publish, distribute, sublicense, and/or sell copies of the Software, \r
+ * and to permit persons to whom the Software is furnished to do so, \r
+ * subject to the following conditions: \r
+ * *\r
+ * The above copyright notice and this permission notice (including the \r
+ * next paragraph) shall be included in all copies or substantial \r
+ * portions of the Software. \r
+ * *\r
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, \r
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR \r
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER \r
+ * DEALINGS IN THE SOFTWARE. \r
+ ***************************************************************************/\r
+\r
+#ifndef _XGI_TYPES_H_\r
+#define _XGI_TYPES_H_\r
+\r
+/****************************************************************************\r
+ * Typedefs *\r
+ ***************************************************************************/\r
+\r
+typedef unsigned char V8; /* "void": enumerated or multiple fields */\r
+typedef unsigned short V16; /* "void": enumerated or multiple fields */\r
+typedef unsigned char U8; /* 0 to 255 */\r
+typedef unsigned short U16; /* 0 to 65535 */\r
+typedef signed char S8; /* -128 to 127 */\r
+typedef signed short S16; /* -32768 to 32767 */\r
+typedef float F32; /* IEEE Single Precision (S1E8M23) */\r
+typedef double F64; /* IEEE Double Precision (S1E11M52) */\r
+typedef unsigned long BOOL;\r
+/*\r
+ * mainly for 64-bit linux, where long is 64 bits\r
+ * and win9x, where int is 16 bit.\r
+ */\r
+#if defined(vxworks)\r
+typedef unsigned int V32; /* "void": enumerated or multiple fields */\r
+typedef unsigned int U32; /* 0 to 4294967295 */\r
+typedef signed int S32; /* -2147483648 to 2147483647 */\r
+#else\r
+typedef unsigned long V32; /* "void": enumerated or multiple fields */\r
+typedef unsigned long U32; /* 0 to 4294967295 */\r
+typedef signed long S32; /* -2147483648 to 2147483647 */\r
+#endif\r
+\r
+#ifndef TRUE\r
+#define TRUE 1UL\r
+#endif\r
+\r
+#ifndef FALSE\r
+#define FALSE 0UL\r
+#endif\r
+\r
+#endif\r
+\r