FLUSH_2D
};
-xgi_cmdring_info_t s_cmdring;
+struct xgi_cmdring_info s_cmdring;
-static void addFlush2D(xgi_info_t * info);
-static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo);
-static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter);
+static void addFlush2D(struct xgi_info * info);
+static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo);
+static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter);
static void xgi_cmdlist_reset(void);
-int xgi_cmdlist_initialize(xgi_info_t * info, U32 size)
+int xgi_cmdlist_initialize(struct xgi_info * info, U32 size)
{
- //xgi_mem_req_t mem_req;
- xgi_mem_alloc_t mem_alloc;
+ //struct xgi_mem_req mem_req;
+ struct xgi_mem_alloc mem_alloc;
//mem_req.size = size;
return 1;
}
-void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo)
+void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
{
U32 beginPort;
/** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/
2 - fb
3 - logout
*/
-void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo)
+void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo)
{
#define STATE_CONSOLE 0
#define STATE_GRAPHIC 1
s_cmdring._cmdRingOffset = 0;
}
-void xgi_cmdlist_cleanup(xgi_info_t * info)
+void xgi_cmdlist_cleanup(struct xgi_info * info)
{
if (s_cmdring._cmdRingBuffer != 0) {
xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);
}
}
-static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter)
+static void triggerHWCommandList(struct xgi_info * info, U32 triggerCounter)
{
static U32 s_triggerID = 1;
}
}
-static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo)
+static U32 getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo)
{
// Convert the batch type to begin port ID
switch (pCmdInfo->_firstBeginType) {
}
}
-static void addFlush2D(xgi_info_t * info)
+static void addFlush2D(struct xgi_info * info)
{
U32 *flushBatchVirtAddr;
U32 flushBatchHWAddr;
AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE
} CMD_SIZE;
-typedef struct xgi_cmdring_info_s {
+struct xgi_cmdring_info {	/* HW command-ring bookkeeping; replaces xgi_cmdring_info_t typedef (kernel style: no struct typedefs) */
	U32 _cmdRingSize;	/* total ring size — presumably bytes; TODO confirm */
	U32 _cmdRingBuffer;	/* ring buffer handle/address; 0 means not allocated (see xgi_cmdlist_cleanup) */
	U32 _cmdRingBusAddr;	/* bus address of the ring; passed to xgi_pcie_free() on cleanup */
	U32 _lastBatchStartAddr;	/* start address of most recent batch — TODO confirm semantics */
	U32 _cmdRingOffset;	/* current write offset into the ring; reset to 0 in xgi_state_change() */
-} xgi_cmdring_info_t;
+};
-extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size);
+extern int xgi_cmdlist_initialize(struct xgi_info * info, U32 size);
-extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo);
+extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo);
-extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo);
+extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo);
-extern void xgi_cmdlist_cleanup(xgi_info_t * info);
+extern void xgi_cmdlist_cleanup(struct xgi_info * info);
#endif /* _XGI_CMDLIST_H_ */
static int xgi_num_devices = 0;
-xgi_info_t xgi_devices[XGI_MAX_DEVICES];
+struct xgi_info xgi_devices[XGI_MAX_DEVICES];
#if defined(XGI_PM_SUPPORT_APM)
static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 };
#endif
/* add one for the control device */
-xgi_info_t xgi_ctl_device;
+struct xgi_info xgi_ctl_device;
wait_queue_head_t xgi_ctl_waitqueue;
#ifdef CONFIG_PROC_FS
struct list_head xgi_mempid_list;
/* xgi_ functions.. do not take a state device parameter */
-static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info);
+static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info);
static void xgi_proc_create(void);
static void xgi_proc_remove_all(struct proc_dir_entry *);
static void xgi_proc_remove(void);
void xgi_kern_isr_bh(unsigned long);
irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *);
-static void xgi_lock_init(xgi_info_t * info);
+static void xgi_lock_init(struct xgi_info * info);
#if defined(XGI_PM_SUPPORT_ACPI)
int xgi_kern_acpi_standby(struct pci_dev *, u32);
#define XGI_CHECK_PCI_CONFIG(xgi) \
xgi_check_pci_config(xgi, __LINE__)
-static inline void xgi_check_pci_config(xgi_info_t * info, int line)
+static inline void xgi_check_pci_config(struct xgi_info * info, int line)
{
unsigned short cmd, flag = 0;
*/
int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table)
{
- xgi_info_t *info;
+ struct xgi_info *info;
if ((dev->vendor != PCI_VENDOR_ID_XGI)
|| (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) {
vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
if (XGI_VMA_PRIVATE(vma)) {
- xgi_pcie_block_t *block =
- (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma);
+ struct xgi_pcie_block *block =
+ (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
XGI_ATOMIC_INC(block->use_count);
}
}
vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
if (XGI_VMA_PRIVATE(vma)) {
- xgi_pcie_block_t *block =
- (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma);
+ struct xgi_pcie_block *block =
+ (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
XGI_ATOMIC_DEC(block->use_count);
/*
struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
unsigned long address, int *type)
{
- xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma);
+ struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
struct page *page = NOPAGE_SIGBUS;
unsigned long offset = 0;
unsigned long page_addr = 0;
struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
unsigned long address, int write_access)
{
- xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma);
+ struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
struct page *page = NOPAGE_SIGBUS;
unsigned long offset = 0;
unsigned long page_addr = 0;
.release = xgi_kern_release,
};
-static xgi_file_private_t *xgi_alloc_file_private(void)
+static struct xgi_file_private *xgi_alloc_file_private(void)
{
- xgi_file_private_t *fp;
+ struct xgi_file_private *fp;
- XGI_KMALLOC(fp, sizeof(xgi_file_private_t));
+ XGI_KMALLOC(fp, sizeof(struct xgi_file_private));
if (!fp)
return NULL;
- memset(fp, 0, sizeof(xgi_file_private_t));
+ memset(fp, 0, sizeof(struct xgi_file_private));
/* initialize this file's event queue */
init_waitqueue_head(&fp->wait_queue);
return fp;
}
-static void xgi_free_file_private(xgi_file_private_t * fp)
+static void xgi_free_file_private(struct xgi_file_private * fp)
{
if (fp == NULL)
return;
- XGI_KFREE(fp, sizeof(xgi_file_private_t));
+ XGI_KFREE(fp, sizeof(struct xgi_file_private));
}
int xgi_kern_open(struct inode *inode, struct file *filp)
{
- xgi_info_t *info = NULL;
+ struct xgi_info *info = NULL;
int dev_num;
int result = 0, status;
int xgi_kern_release(struct inode *inode, struct file *filp)
{
- xgi_info_t *info = XGI_INFO_FROM_FP(filp);
+ struct xgi_info *info = XGI_INFO_FROM_FP(filp);
XGI_CHECK_PCI_CONFIG(info);
int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma)
{
//struct inode *inode = INODE_FROM_FP(filp);
- xgi_info_t *info = XGI_INFO_FROM_FP(filp);
- xgi_pcie_block_t *block;
+ struct xgi_info *info = XGI_INFO_FROM_FP(filp);
+ struct xgi_pcie_block *block;
int pages = 0;
unsigned long prot;
unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait)
{
- xgi_file_private_t *fp;
- xgi_info_t *info;
+ struct xgi_file_private *fp;
+ struct xgi_info *info;
unsigned int mask = 0;
unsigned long eflags;
int xgi_kern_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- xgi_info_t *info;
- xgi_mem_alloc_t *alloc = NULL;
+ struct xgi_info *info;
+ struct xgi_mem_alloc *alloc = NULL;
int status = 0;
void *arg_copy;
} else
XGI_INFO("Jong-copy_from_user-OK! \n");
- alloc = (xgi_mem_alloc_t *) arg_copy;
+ alloc = (struct xgi_mem_alloc *) arg_copy;
XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg,
arg_size);
switch (_IOC_NR(cmd)) {
case XGI_ESC_DEVICE_INFO:
XGI_INFO("Jong-xgi_ioctl_get_device_info \n");
- xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy);
+ xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy);
break;
case XGI_ESC_POST_VBIOS:
XGI_INFO("Jong-xgi_ioctl_post_vbios \n");
break;
case XGI_ESC_FB_ALLOC:
XGI_INFO("Jong-xgi_ioctl_fb_alloc \n");
- xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc);
+ xgi_fb_alloc(info, (struct xgi_mem_req *)arg_copy, alloc);
break;
case XGI_ESC_FB_FREE:
XGI_INFO("Jong-xgi_ioctl_fb_free \n");
break;
case XGI_ESC_PCIE_ALLOC:
XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n");
- xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size,
- ((xgi_mem_req_t *) arg_copy)->owner, alloc);
+ xgi_pcie_alloc(info, ((struct xgi_mem_req *) arg_copy)->size,
+ ((struct xgi_mem_req *) arg_copy)->owner, alloc);
break;
case XGI_ESC_PCIE_FREE:
XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n",
break;
case XGI_ESC_GET_SCREEN_INFO:
XGI_INFO("Jong-xgi_get_screen_info \n");
- xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy);
+ xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy);
break;
case XGI_ESC_PUT_SCREEN_INFO:
XGI_INFO("Jong-xgi_put_screen_info \n");
- xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy);
+ xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy);
break;
case XGI_ESC_MMIO_INFO:
XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n");
- xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy);
+ xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy);
break;
case XGI_ESC_GE_RESET:
XGI_INFO("Jong-xgi_ioctl_ge_reset \n");
break;
case XGI_ESC_SAREA_INFO:
XGI_INFO("Jong-xgi_ioctl_sarea_info \n");
- xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy);
+ xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy);
break;
case XGI_ESC_DUMP_REGISTER:
XGI_INFO("Jong-xgi_ioctl_dump_register \n");
case XGI_ESC_DEBUG_INFO:
XGI_INFO("Jong-xgi_ioctl_restore_registers \n");
xgi_restore_registers(info);
- //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);
- //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);
+ //xgi_write_pcie_mem(info, (struct xgi_mem_req *) arg_copy);
+ //xgi_read_pcie_mem(info, (struct xgi_mem_req *) arg_copy);
break;
case XGI_ESC_SUBMIT_CMDLIST:
XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n");
- xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy);
+ xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy);
break;
case XGI_ESC_TEST_RWINKERNEL:
XGI_INFO("Jong-xgi_test_rwinkernel \n");
break;
case XGI_ESC_STATE_CHANGE:
XGI_INFO("Jong-xgi_state_change \n");
- xgi_state_change(info, (xgi_state_info_t *) arg_copy);
+ xgi_state_change(info, (struct xgi_state_info *) arg_copy);
break;
case XGI_ESC_CPUID:
XGI_INFO("Jong-XGI_ESC_CPUID \n");
- xgi_get_cpu_id((struct cpu_info_s *)arg_copy);
+ xgi_get_cpu_id((struct cpu_info *)arg_copy);
break;
default:
XGI_INFO("Jong-xgi_ioctl_default \n");
*/
int xgi_kern_ctl_open(struct inode *inode, struct file *filp)
{
- xgi_info_t *info = &xgi_ctl_device;
+ struct xgi_info *info = &xgi_ctl_device;
int rc = 0;
int xgi_kern_ctl_close(struct inode *inode, struct file *filp)
{
- xgi_info_t *info = XGI_INFO_FROM_FP(filp);
+ struct xgi_info *info = XGI_INFO_FROM_FP(filp);
XGI_INFO("Jong-xgi_kern_ctl_close\n");
unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait)
{
- //xgi_info_t *info = XGI_INFO_FROM_FP(filp);;
+ //struct xgi_info *info = XGI_INFO_FROM_FP(filp);;
unsigned int ret = 0;
if (!(filp->f_flags & O_NONBLOCK)) {
return 0;
}
-static struct pci_dev *xgi_get_pci_device(xgi_info_t * info)
+static struct pci_dev *xgi_get_pci_device(struct xgi_info * info)
{
struct pci_dev *dev;
char *type;
int len = 0;
- xgi_info_t *info;
- info = (xgi_info_t *) data;
+ struct xgi_info *info;
+ info = (struct xgi_info *) data;
dev = xgi_get_pci_device(info);
if (!dev)
struct proc_dir_entry *entry;
struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards;
- xgi_info_t *info;
- xgi_info_t *xgi_max_devices;
+ struct xgi_info *info;
+ struct xgi_info *xgi_max_devices;
/* world readable directory */
int flags = S_IFDIR | S_IRUGO | S_IXUGO;
*/
irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs)
{
- xgi_info_t *info = (xgi_info_t *) dev_id;
+ struct xgi_info *info = (struct xgi_info *) dev_id;
u32 need_to_run_bottom_half = 0;
//XGI_INFO("xgi_kern_isr \n");
void xgi_kern_isr_bh(unsigned long data)
{
- xgi_info_t *info = (xgi_info_t *) data;
+ struct xgi_info *info = (struct xgi_info *) data;
XGI_INFO("xgi_kern_isr_bh \n");
XGI_CHECK_PCI_CONFIG(info);
}
-static void xgi_lock_init(xgi_info_t * info)
+static void xgi_lock_init(struct xgi_info * info)
{
if (info == NULL)
return;
XGI_ATOMIC_SET(info->use_count, 0);
}
-static void xgi_dev_init(xgi_info_t * info)
+static void xgi_dev_init(struct xgi_info * info)
{
struct pci_dev *pdev = NULL;
struct xgi_dev *dev;
static int __init xgi_init_module(void)
{
- xgi_info_t *info = &xgi_devices[xgi_num_devices];
+ struct xgi_info *info = &xgi_devices[xgi_num_devices];
int i, result;
XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION);
/* init the xgi control device */
{
- xgi_info_t *info_ctl = &xgi_ctl_device;
+ struct xgi_info *info_ctl = &xgi_ctl_device;
xgi_lock_init(info_ctl);
}
/* need a fake device number for control device; just to flag it for msgs */
#define XGI_CONTROL_DEVICE_NUMBER 100
-typedef struct {
+struct xgi_aperture {	/* one mapped address range (mmio/fb/pcie in struct xgi_info); anonymous typedef replaced with a named tag */
	U32 base;		// pcie base is different from fb base
	U32 size;	/* aperture length in bytes — TODO confirm units */
	U8 *vbase;	/* kernel virtual mapping of the aperture (see info->mmio.vbase use in xgi_ge_irq_handler) */
-} xgi_aperture_t;
+};
-typedef struct xgi_screen_info_s {
+struct xgi_screen_info {	/* current display-mode parameters; copied in/out by xgi_put/get_screen_info() */
	U32 scrn_start;	/* screen start address/offset — TODO confirm base */
	U32 scrn_xres;	/* horizontal resolution in pixels */
	U32 scrn_yres;	/* vertical resolution in pixels */
	U32 scrn_bpp;	/* bits per pixel */
	U32 scrn_pitch;	/* scanline pitch — presumably bytes per row; TODO confirm */
-} xgi_screen_info_t;
+};
-typedef struct xgi_sarea_info_s {
+struct xgi_sarea_info {	/* shared-area (SAREA) descriptor; stored into info->sarea_info by xgi_sarea_info() */
	U32 bus_addr;	/* bus address of the shared area */
	U32 size;	/* size of the shared area in bytes */
-} xgi_sarea_info_t;
+};
-typedef struct xgi_info_s {
+struct xgi_info {
struct pci_dev *dev;
int flags;
int device_number;
U8 revision_id;
/* physical characteristics */
- xgi_aperture_t mmio;
- xgi_aperture_t fb;
- xgi_aperture_t pcie;
- xgi_screen_info_t scrn_info;
- xgi_sarea_info_t sarea_info;
+ struct xgi_aperture mmio;
+ struct xgi_aperture fb;
+ struct xgi_aperture pcie;
+ struct xgi_screen_info scrn_info;
+ struct xgi_sarea_info sarea_info;
/* look up table parameters */
U32 *lut_base;
struct semaphore info_sem;
struct semaphore fb_sem;
struct semaphore pcie_sem;
-} xgi_info_t;
+};
-typedef struct xgi_ioctl_post_vbios {
+struct xgi_ioctl_post_vbios {	/* argument for the POST-VBIOS ioctl (xgi_post_vbios) */
	U32 bus;	/* PCI bus number of the target device — TODO confirm */
	U32 slot;	/* PCI slot (device) number — TODO confirm */
-} xgi_ioctl_post_vbios_t;
+};
-typedef enum xgi_mem_location_s {
+enum xgi_mem_location {	/* which pool a memory block came from; replaces xgi_mem_location_t */
	NON_LOCAL = 0,	/* not on-card memory — presumably PCIE/system memory; TODO confirm */
	LOCAL = 1,	/* on-card framebuffer memory (set by xgi_fb_alloc) */
	INVALID = 0x7fffffff	/* sentinel; also pins the enum to at least 32-bit width */
-} xgi_mem_location_t;
+};
enum PcieOwner {
PCIE_2D = 0,
PCIE_INVALID = 0x7fffffff
};
-typedef struct xgi_mem_req_s {
-	xgi_mem_location_t location;
+struct xgi_mem_req {	/* userspace allocation request (FB_ALLOC / PCIE_ALLOC ioctls) */
+	enum xgi_mem_location location;	/* requested pool: LOCAL (fb) or NON_LOCAL */
	unsigned long size;	/* requested size — rounded up to PAGE_SIZE by the allocator */
	unsigned long is_front;	/* nonzero forces a LOCAL (front/fb) allocation in xgi_fb_alloc — TODO confirm exact meaning */
	enum PcieOwner owner;	/* which subsystem owns a PCIE allocation (see xgi_pcie_alloc) */
	unsigned long pid;	/* requesting process id, recorded for cleanup via xgi_mem_pid */
-} xgi_mem_req_t;
+};
-typedef struct xgi_mem_alloc_s {
-	xgi_mem_location_t location;
+struct xgi_mem_alloc {	/* allocation result returned to userspace by the alloc ioctls */
+	enum xgi_mem_location location;	/* pool the block was actually allocated from */
	unsigned long size;	/* granted size in bytes */
	unsigned long bus_addr;	/* bus address; later used as the free key (see xgi_fb_free) */
	unsigned long hw_addr;	/* hardware-visible address — TODO confirm relation to bus_addr */
	unsigned long pid;	/* owning process id, mirrored into the mempid tracking list */
-} xgi_mem_alloc_t;
+};
-typedef struct xgi_chip_info_s {
+struct xgi_chip_info {
U32 device_id;
char device_name[32];
U32 vendor_id;
U32 fb_size;
U32 sarea_bus_addr;
U32 sarea_size;
-} xgi_chip_info_t;
+};
-typedef struct xgi_opengl_cmd_s {
+struct xgi_opengl_cmd {	/* single 32-bit command word; embedded as the header of xgi_mmio_info */
	U32 cmd;	/* opaque command code — semantics not visible here; TODO confirm */
-} xgi_opengl_cmd_t;
+};
-typedef struct xgi_mmio_info_s {
-	xgi_opengl_cmd_t cmd_head;
+struct xgi_mmio_info {	/* MMIO aperture description returned by the MMIO_INFO ioctl */
+	struct xgi_opengl_cmd cmd_head;	/* leading command word — purpose not visible here; TODO confirm */
	void *mmioBase;	/* filled from info->mmio.base by xgi_get_mmio_info() */
	int size;	/* MMIO region size, from info->mmio.size */
-} xgi_mmio_info_t;
+};
typedef enum {
BTYPE_2D = 0,
BTYPE_NONE = 0x7fffffff
} BATCH_TYPE;
-typedef struct xgi_cmd_info_s {
+struct xgi_cmd_info {	/* command-list submission descriptor (SUBMIT_CMDLIST ioctl -> xgi_submit_cmdlist) */
	BATCH_TYPE _firstBeginType;	/* batch type of the first begin; mapped to a begin-port by getCurBatchBeginPort() */
	U32 _firstBeginAddr;	/* address of the first batch begin — TODO confirm address space */
	U32 _firstSize;	/* size of the first batch — TODO confirm units */
	U32 _curDebugID;	/* debug/trace identifier for this submission — TODO confirm */
	U32 _lastBeginAddr;	/* address of the last batch begin — TODO confirm */
	U32 _beginCount;	/* number of begins in the submission — TODO confirm */
-} xgi_cmd_info_t;
+};
-typedef struct xgi_state_info_s {
+struct xgi_state_info {	/* state-transition request for xgi_state_change (e.g. STATE_CONSOLE <-> STATE_GRAPHIC) */
	U32 _fromState;	/* current state code */
	U32 _toState;	/* requested state code */
-} xgi_state_info_t;
+};
-typedef struct cpu_info_s {
+struct cpu_info {	/* CPUID register block: _eax carries the leaf on input; all four hold results on output (see xgi_get_cpu_id) */
	U32 _eax;
	U32 _ebx;
	U32 _ecx;
	U32 _edx;
-} cpu_info_t;
+};
-typedef struct xgi_mem_pid_s {
+struct xgi_mem_pid {	/* per-allocation ownership record on xgi_mempid_list; lets xgi_mem_collect reclaim after process death */
	struct list_head list;	/* linkage into the global xgi_mempid_list */
-	xgi_mem_location_t location;
+	enum xgi_mem_location location;	/* pool of the tracked allocation (LOCAL for fb, see xgi_fb_alloc) */
	unsigned long bus_addr;	/* bus address identifying the allocation (free key, see xgi_fb_free) */
	unsigned long pid;	/* id of the owning process */
-} xgi_mem_pid_t;
+};
/*
* Ioctl definitions
#define XGI_ESC_CPUID (XGI_IOCTL_BASE + 20)
#define XGI_ESC_MEM_COLLECT (XGI_IOCTL_BASE + 21)
-#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t)
+#define XGI_IOCTL_DEVICE_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, struct xgi_chip_info)
#define XGI_IOCTL_POST_VBIOS _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS)
#define XGI_IOCTL_FB_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT)
-#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t)
+#define XGI_IOCTL_FB_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, struct xgi_mem_req)
#define XGI_IOCTL_FB_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long)
#define XGI_IOCTL_PCIE_INIT _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT)
-#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t)
+#define XGI_IOCTL_PCIE_ALLOC _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, struct xgi_mem_req)
#define XGI_IOCTL_PCIE_FREE _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long)
-#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t)
-#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t)
+#define XGI_IOCTL_PUT_SCREEN_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, struct xgi_screen_info)
+#define XGI_IOCTL_GET_SCREEN_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, struct xgi_screen_info)
#define XGI_IOCTL_GE_RESET _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET)
-#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t)
+#define XGI_IOCTL_SAREA_INFO _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, struct xgi_sarea_info)
#define XGI_IOCTL_DUMP_REGISTER _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER)
#define XGI_IOCTL_DEBUG_INFO _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO)
-#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t)
+#define XGI_IOCTL_MMIO_INFO _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, struct xgi_mmio_info)
-#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t)
+#define XGI_IOCTL_SUBMIT_CMDLIST _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, struct xgi_cmd_info)
#define XGI_IOCTL_TEST_RWINKERNEL _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long)
-#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t)
+#define XGI_IOCTL_STATE_CHANGE _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, struct xgi_state_info)
#define XGI_IOCTL_PCIE_CHECK _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK)
-#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t)
+#define XGI_IOCTL_CPUID _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, struct cpu_info)
#define XGI_IOCTL_MAXNR 30
/*
(((offset) >= (info)->pcie.base) \
&& (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))
-extern int xgi_fb_heap_init(xgi_info_t * info);
-extern void xgi_fb_heap_cleanup(xgi_info_t * info);
+extern int xgi_fb_heap_init(struct xgi_info * info);
+extern void xgi_fb_heap_cleanup(struct xgi_info * info);
-extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req,
- xgi_mem_alloc_t * alloc);
-extern void xgi_fb_free(xgi_info_t * info, unsigned long offset);
-extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt);
+extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_req * req,
+ struct xgi_mem_alloc * alloc);
+extern void xgi_fb_free(struct xgi_info * info, unsigned long offset);
+extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt);
-extern int xgi_pcie_heap_init(xgi_info_t * info);
-extern void xgi_pcie_heap_cleanup(xgi_info_t * info);
+extern int xgi_pcie_heap_init(struct xgi_info * info);
+extern void xgi_pcie_heap_cleanup(struct xgi_info * info);
-extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size,
- enum PcieOwner owner, xgi_mem_alloc_t * alloc);
-extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset);
+extern void xgi_pcie_alloc(struct xgi_info * info, unsigned long size,
+ enum PcieOwner owner, struct xgi_mem_alloc * alloc);
+extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset);
extern void xgi_pcie_heap_check(void);
-extern struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info,
+extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
unsigned long address);
-extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address);
+extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address);
-extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req);
-extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req);
+extern void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req);
+extern void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req);
-extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address);
+extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address);
#endif
#define XGI_FB_HEAP_START 0x1000000
-static xgi_mem_heap_t *xgi_fb_heap;
-static kmem_cache_t *xgi_fb_cache_block = NULL;
+static struct xgi_mem_heap *xgi_fb_heap;
+static struct kmem_cache *xgi_fb_cache_block = NULL;
extern struct list_head xgi_mempid_list;
-static xgi_mem_block_t *xgi_mem_new_node(void);
-static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size);
-static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset);
+static struct xgi_mem_block *xgi_mem_new_node(void);
+static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size);
+static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset);
-void xgi_fb_alloc(xgi_info_t * info,
- xgi_mem_req_t * req, xgi_mem_alloc_t * alloc)
+void xgi_fb_alloc(struct xgi_info * info,
+ struct xgi_mem_req * req, struct xgi_mem_alloc * alloc)
{
- xgi_mem_block_t *block;
- xgi_mem_pid_t *mempid_block;
+ struct xgi_mem_block *block;
+ struct xgi_mem_pid *mempid_block;
if (req->is_front) {
alloc->location = LOCAL;
/* manage mempid */
mempid_block =
- kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
+ kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
mempid_block->location = LOCAL;
mempid_block->bus_addr = alloc->bus_addr;
mempid_block->pid = alloc->pid;
}
}
-void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr)
+void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr)
{
- xgi_mem_block_t *block;
+ struct xgi_mem_block *block;
unsigned long offset = bus_addr - info->fb.base;
- xgi_mem_pid_t *mempid_block;
- xgi_mem_pid_t *mempid_freeblock = NULL;
+ struct xgi_mem_pid *mempid_block;
+ struct xgi_mem_pid *mempid_freeblock = NULL;
struct list_head *mempid_list;
if (offset < 0) {
mempid_list = xgi_mempid_list.next;
while (mempid_list != &xgi_mempid_list) {
mempid_block =
- list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ list_entry(mempid_list, struct xgi_mem_pid, list);
if (mempid_block->location == LOCAL
&& mempid_block->bus_addr == bus_addr) {
mempid_freeblock = mempid_block;
}
}
-int xgi_fb_heap_init(xgi_info_t * info)
+int xgi_fb_heap_init(struct xgi_info * info)
{
- xgi_mem_block_t *block;
+ struct xgi_mem_block *block;
- xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL);
+ xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL);
if (!xgi_fb_heap) {
XGI_ERROR("xgi_fb_heap alloc failed\n");
return 0;
INIT_LIST_HEAD(&xgi_fb_heap->sort_list);
xgi_fb_cache_block =
- kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0,
+ kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (NULL == xgi_fb_cache_block) {
}
block =
- (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block,
+ (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
GFP_KERNEL);
if (!block) {
XGI_ERROR("kmem_cache_alloc failed\n");
return 0;
}
-void xgi_fb_heap_cleanup(xgi_info_t * info)
+void xgi_fb_heap_cleanup(struct xgi_info * info)
{
struct list_head *free_list, *temp;
- xgi_mem_block_t *block;
+ struct xgi_mem_block *block;
int i;
if (xgi_fb_heap) {
temp = free_list->next;
while (temp != free_list) {
block =
- list_entry(temp, struct xgi_mem_block_s,
+ list_entry(temp, struct xgi_mem_block,
list);
temp = temp->next;
}
}
-static xgi_mem_block_t *xgi_mem_new_node(void)
+static struct xgi_mem_block *xgi_mem_new_node(void)
{
- xgi_mem_block_t *block;
+ struct xgi_mem_block *block;
block =
- (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block,
+ (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
GFP_KERNEL);
if (!block) {
XGI_ERROR("kmem_cache_alloc failed\n");
}
#if 0
-static void xgi_mem_insert_node_after(xgi_mem_list_t * list,
- xgi_mem_block_t * current,
- xgi_mem_block_t * block);
-static void xgi_mem_insert_node_before(xgi_mem_list_t * list,
- xgi_mem_block_t * current,
- xgi_mem_block_t * block);
-static void xgi_mem_insert_node_head(xgi_mem_list_t * list,
- xgi_mem_block_t * block);
-static void xgi_mem_insert_node_tail(xgi_mem_list_t * list,
- xgi_mem_block_t * block);
-static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block);
+static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block);
+static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block);
+static void xgi_mem_insert_node_head(struct xgi_mem_list * list,
+ struct xgi_mem_block * block);
+static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
+ struct xgi_mem_block * block);
+static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block);
/*
* insert node:block after node:current
*/
-static void xgi_mem_insert_node_after(xgi_mem_list_t * list,
- xgi_mem_block_t * current,
- xgi_mem_block_t * block)
+static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block)
{
block->prev = current;
block->next = current->next;
/*
* insert node:block before node:current
*/
-static void xgi_mem_insert_node_before(xgi_mem_list_t * list,
- xgi_mem_block_t * current,
- xgi_mem_block_t * block)
+static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block)
{
block->prev = current->prev;
block->next = current;
block->prev->next = block;
}
}
-void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block)
+void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block)
{
block->next = list->head;
block->prev = NULL;
list->head = block;
}
-static void xgi_mem_insert_node_tail(xgi_mem_list_t * list,
- xgi_mem_block_t * block)
+static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
+ struct xgi_mem_block * block)
{
block->next = NULL;
block->prev = list->tail;
list->tail = block;
}
-static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block)
+static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block)
{
if (block == list->head) {
list->head = block->next;
block->next = block->prev = NULL;
}
#endif
-static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info,
+static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
unsigned long originalSize)
{
struct list_head *free_list;
- xgi_mem_block_t *block, *free_block, *used_block;
+ struct xgi_mem_block *block, *free_block, *used_block;
unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
while (free_list != &xgi_fb_heap->free_list) {
XGI_INFO("free_list: 0x%px \n", free_list);
- block = list_entry(free_list, struct xgi_mem_block_s, list);
+ block = list_entry(free_list, struct xgi_mem_block, list);
if (size <= block->size) {
break;
}
return (used_block);
}
-static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset)
+static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset)
{
struct list_head *free_list, *used_list;
- xgi_mem_block_t *used_block = NULL, *block = NULL;
- xgi_mem_block_t *prev, *next;
+ struct xgi_mem_block *used_block = NULL, *block = NULL;
+ struct xgi_mem_block *prev, *next;
unsigned long upper;
unsigned long lower;
used_list = xgi_fb_heap->used_list.next;
while (used_list != &xgi_fb_heap->used_list) {
- block = list_entry(used_list, struct xgi_mem_block_s, list);
+ block = list_entry(used_list, struct xgi_mem_block, list);
if (block->offset == offset) {
break;
}
free_list = xgi_fb_heap->free_list.next;
while (free_list != &xgi_fb_heap->free_list) {
- block = list_entry(free_list, struct xgi_mem_block_s, list);
+ block = list_entry(free_list, struct xgi_mem_block, list);
if (block->offset == upper) {
next = block;
#ifndef _XGI_FB_H_
#define _XGI_FB_H_
-typedef struct xgi_mem_block_s {
+struct xgi_mem_block {	/* one extent in the fb heap; lives on exactly one of the heap's free/used lists */
	struct list_head list;	/* linkage into xgi_mem_heap free_list/used_list */
	unsigned long offset;	/* start offset within the heap; lookup key in xgi_mem_free() */
	unsigned long size;	/* extent length in bytes (page-rounded by xgi_mem_alloc) */
	atomic_t use_count;	/* mmap reference count (inc/dec in the vma open/close handlers) */
-} xgi_mem_block_t;
+};
-typedef struct xgi_mem_heap_s {
+struct xgi_mem_heap {	/* fb heap head: list_head-based lists replace the hand-rolled linked lists removed below */
	struct list_head free_list;	/* xgi_mem_block extents available for allocation */
	struct list_head used_list;	/* extents currently handed out */
	struct list_head sort_list;	/* extra ordering list — use not visible in this chunk; TODO confirm */
	unsigned long max_freesize;	/* largest free extent — presumably for quick-fail; TODO confirm */
	spinlock_t lock;	/* protects the heap lists */
-} xgi_mem_heap_t;
-
-#if 0
-typedef struct xgi_mem_block_s {
-	struct xgi_mem_block_s *next;
-	struct xgi_mem_block_s *prev;
-	unsigned long offset;
-	unsigned long size;
-	atomic_t use_count;
-} xgi_mem_block_t;
-
-typedef struct xgi_mem_list_s {
-	xgi_mem_block_t *head;
-	xgi_mem_block_t *tail;
-} xgi_mem_list_t;
-
-typedef struct xgi_mem_heap_s {
-	xgi_mem_list_t *free_list;
-	xgi_mem_list_t *used_list;
-	xgi_mem_list_t *sort_list;
-	unsigned long max_freesize;
-	spinlock_t lock;
-} xgi_mem_heap_t;
-#endif
+};
#endif
free_pages(ptr, order); \
}
-typedef struct xgi_pte_s {
+struct xgi_pte {	/* physical/virtual address pair for one mapped page (see XGI_SET_PAGE_ATTRIB_* helpers) */
	unsigned long phys_addr;	/* physical address; converted back to a struct page via virt_to_page(__va(...)) */
	unsigned long virt_addr;	/* kernel virtual address of the same page */
-} xgi_pte_t;
+};
/*
* AMD Athlon processors expose a subtle bug in the Linux
* 2.4.20 is the first kernel to address it properly. The
* page_attr API provides the means to solve the problem.
*/
-static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr)
+static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr)
{
struct page *page = virt_to_page(__va(page_ptr->phys_addr));
change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
}
-static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr)
+static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr)
{
struct page *page = virt_to_page(__va(page_ptr->phys_addr));
change_page_attr(page, 1, PAGE_KERNEL);
#define XGILockPage(page) SetPageLocked(page)
#define XGIUnlockPage(page) ClearPageLocked(page)
-/*
- * hide a pointer to struct xgi_info_t in a file-private info
- */
-
-typedef struct {
-	void *info;
+struct xgi_file_private {	/* per-open-file state stashed in filp->private_data (see XGI_GET_FP/XGI_INFO_FROM_FP) */
+	struct xgi_info *info;	/* owning device; was an untyped void* before this change */
	U32 num_events;	/* pending event count — consumer not visible in this chunk; TODO confirm */
	spinlock_t fp_lock;	/* protects this per-file state (taken with eflags in xgi_kern_poll) */
	wait_queue_head_t wait_queue;	/* poll/select wait queue, initialized in xgi_alloc_file_private() */
-} xgi_file_private_t;
+};
#define FILE_PRIVATE(filp) ((filp)->private_data)
-#define XGI_GET_FP(filp) ((xgi_file_private_t *) FILE_PRIVATE(filp))
+#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp))
/* for the card devices */
#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info)
#include "xgi_regs.h"
#include "xgi_pcie.h"
-void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req)
+void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req)
{
req->device_id = info->device_id;
req->device_name[0] = 'x';
req->sarea_size = info->sarea_info.size;
}
-void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req)
+void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req)
{
req->mmioBase = (void *)info->mmio.base;
req->size = info->mmio.size;
}
-void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req)
+void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req)
{
info->scrn_info.scrn_start = req->scrn_start;
info->scrn_info.scrn_xres = req->scrn_xres;
info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch);
}
-void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req)
+void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req)
{
req->scrn_start = info->scrn_info.scrn_start;
req->scrn_xres = info->scrn_info.scrn_xres;
req->scrn_yres, req->scrn_bpp, req->scrn_pitch);
}
-void xgi_ge_reset(xgi_info_t * info)
+void xgi_ge_reset(struct xgi_info * info)
{
xgi_disable_ge(info);
xgi_enable_ge(info);
}
-void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req)
+void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req)
{
info->sarea_info.bus_addr = req->bus_addr;
info->sarea_info.size = req->size;
static U32 s_invalid_begin = 0;
-BOOL xgi_ge_irq_handler(xgi_info_t * info)
+BOOL xgi_ge_irq_handler(struct xgi_info * info)
{
volatile U8 *mmio_vbase = info->mmio.vbase;
volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800);
return FALSE;
}
-BOOL xgi_crt_irq_handler(xgi_info_t * info)
+BOOL xgi_crt_irq_handler(struct xgi_info * info)
{
BOOL ret = FALSE;
U8 save_3ce = bReadReg(0x3ce);
return (ret);
}
-BOOL xgi_dvi_irq_handler(xgi_info_t * info)
+BOOL xgi_dvi_irq_handler(struct xgi_info * info)
{
BOOL ret = FALSE;
U8 save_3ce = bReadReg(0x3ce);
return (ret);
}
-void xgi_dump_register(xgi_info_t * info)
+void xgi_dump_register(struct xgi_info * info)
{
int i, j;
unsigned char temp;
}
}
-void xgi_restore_registers(xgi_info_t * info)
+void xgi_restore_registers(struct xgi_info * info)
{
bOut3x5(0x13, 0);
bOut3x5(0x8b, 2);
}
-void xgi_waitfor_pci_idle(xgi_info_t * info)
+void xgi_waitfor_pci_idle(struct xgi_info * info)
{
#define WHOLD_GE_STATUS 0x2800
#define IDLE_MASK ~0x90200000
}
}
-int xgi_get_cpu_id(struct cpu_info_s *arg)
+int xgi_get_cpu_id(struct cpu_info *arg)
{
int op = arg->_eax;
__asm__("cpuid":"=a"(arg->_eax),
/*memory collect function*/
extern struct list_head xgi_mempid_list;
-void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt)
+void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt)
{
- xgi_mem_pid_t *mempid_block;
+ struct xgi_mem_pid *mempid_block;
struct list_head *mempid_list;
struct task_struct *p, *find;
unsigned int cnt = 0;
while (mempid_list != &xgi_mempid_list) {
mempid_block =
- list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ list_entry(mempid_list, struct xgi_mem_pid, list);
mempid_list = mempid_list->next;
find = NULL;
#ifndef _XGI_MISC_H_
#define _XGI_MISC_H_
-extern void xgi_dump_register(xgi_info_t * info);
-extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req);
-extern void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req);
-extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req);
-extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req);
-extern void xgi_ge_reset(xgi_info_t * info);
-extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req);
-extern int xgi_get_cpu_id(struct cpu_info_s *arg);
+extern void xgi_dump_register(struct xgi_info * info);
+extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req);
+extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req);
+extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req);
+extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req);
+extern void xgi_ge_reset(struct xgi_info * info);
+extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req);
+extern int xgi_get_cpu_id(struct cpu_info *arg);
-extern void xgi_restore_registers(xgi_info_t * info);
-extern BOOL xgi_ge_irq_handler(xgi_info_t * info);
-extern BOOL xgi_crt_irq_handler(xgi_info_t * info);
-extern BOOL xgi_dvi_irq_handler(xgi_info_t * info);
-extern void xgi_waitfor_pci_idle(xgi_info_t * info);
+extern void xgi_restore_registers(struct xgi_info * info);
+extern BOOL xgi_ge_irq_handler(struct xgi_info * info);
+extern BOOL xgi_crt_irq_handler(struct xgi_info * info);
+extern BOOL xgi_dvi_irq_handler(struct xgi_info * info);
+extern void xgi_waitfor_pci_idle(struct xgi_info * info);
#endif
#include "xgi_pcie.h"
#include "xgi_misc.h"
-static xgi_pcie_heap_t *xgi_pcie_heap = NULL;
-static kmem_cache_t *xgi_pcie_cache_block = NULL;
-static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL;
-static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL;
-static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL;
+static struct xgi_pcie_heap *xgi_pcie_heap = NULL;
+static struct kmem_cache *xgi_pcie_cache_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL;
extern struct list_head xgi_mempid_list;
static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
free_pages(page_addr, page_order);
}
-static int xgi_pcie_lut_init(xgi_info_t * info)
+static int xgi_pcie_lut_init(struct xgi_info * info)
{
unsigned char *page_addr = NULL;
unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
return 0;
}
-static void xgi_pcie_lut_cleanup(xgi_info_t * info)
+static void xgi_pcie_lut_cleanup(struct xgi_info * info)
{
if (info->lut_base) {
XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
}
}
-static xgi_pcie_block_t *xgi_pcie_new_node(void)
+static struct xgi_pcie_block *xgi_pcie_new_node(void)
{
- xgi_pcie_block_t *block =
- (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block,
+ struct xgi_pcie_block *block =
+ (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block,
GFP_KERNEL);
if (block == NULL) {
return NULL;
return block;
}
-static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block)
+static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block)
{
struct page *page;
- xgi_page_block_t *page_block = block->page_block;
- xgi_page_block_t *free_block;
+ struct xgi_page_block *page_block = block->page_block;
+ struct xgi_page_block *free_block;
unsigned long page_count = 0;
int i;
}
}
-int xgi_pcie_heap_init(xgi_info_t * info)
+int xgi_pcie_heap_init(struct xgi_info * info)
{
- xgi_pcie_block_t *block;
+ struct xgi_pcie_block *block;
if (!xgi_pcie_lut_init(info)) {
XGI_ERROR("xgi_pcie_lut_init failed\n");
}
xgi_pcie_heap =
- (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL);
+ (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL);
if (!xgi_pcie_heap) {
XGI_ERROR("xgi_pcie_heap alloc failed\n");
goto fail1;
xgi_pcie_heap->max_freesize = info->pcie.size;
xgi_pcie_cache_block =
- kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0,
+ kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0,
SLAB_HWCACHE_ALIGN, NULL, NULL);
if (NULL == xgi_pcie_cache_block) {
goto fail2;
}
- block = (xgi_pcie_block_t *) xgi_pcie_new_node();
+ block = (struct xgi_pcie_block *) xgi_pcie_new_node();
if (!block) {
XGI_ERROR("xgi_pcie_new_node failed\n");
goto fail3;
void xgi_pcie_heap_check(void)
{
struct list_head *useList, *temp;
- xgi_pcie_block_t *block;
+ struct xgi_pcie_block *block;
unsigned int ownerIndex;
#ifdef XGI_DEBUG
char *ownerStr[6] =
temp = useList->next;
XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize);
while (temp != useList) {
- block = list_entry(temp, struct xgi_pcie_block_s, list);
+ block = list_entry(temp, struct xgi_pcie_block, list);
if (block->owner == PCIE_2D)
ownerIndex = 0;
else if (block->owner > PCIE_3D_TEXTURE
}
}
-void xgi_pcie_heap_cleanup(xgi_info_t * info)
+void xgi_pcie_heap_cleanup(struct xgi_info * info)
{
struct list_head *free_list, *temp;
- xgi_pcie_block_t *block;
+ struct xgi_pcie_block *block;
int j;
xgi_pcie_lut_cleanup(info);
while (temp != free_list) {
block =
- list_entry(temp, struct xgi_pcie_block_s,
+ list_entry(temp, struct xgi_pcie_block,
list);
XGI_INFO
("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
}
}
-static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info,
+static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info,
unsigned long originalSize,
enum PcieOwner owner)
{
struct list_head *free_list;
- xgi_pcie_block_t *block, *used_block, *free_block;
- xgi_page_block_t *page_block, *prev_page_block;
+ struct xgi_pcie_block *block, *used_block, *free_block;
+ struct xgi_page_block *page_block, *prev_page_block;
struct page *page;
unsigned long page_order = 0, count = 0, index = 0;
unsigned long page_addr = 0;
free_list = xgi_pcie_heap->free_list.next;
while (free_list != &xgi_pcie_heap->free_list) {
//XGI_INFO("free_list: 0x%px \n", free_list);
- block = list_entry(free_list, struct xgi_pcie_block_s, list);
+ block = list_entry(free_list, struct xgi_pcie_block, list);
if (size <= block->size) {
break;
}
used_block->page_order);
used_block->page_block = NULL;
- //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL);
- //if (!used_block->page_block) return NULL;
+ //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL);
+ //if (!used_block->page_block) return NULL;
//used_block->page_block->next = NULL;
used_block->page_table =
- (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count,
+ (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
GFP_KERNEL);
if (used_block->page_table == NULL) {
goto fail;
if (page_block == NULL) {
page_block =
- (xgi_page_block_t *)
- kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL);
+ (struct xgi_page_block *)
+ kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL);
if (!page_block) {
XGI_ERROR
("Can't get memory for page_block! \n");
return NULL;
}
-static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info,
+static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info,
unsigned long offset)
{
struct list_head *free_list, *used_list;
- xgi_pcie_block_t *used_block, *block = NULL;
- xgi_pcie_block_t *prev, *next;
+ struct xgi_pcie_block *used_block, *block = NULL;
+ struct xgi_pcie_block *prev, *next;
unsigned long upper, lower;
used_list = xgi_pcie_heap->used_list.next;
while (used_list != &xgi_pcie_heap->used_list) {
- block = list_entry(used_list, struct xgi_pcie_block_s, list);
+ block = list_entry(used_list, struct xgi_pcie_block, list);
if (block->offset == offset) {
break;
}
free_list = xgi_pcie_heap->free_list.next;
while (free_list != &xgi_pcie_heap->free_list) {
- block = list_entry(free_list, struct xgi_pcie_block_s, list);
+ block = list_entry(free_list, struct xgi_pcie_block, list);
if (block->offset == upper) {
next = block;
} else if ((block->offset + block->size) == lower) {
return (used_block);
}
-void xgi_pcie_alloc(xgi_info_t * info, unsigned long size,
- enum PcieOwner owner, xgi_mem_alloc_t * alloc)
+void xgi_pcie_alloc(struct xgi_info * info, unsigned long size,
+ enum PcieOwner owner, struct xgi_mem_alloc * alloc)
{
- xgi_pcie_block_t *block;
- xgi_mem_pid_t *mempid_block;
+ struct xgi_pcie_block *block;
+ struct xgi_mem_pid *mempid_block;
xgi_down(info->pcie_sem);
block = xgi_pcie_mem_alloc(info, size, owner);
*/
if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) {
mempid_block =
- kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
+ kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
if (!mempid_block)
XGI_ERROR("mempid_block alloc failed\n");
mempid_block->location = NON_LOCAL;
}
}
-void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr)
+void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr)
{
- xgi_pcie_block_t *block;
+ struct xgi_pcie_block *block;
unsigned long offset = bus_addr - info->pcie.base;
- xgi_mem_pid_t *mempid_block;
- xgi_mem_pid_t *mempid_freeblock = NULL;
+ struct xgi_mem_pid *mempid_block;
+ struct xgi_mem_pid *mempid_freeblock = NULL;
struct list_head *mempid_list;
char isvertex = 0;
int processcnt;
mempid_list = xgi_mempid_list.next;
while (mempid_list != &xgi_mempid_list) {
mempid_block =
- list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ list_entry(mempid_list, struct xgi_mem_pid, list);
if (mempid_block->location == NON_LOCAL
&& mempid_block->bus_addr == 0xFFFFFFFF) {
++processcnt;
mempid_list = xgi_mempid_list.next;
while (mempid_list != &xgi_mempid_list) {
mempid_block =
- list_entry(mempid_list, struct xgi_mem_pid_s, list);
+ list_entry(mempid_list, struct xgi_mem_pid, list);
if (mempid_block->location == NON_LOCAL
&& ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF)
|| (!isvertex && mempid_block->bus_addr == bus_addr))) {
* given a bus address, fid the pcie mem block
* uses the bus address as the key.
*/
-struct xgi_pcie_block_s *xgi_find_pcie_block(xgi_info_t * info,
- unsigned long address)
+struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
+ unsigned long address)
{
struct list_head *used_list;
- xgi_pcie_block_t *block;
+ struct xgi_pcie_block *block;
int i;
used_list = xgi_pcie_heap->used_list.next;
while (used_list != &xgi_pcie_heap->used_list) {
- block = list_entry(used_list, struct xgi_pcie_block_s, list);
+ block = list_entry(used_list, struct xgi_pcie_block, list);
if (block->bus_addr == address) {
return block;
* Returns CPU virtual address. Assumes the CPU VAddr is continuous in not
* the same block
*/
-void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address)
+void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address)
{
struct list_head *used_list = xgi_pcie_heap->used_list.next;
const unsigned long offset_in_page = address & (PAGE_SIZE - 1);
used_list, address, PAGE_SIZE - 1, offset_in_page);
while (used_list != &xgi_pcie_heap->used_list) {
- xgi_pcie_block_t *block =
- list_entry(used_list, struct xgi_pcie_block_s, list);
+ struct xgi_pcie_block *block =
+ list_entry(used_list, struct xgi_pcie_block, list);
XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n",
block, block->hw_addr, block->size);
return NULL;
}
-void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req)
+void xgi_read_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req)
{
}
-void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req)
+void xgi_write_pcie_mem(struct xgi_info * info, struct xgi_mem_req * req)
{
}
/*
address -- GE hw address
*/
-void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address)
+void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address)
{
unsigned long *virtaddr = 0;
if (address == 0) {
#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */
#endif
-typedef struct xgi_page_block_s {
- struct xgi_page_block_s *next;
+struct xgi_page_block {
+ struct xgi_page_block *next;
unsigned long phys_addr;
unsigned long virt_addr;
unsigned long page_count;
unsigned long page_order;
-} xgi_page_block_t;
+};
-typedef struct xgi_pcie_block_s {
+struct xgi_pcie_block {
struct list_head list;
unsigned long offset; /* block's offset in pcie memory, begin from 0 */
unsigned long size; /* The block size. */
unsigned long page_count;
unsigned long page_order;
- xgi_page_block_t *page_block;
- xgi_pte_t *page_table; /* list of physical pages allocated */
+ struct xgi_page_block *page_block;
+ struct xgi_pte *page_table; /* list of physical pages allocated */
atomic_t use_count;
enum PcieOwner owner;
unsigned long processID;
-} xgi_pcie_block_t;
+};
-typedef struct xgi_pcie_heap_s {
+struct xgi_pcie_heap {
struct list_head free_list;
struct list_head used_list;
struct list_head sort_list;
unsigned long max_freesize;
-} xgi_pcie_heap_t;
+};
#endif
#endif
/* Hardware access functions */
-static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data)
+static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data)
{
OUTB(0x3C4, index);
OUTB(0x3C5, data);
}
-static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data)
+static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data)
{
OUTB(0x3D4, index);
OUTB(0x3D5, data);
}
-static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data)
+static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data)
{
OUTB(0x3CE, index);
OUTB(0x3CF, data);
}
-static inline u8 IN3C5B(xgi_info_t * info, u8 index)
+static inline u8 IN3C5B(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3C4, index);
return data;
}
-static inline u8 IN3X5B(xgi_info_t * info, u8 index)
+static inline u8 IN3X5B(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3D4, index);
return data;
}
-static inline u8 IN3CFB(xgi_info_t * info, u8 index)
+static inline u8 IN3CFB(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3CE, index);
return data;
}
-static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data)
+static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data)
{
OUTB(0x3C4, index);
OUTB(0x3C5, data);
}
-static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data)
+static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data)
{
OUTB(0x3D4, index);
OUTB(0x3D5, data);
}
-static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data)
+static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data)
{
OUTB(0x3CE, index);
OUTB(0x3CF, data);
}
-static inline u8 IN3C5W(xgi_info_t * info, u8 index)
+static inline u8 IN3C5W(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3C4, index);
return data;
}
-static inline u8 IN3X5W(xgi_info_t * info, u8 index)
+static inline u8 IN3X5W(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3D4, index);
return data;
}
-static inline u8 IN3CFW(xgi_info_t * info, u8 index)
+static inline u8 IN3CFW(struct xgi_info * info, u8 index)
{
volatile u8 data = 0;
OUTB(0x3CE, index);
return data;
}
-static inline u8 readAttr(xgi_info_t * info, u8 index)
+static inline u8 readAttr(struct xgi_info * info, u8 index)
{
INB(0x3DA); /* flip-flop to index */
OUTB(0x3C0, index);
return INB(0x3C1);
}
-static inline void writeAttr(xgi_info_t * info, u8 index, u8 value)
+static inline void writeAttr(struct xgi_info * info, u8 index, u8 value)
{
INB(0x3DA); /* flip-flop to index */
OUTB(0x3C0, index);
/*
* Graphic engine register (2d/3d) acessing interface
*/
-static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data)
+static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data)
{
/* Jong 05/25/2006 */
XGI_INFO("Jong-WriteRegDWord()-Begin \n");
XGI_INFO("Jong-WriteRegDWord()-End \n");
}
-static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data)
+static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data)
{
*(volatile u16 *)(info->mmio.vbase + addr) = (data);
}
-static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data)
+static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data)
{
*(volatile u8 *)(info->mmio.vbase + addr) = (data);
}
-static inline u32 ReadRegDWord(xgi_info_t * info, u32 addr)
+static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr)
{
volatile u32 data;
data = *(volatile u32 *)(info->mmio.vbase + addr);
return data;
}
-static inline u16 ReadRegWord(xgi_info_t * info, u32 addr)
+static inline u16 ReadRegWord(struct xgi_info * info, u32 addr)
{
volatile u16 data;
data = *(volatile u16 *)(info->mmio.vbase + addr);
return data;
}
-static inline u8 ReadRegByte(xgi_info_t * info, u32 addr)
+static inline u8 ReadRegByte(struct xgi_info * info, u32 addr)
{
volatile u8 data;
data = *(volatile u8 *)(info->mmio.vbase + addr);
}
#if 0
-extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data);
-extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data);
-extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data);
-extern u8 IN3C5B(xgi_info_t * info, u8 index);
-extern u8 IN3X5B(xgi_info_t * info, u8 index);
-extern u8 IN3CFB(xgi_info_t * info, u8 index);
-extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data);
-extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data);
-extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data);
-extern u8 IN3C5W(xgi_info_t * info, u8 index);
-extern u8 IN3X5W(xgi_info_t * info, u8 index);
-extern u8 IN3CFW(xgi_info_t * info, u8 index);
-
-extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data);
-extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data);
-extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data);
-extern u32 ReadRegDWord(xgi_info_t * info, u32 addr);
-extern u16 ReadRegWord(xgi_info_t * info, u32 addr);
-extern u8 ReadRegByte(xgi_info_t * info, u32 addr);
+extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data);
+extern u8 IN3C5B(struct xgi_info * info, u8 index);
+extern u8 IN3X5B(struct xgi_info * info, u8 index);
+extern u8 IN3CFB(struct xgi_info * info, u8 index);
+extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data);
+extern u8 IN3C5W(struct xgi_info * info, u8 index);
+extern u8 IN3X5W(struct xgi_info * info, u8 index);
+extern u8 IN3CFW(struct xgi_info * info, u8 index);
+
+extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data);
+extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data);
+extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data);
+extern u32 ReadRegDWord(struct xgi_info * info, u32 addr);
+extern u16 ReadRegWord(struct xgi_info * info, u32 addr);
+extern u8 ReadRegByte(struct xgi_info * info, u32 addr);
extern void EnableProtect();
extern void DisableProtect();
#define wReadReg(addr) ReadRegWord(info, addr)
#define bReadReg(addr) ReadRegByte(info, addr)
-static inline void xgi_protect_all(xgi_info_t * info)
+static inline void xgi_protect_all(struct xgi_info * info)
{
OUTB(0x3C4, 0x11);
OUTB(0x3C5, 0x92);
}
-static inline void xgi_unprotect_all(xgi_info_t * info)
+static inline void xgi_unprotect_all(struct xgi_info * info)
{
OUTB(0x3C4, 0x11);
OUTB(0x3C5, 0x92);
}
-static inline void xgi_enable_mmio(xgi_info_t * info)
+static inline void xgi_enable_mmio(struct xgi_info * info)
{
u8 protect = 0;
OUTB(0x3C5, protect);
}
-static inline void xgi_disable_mmio(xgi_info_t * info)
+static inline void xgi_disable_mmio(struct xgi_info * info)
{
u8 protect = 0;
outb(protect, 0x3C5);
}
-static inline void xgi_enable_ge(xgi_info_t * info)
+static inline void xgi_enable_ge(struct xgi_info * info)
{
unsigned char bOld3cf2a = 0;
int wait = 0;
bOut3cf(0x2a, bOld3cf2a);
}
-static inline void xgi_disable_ge(xgi_info_t * info)
+static inline void xgi_disable_ge(struct xgi_info * info)
{
int wait = 0;
bOut3x5(0x36, 0);
}
-static inline void xgi_enable_dvi_interrupt(xgi_info_t * info)
+static inline void xgi_enable_dvi_interrupt(struct xgi_info * info)
{
Out3cf(0x39, In3cf(0x39) & ~0x01); //Set 3cf.39 bit 0 to 0
Out3cf(0x39, In3cf(0x39) | 0x01); //Set 3cf.39 bit 0 to 1
Out3cf(0x39, In3cf(0x39) | 0x02);
}
-static inline void xgi_disable_dvi_interrupt(xgi_info_t * info)
+static inline void xgi_disable_dvi_interrupt(struct xgi_info * info)
{
Out3cf(0x39, In3cf(0x39) & ~0x02);
}
-static inline void xgi_enable_crt1_interrupt(xgi_info_t * info)
+static inline void xgi_enable_crt1_interrupt(struct xgi_info * info)
{
Out3cf(0x3d, In3cf(0x3d) | 0x04);
Out3cf(0x3d, In3cf(0x3d) & ~0x04);
Out3cf(0x3d, In3cf(0x3d) | 0x08);
}
-static inline void xgi_disable_crt1_interrupt(xgi_info_t * info)
+static inline void xgi_disable_crt1_interrupt(struct xgi_info * info)
{
Out3cf(0x3d, In3cf(0x3d) & ~0x08);
}