/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/

#include "drmP.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"

#include "drm_pciids.h"

static struct pci_device_id pciidlist[] = {
	xgi_PCI_IDS
};

#ifdef XGI_HAVE_FENCE
extern struct drm_fence_driver xgi_fence_driver;
#endif /* XGI_HAVE_FENCE */

int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);

static struct drm_ioctl_desc xgi_ioctls[] = {
	DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap,
		      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl,
		      DRM_AUTH | DRM_MASTER),
	DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
};

static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
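
/*
 * For reference, userspace reaches these entry points through the generic
 * DRM command ioctls.  A minimal sketch using libdrm (the GART size is an
 * arbitrary example value; struct xgi_bootstrap comes from the shared
 * xgi_drm.h header, not from this file):
 *
 *	struct xgi_bootstrap bs = {
 *		.gart = { .size = 16 * 1024 * 1024 },
 *	};
 *	int err = drmCommandWriteRead(fd, DRM_XGI_BOOTSTRAP,
 *				      &bs, sizeof(bs));
 *	// On success, bs.gart.handle can be handed to drmMap().
 */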

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
static int xgi_driver_unload(struct drm_device *dev);
static void xgi_driver_lastclose(struct drm_device *dev);
static void xgi_reclaim_buffers_locked(struct drm_device *dev,
				       struct drm_file *filp);
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
static int xgi_kern_isr_postinstall(struct drm_device *dev);

static struct drm_driver driver = {
	.driver_features =
		DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
		DRIVER_IRQ_SHARED | DRIVER_SG,
	.dev_priv_size = sizeof(struct xgi_info),
	.load = xgi_driver_load,
	.unload = xgi_driver_unload,
	.lastclose = xgi_driver_lastclose,
	.dma_quiescent = NULL,
	.irq_preinstall = NULL,
	.irq_postinstall = xgi_kern_isr_postinstall,
	.irq_uninstall = NULL,
	.irq_handler = xgi_kern_isr,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = xgi_ioctls,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.ioctl = drm_ioctl,
		.mmap = drm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
		.compat_ioctl = xgi_compat_ioctl,
#endif
	},

	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},

#ifdef XGI_HAVE_FENCE
	.fence_driver = &xgi_fence_driver,
#endif /* XGI_HAVE_FENCE */

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}

static int __init xgi_init(void)
{
	driver.num_ioctls = xgi_max_ioctl;
	return drm_init(&driver, pciidlist);
}

static void __exit xgi_exit(void)
{
	drm_exit(&driver);
}

module_init(xgi_init);
module_exit(xgi_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

void xgi_engine_init(struct xgi_info *info)
{
	u8 temp;

	/* Unprotect the extended registers (cf. the commented-out
	 * re-protect at the end of this function). */
	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* -------> copied from OT2D
	 * PCI Retry Control Register:
	 * disable PCI read retry, enable write retry in memory. (10xx xxxx)b
	 */
	temp = IN3X5B(info->mmio_map, 0x55);
	OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);

	/* Enable linear addressing of the card. */
	temp = IN3X5B(info->mmio_map, 0x21);
	OUT3X5B(info->mmio_map, 0x21, temp | 0x20);

	/* Enable the 32-bit internal data path. */
	temp = IN3X5B(info->mmio_map, 0x2A);
	OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);

	/* Enable PCI burst write, disable burst read, and enable MMIO.
	 *
	 * 0x3D4.39 bit layout:
	 * 7       ---- Pixel Data Format; 1: big endian, 0: little endian
	 * 6 5 4 3 ---- Memory Data with Big Endian Format, BE[3:0]# with
	 *              Big Endian Format
	 * 2       ---- PCI Burst Write Enable
	 * 1       ---- PCI Burst Read Enable
	 * 0       ---- MMIO Control
	 */
	temp = IN3X5B(info->mmio_map, 0x39);
	/* Set bits 2 and 0, clear bit 1. */
	OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);

	/* Enable GEIO decode. */
	/* temp = IN3X5B(info->mmio_map, 0x29);
	 * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
	 */

	/* Enable the graphics engine I/O PCI retry function. */
	/* temp = IN3X5B(info->mmio_map, 0x62);
	 * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
	 */

	/* Protect all registers except those protected by 3C5.0E.7. */
	/* OUT3C5B(info->mmio_map, 0x11, 0x87); */
}
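
/*
 * The IN/OUT helpers above use the classic indexed-VGA access pattern
 * through the MMIO-relocated copies of the VGA ports: write the register
 * index to the index port, then access the data port (3C4/3C5 for
 * sequencer registers, 3D4/3D5 for CRTC registers, 3CE/3CF for
 * graphics-controller registers).  Roughly, for a CRTC write (see
 * xgi_regs.h for the actual definitions):
 *
 *	DRM_WRITE8(map, 0x3D4, index);
 *	DRM_WRITE8(map, 0x3D5, data);
 *
 * The read-modify-write sequences preserve whatever bits the driver
 * does not own.
 */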

int xgi_bootstrap(struct drm_device *dev, void *data,
		  struct drm_file *filp)
{
	struct xgi_info *info = dev->dev_private;
	struct xgi_bootstrap *bs = (struct xgi_bootstrap *) data;
	struct drm_map_list *maplist;
	int err;

	DRM_SPININIT(&info->fence_lock, "fence lock");
	info->next_sequence = 0;
	info->complete_sequence = 0;

	if (info->mmio_map == NULL) {
		err = drm_addmap(dev, info->mmio.base, info->mmio.size,
				 _DRM_REGISTERS, _DRM_KERNEL,
				 &info->mmio_map);
		if (err) {
			DRM_ERROR("Unable to map MMIO region: %d\n", err);
			return err;
		}

		xgi_enable_mmio(info);
		xgi_engine_init(info);
	}

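	/* GR 3CF.54 evidently reports the installed framebuffer size in
	 * units of 8 MiB. */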
	info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;

	DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n",
		 (unsigned long) info->fb.base, info->fb.size);

	if ((info->fb.base == 0) || (info->fb.size == 0)) {
		DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->fb.base, info->fb.size);
		return -EINVAL;
	}

	/* Initialize the framebuffer heap. */
	if (!info->fb_heap_initialized) {
		err = xgi_fb_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize FB heap.\n");
			return err;
		}
	}

	info->pcie.size = bs->gart.size;

	/* Initialize the GART heap. */
	if (!info->pcie_heap_initialized) {
		err = xgi_pcie_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize GART heap.\n");
			return err;
		}

		/* Allocate 1 MiB for the command buffer, which holds the
		 * flush2D batch array. */
		err = xgi_cmdlist_initialize(info, 0x100000, filp);
		if (err) {
			DRM_ERROR("xgi_cmdlist_initialize() failed\n");
			return err;
		}
	}

	if (info->pcie_map == NULL) {
		err = drm_addmap(info->dev, 0, info->pcie.size,
				 _DRM_SCATTER_GATHER, _DRM_LOCKED,
				 &info->pcie_map);
		if (err) {
			DRM_ERROR("Could not add map for GART backing "
				  "store.\n");
			return err;
		}
	}

	maplist = drm_find_matching_map(dev, info->pcie_map);
	if (maplist == NULL) {
		DRM_ERROR("Could not find GART backing store map.\n");
		return -EINVAL;
	}

	bs->gart = *info->pcie_map;
	bs->gart.handle = (void *)(unsigned long) maplist->user_token;
	return 0;
}
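
/*
 * The handle returned in bs->gart.handle is the map's user_token, so a
 * client can mmap the GART backing store.  Illustrative userspace use,
 * assuming libdrm:
 *
 *	drmAddress gart;
 *	if (drmMap(fd, (drm_handle_t) bs.gart.handle,
 *		   bs.gart.size, &gart) == 0) {
 *		... build command lists in the GART aperture ...
 *		drmUnmap(gart, bs.gart.size);
 *	}
 */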

void xgi_driver_lastclose(struct drm_device *dev)
{
	struct xgi_info *info = dev->dev_private;

	if (info == NULL)
		return;

	if (info->mmio_map != NULL) {
		xgi_cmdlist_cleanup(info);
		xgi_disable_ge(info);
		xgi_disable_mmio(info);
	}

	/* The core DRM lastclose routine will destroy all of our
	 * mappings for us.  NULL out the pointers here so that
	 * xgi_bootstrap can do the right thing.
	 */
	info->pcie_map = NULL;
	info->mmio_map = NULL;

	if (info->pcie_heap_initialized) {
		drm_ati_pcigart_cleanup(dev, &info->gart_info);
	}

	if (info->fb_heap_initialized
	    || info->pcie_heap_initialized) {
		drm_sman_cleanup(&info->sman);

		info->fb_heap_initialized = false;
		info->pcie_heap_initialized = false;
	}
}
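
/*
 * Per-file cleanup: when a client closes, drop every simple-memory-manager
 * allocation owned by that file.  drm_sman_owner_clean() returns nonzero
 * when the file owns nothing that needs reclaiming; if the driver had a
 * dma_quiescent hook (it is NULL here), the engine would be idled before
 * the blocks are freed.
 */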
void xgi_reclaim_buffers_locked(struct drm_device *dev,
				struct drm_file *filp)
{
	struct xgi_info *info = dev->dev_private;

	mutex_lock(&info->dev->struct_mutex);
	if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
		mutex_unlock(&info->dev->struct_mutex);
		return;
	}

	if (dev->driver->dma_quiescent) {
		dev->driver->dma_quiescent(dev);
	}

	drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
	mutex_unlock(&info->dev->struct_mutex);
}

/*
 * The driver receives an interrupt if someone is waiting on a fence;
 * acknowledge the source and hand it off to the fence handler.
 */
irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct xgi_info *info = dev->dev_private;
	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
					(0x2800
					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))
		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
		   | M2REG_ACTIVE_INTERRUPT_0_MASK
		   | M2REG_ACTIVE_INTERRUPT_2_MASK
		   | M2REG_ACTIVE_INTERRUPT_3_MASK);

	if (irq_bits != 0) {
		/* Acknowledge the active interrupt bits. */
		DRM_WRITE32(info->mmio_map,
			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND
					| irq_bits));
#ifdef XGI_HAVE_FENCE
		xgi_fence_handler(dev);
#endif /* XGI_HAVE_FENCE */
		DRM_WAKEUP(&info->fence_queue);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}
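
/*
 * DRM_WAKEUP on fence_queue pairs with the DRM_WAIT_ON performed by the
 * fence-wait path; each waiter rechecks its sequence number against
 * info->complete_sequence after waking.  Schematically, where
 * xgi_seq_complete() stands in as a hypothetical predicate (not the
 * exact code in the fence implementation):
 *
 *	DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ,
 *		    xgi_seq_complete(info, seq));
 */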

int xgi_kern_isr_postinstall(struct drm_device *dev)
{
	struct xgi_info *info = dev->dev_private;

	DRM_INIT_WAITQUEUE(&info->fence_queue);
	return 0;
}

int xgi_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
	int err;

	if (info == NULL)
		return -ENOMEM;

	(void) memset(info, 0, sizeof(*info));
	dev->dev_private = info;
	info->dev = dev;

	info->mmio.base = drm_get_resource_start(dev, 1);
	info->mmio.size = drm_get_resource_len(dev, 1);

	DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
		 (unsigned long) info->mmio.base, info->mmio.size);

	if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
		DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->mmio.base, info->mmio.size);
		err = -EINVAL;
		goto fail;
	}

	info->fb.base = drm_get_resource_start(dev, 0);
	info->fb.size = drm_get_resource_len(dev, 0);

	DRM_INFO("fb base: 0x%lx, size: 0x%x\n",
		 (unsigned long) info->fb.base, info->fb.size);

	/* Two memory managers back the framebuffer and GART heaps; 12 and
	 * 8 are the user and owner hash-table orders (see drm_sman_init()
	 * in drm_sman.h). */
	err = drm_sman_init(&info->sman, 2, 12, 8);
	if (err)
		goto fail;

	return 0;

fail:
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	return err;
}

int xgi_driver_unload(struct drm_device *dev)
{
	struct xgi_info *info = dev->dev_private;

	drm_sman_takedown(&info->sman);
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}