linux-core/xgi_drv.c
/****************************************************************************
 * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation on the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 ***************************************************************************/

#include "drmP.h"
#include "drm.h"
#include "xgi_drv.h"
#include "xgi_regs.h"
#include "xgi_misc.h"
#include "xgi_cmdlist.h"

#include "drm_pciids.h"

static struct pci_device_id pciidlist[] = {
	xgi_PCI_IDS
};

#ifdef XGI_HAVE_FENCE
extern struct drm_fence_driver xgi_fence_driver;
#endif /* XGI_HAVE_FENCE */

int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);

static struct drm_ioctl_desc xgi_ioctls[] = {
	DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
	DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH),
};

static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
static int xgi_driver_unload(struct drm_device *dev);
static void xgi_driver_lastclose(struct drm_device * dev);
static void xgi_reclaim_buffers_locked(struct drm_device * dev,
	struct drm_file * filp);
static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
static int xgi_kern_isr_postinstall(struct drm_device * dev);

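/* DRM driver description: supported driver features, entry points for
 * load/unload, lastclose, IRQ handling and buffer reclaim, plus the file
 * operations and PCI glue.
 */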
static struct drm_driver driver = {
	.driver_features =
		DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
		DRIVER_IRQ_SHARED | DRIVER_SG,
	.dev_priv_size = sizeof(struct xgi_info),
	.load = xgi_driver_load,
	.unload = xgi_driver_unload,
	.lastclose = xgi_driver_lastclose,
	.dma_quiescent = NULL,
	.irq_preinstall = NULL,
	.irq_postinstall = xgi_kern_isr_postinstall,
	.irq_uninstall = NULL,
	.irq_handler = xgi_kern_isr,
	.reclaim_buffers = drm_core_reclaim_buffers,
	.reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
	.get_map_ofs = drm_core_get_map_ofs,
	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = xgi_ioctls,
	.dma_ioctl = NULL,

	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.ioctl = drm_ioctl,
		.mmap = drm_mmap,
		.poll = drm_poll,
		.fasync = drm_fasync,
#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
		.compat_ioctl = xgi_compat_ioctl,
#endif
	},

	.pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = probe,
		.remove = __devexit_p(drm_cleanup_pci),
	},

#ifdef XGI_HAVE_FENCE
	.fence_driver = &xgi_fence_driver,
#endif /* XGI_HAVE_FENCE */

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_dev(pdev, ent, &driver);
}


static int __init xgi_init(void)
{
	driver.num_ioctls = xgi_max_ioctl;
	return drm_init(&driver, pciidlist);
}

static void __exit xgi_exit(void)
{
	drm_exit(&driver);
}

module_init(xgi_init);
module_exit(xgi_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

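/* One-time engine setup, called from xgi_bootstrap() once the MMIO region
 * has been mapped: unlock the extended register protection (3C5.11),
 * configure PCI retry behaviour, enable the graphics engine, linear
 * addressing, the 32-bit internal data path, PCI burst writes and MMIO
 * access.
 */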
void xgi_engine_init(struct xgi_info * info)
{
	u8 temp;


	OUT3C5B(info->mmio_map, 0x11, 0x92);

	/* Copied from OT2D: PCI Retry Control Register.
	 * Disable PCI read retry and enable write retry in memory: (10xx xxxx)b.
	 */
	temp = IN3X5B(info->mmio_map, 0x55);
	OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);

	xgi_enable_ge(info);

	/* Enable linear addressing of the card. */
	temp = IN3X5B(info->mmio_map, 0x21);
	OUT3X5B(info->mmio_map, 0x21, temp | 0x20);

	/* Enable 32-bit internal data path */
	temp = IN3X5B(info->mmio_map, 0x2A);
	OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);

	/*
	 * 0x3D4.39: Enable PCI burst write, disable burst read, and enable MMIO.
	 *   bit 7     Pixel data format (1: big endian, 0: little endian)
	 *   bits 6:3  Memory data with big-endian format, BE[3:0]# with big-endian format
	 *   bit 2     PCI burst write enable
	 *   bit 1     PCI burst read enable
	 *   bit 0     MMIO control
	 */
	temp = IN3X5B(info->mmio_map, 0x39);
	OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);

	/* Enable GEIO decode. */
	/* temp = IN3X5B(info->mmio_map, 0x29);
	 * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
	 */

	/* Enable the graphics engine I/O PCI retry function. */
	/* temp = IN3X5B(info->mmio_map, 0x62);
	 * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
	 */

	/* Protect all registers except those protected by 3C5.0E.7. */
	/* OUT3C5B(info->mmio_map, 0x11, 0x87); */
}

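/* DRM_XGI_BOOTSTRAP ioctl: map the MMIO registers and initialize the
 * engine on first use, probe the framebuffer size, set up the FB and GART
 * heaps and the command list, add the scatter/gather map for the GART
 * backing store, and hand that map back to userspace through the
 * bootstrap structure.
 */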
int xgi_bootstrap(struct drm_device * dev, void * data,
		  struct drm_file * filp)
{
	struct xgi_info *info = dev->dev_private;
	struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
	struct drm_map_list *maplist;
	int err;


	DRM_SPININIT(&info->fence_lock, "fence lock");
	info->next_sequence = 0;
	info->complete_sequence = 0;

	if (info->mmio_map == NULL) {
		err = drm_addmap(dev, info->mmio.base, info->mmio.size,
				 _DRM_REGISTERS, _DRM_KERNEL,
				 &info->mmio_map);
		if (err) {
			DRM_ERROR("Unable to map MMIO region: %d\n", err);
			return err;
		}

		xgi_enable_mmio(info);
		xgi_engine_init(info);
	}


	info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;

	DRM_INFO("fb   base: 0x%lx, size: 0x%x (probed)\n",
		 (unsigned long) info->fb.base, info->fb.size);


	if ((info->fb.base == 0) || (info->fb.size == 0)) {
		DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->fb.base, info->fb.size);
		return -EINVAL;
	}


	/* Init the resource manager */
	if (!info->fb_heap_initialized) {
		err = xgi_fb_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize FB heap.\n");
			return err;
		}
	}


	info->pcie.size = bs->gart.size;

	/* Init the resource manager */
	if (!info->pcie_heap_initialized) {
		err = xgi_pcie_heap_init(info);
		if (err) {
			DRM_ERROR("Unable to initialize GART heap.\n");
			return err;
		}

		/* Allocate 1 MiB for the command buffer, which holds the
		 * flush2D batch array.
		 */
		err = xgi_cmdlist_initialize(info, 0x100000, filp);
		if (err) {
			DRM_ERROR("xgi_cmdlist_initialize() failed\n");
			return err;
		}
	}


	if (info->pcie_map == NULL) {
		err = drm_addmap(info->dev, 0, info->pcie.size,
				 _DRM_SCATTER_GATHER, _DRM_LOCKED,
				 &info->pcie_map);
		if (err) {
			DRM_ERROR("Could not add map for GART backing "
				  "store.\n");
			return err;
		}
	}


	maplist = drm_find_matching_map(dev, info->pcie_map);
	if (maplist == NULL) {
		DRM_ERROR("Could not find GART backing store map.\n");
		return -EINVAL;
	}

	bs->gart = *info->pcie_map;
	bs->gart.handle = (void *)(unsigned long) maplist->user_token;
	return 0;
}

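/* DRM lastclose hook: shut down the command list, graphics engine and
 * MMIO access, drop the cached map pointers (the DRM core destroys the
 * maps themselves), release the GART table and reset the memory manager
 * so that a later xgi_bootstrap() starts from a clean state.
 */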
void xgi_driver_lastclose(struct drm_device * dev)
{
	struct xgi_info * info = dev->dev_private;

	if (info != NULL) {
		if (info->mmio_map != NULL) {
			xgi_cmdlist_cleanup(info);
			xgi_disable_ge(info);
			xgi_disable_mmio(info);
		}

		/* The core DRM lastclose routine will destroy all of our
		 * mappings for us.  NULL out the pointers here so that
		 * xgi_bootstrap can do the right thing.
		 */
		info->pcie_map = NULL;
		info->mmio_map = NULL;
		info->fb_map = NULL;

		if (info->pcie_heap_initialized) {
			drm_ati_pcigart_cleanup(dev, &info->gart_info);
		}

		if (info->fb_heap_initialized
		    || info->pcie_heap_initialized) {
			drm_sman_cleanup(&info->sman);

			info->fb_heap_initialized = false;
			info->pcie_heap_initialized = false;
		}
	}
}

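/* reclaim_buffers_idlelocked hook: when a client file is closed, quiesce
 * DMA if the driver provides a hook and release every memory-manager
 * allocation owned by that file.
 */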
void xgi_reclaim_buffers_locked(struct drm_device * dev,
				struct drm_file * filp)
{
	struct xgi_info * info = dev->dev_private;

	mutex_lock(&info->dev->struct_mutex);
	if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
		mutex_unlock(&info->dev->struct_mutex);
		return;
	}

	if (dev->driver->dma_quiescent) {
		dev->driver->dma_quiescent(dev);
	}

	drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
	mutex_unlock(&info->dev->struct_mutex);
	return;
}

/*
 * Interrupt handler: acknowledge the active interrupt sources and hand the
 * event off to anyone waiting on a fence.
 */
irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct xgi_info *info = dev->dev_private;
	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
					(0x2800
					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))
		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
		   | M2REG_ACTIVE_INTERRUPT_0_MASK
		   | M2REG_ACTIVE_INTERRUPT_2_MASK
		   | M2REG_ACTIVE_INTERRUPT_3_MASK);


	if (irq_bits != 0) {
		DRM_WRITE32(info->mmio_map,
			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
#ifdef XGI_HAVE_FENCE
		xgi_fence_handler(dev);
#endif /* XGI_HAVE_FENCE */
		DRM_WAKEUP(&info->fence_queue);
		return IRQ_HANDLED;
	} else {
		return IRQ_NONE;
	}
}

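/* IRQ post-install hook: initialize the fence wait queue used by the ISR. */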
int xgi_kern_isr_postinstall(struct drm_device * dev)
{
	struct xgi_info *info = dev->dev_private;

	DRM_INIT_WAITQUEUE(&info->fence_queue);
	return 0;
}

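/* DRM load hook: allocate and zero the per-device private structure,
 * record the MMIO (BAR 1) and framebuffer (BAR 0) resources, and
 * initialize the simple memory manager.
 */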
int xgi_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
	int err;

	if (!info)
		return -ENOMEM;

	(void) memset(info, 0, sizeof(*info));
	dev->dev_private = info;
	info->dev = dev;

	info->mmio.base = drm_get_resource_start(dev, 1);
	info->mmio.size = drm_get_resource_len(dev, 1);

	DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
		 (unsigned long) info->mmio.base, info->mmio.size);


	if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
		DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
			  (unsigned long) info->mmio.base, info->mmio.size);
		err = -EINVAL;
		goto fail;
	}


	info->fb.base = drm_get_resource_start(dev, 0);
	info->fb.size = drm_get_resource_len(dev, 0);

	DRM_INFO("fb   base: 0x%lx, size: 0x%x\n",
		 (unsigned long) info->fb.base, info->fb.size);


	err = drm_sman_init(&info->sman, 2, 12, 8);
	if (err) {
		goto fail;
	}


	return 0;

fail:
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	return err;
}

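/* DRM unload hook: tear down the memory manager and free the per-device
 * private structure.
 */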
int xgi_driver_unload(struct drm_device *dev)
{
	struct xgi_info * info = dev->dev_private;

	drm_sman_takedown(&info->sman);
	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
	dev->dev_private = NULL;

	return 0;
}