/*
 * Copyright 2003 José Fonseca.
 * Copyright 2003 Leif Delgass.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>

#include "drm_internal.h"
#include "drm_legacy.h"

#ifdef CONFIG_DRM_LEGACY
/* List of devices hanging off drivers with stealth attach. */
static LIST_HEAD(legacy_dev_list);
static DEFINE_MUTEX(legacy_dev_list_lock);
#endif

static int drm_get_pci_domain(struct drm_device *dev)
{
#ifndef __alpha__
        /* For historical reasons, drm_get_pci_domain() is busticated
         * on most archs and has to remain so for userspace interface
         * < 1.4, except on alpha which was right from the beginning.
         */
        if (dev->if_version < 0x10004)
                return 0;
#endif /* __alpha__ */

        return pci_domain_nr(to_pci_dev(dev->dev)->bus);
}

int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d",
                                   drm_get_pci_domain(dev),
                                   pdev->bus->number,
                                   PCI_SLOT(pdev->devfn),
                                   PCI_FUNC(pdev->devfn));
        if (!master->unique)
                return -ENOMEM;

        master->unique_len = strlen(master->unique);
        return 0;
}
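
/*
 * Illustrative note (not part of the original source): for a device in PCI
 * domain 0, bus 0x01, slot 0x00, function 0, the format string above yields
 * the unique name "pci:0000:01:00.0", which userspace can read back via
 * DRM_IOCTL_GET_UNIQUE (drmGetBusid() in libdrm).
 */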

static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
            (p->busnum & 0xff) != pdev->bus->number ||
            p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn))
                return -EINVAL;

        p->irq = pdev->irq;

        DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
                  p->irq);
        return 0;
}

/**
 * drm_legacy_irq_by_busid - Get interrupt from bus ID
 * @dev: DRM device
 * @data: IOCTL parameter pointing to a drm_irq_busid structure
 * @file_priv: DRM file private.
 *
 * Finds the PCI device with the specified bus id and gets its IRQ number.
 * This IOCTL is deprecated, and will now return EINVAL for any busid not equal
 * to that of the device this DRM instance is attached to.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct drm_irq_busid *p = data;

        if (!drm_core_check_feature(dev, DRIVER_LEGACY))
                return -EOPNOTSUPP;

        /* UMS was only ever supported on PCI devices. */
        if (WARN_ON(!dev_is_pci(dev->dev)))
                return -EINVAL;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
                return -EOPNOTSUPP;

        return drm_pci_irq_by_busid(dev, p);
}
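
/*
 * Illustrative userspace sketch (assumed usage, not part of the original
 * file): a legacy client with an open DRM fd could query the IRQ roughly as
 * follows; any bus coordinates other than the attached device's now fail
 * with -EINVAL:
 *
 *        struct drm_irq_busid busid = {
 *                .busnum = 1,        (domain << 8 | bus)
 *                .devnum = 0,
 *                .funcnum = 0,
 *        };
 *        if (ioctl(fd, DRM_IOCTL_IRQ_BUSID, &busid) == 0)
 *                printf("IRQ %d\n", busid.irq);
 */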

#ifdef CONFIG_DRM_LEGACY

void drm_legacy_pci_agp_destroy(struct drm_device *dev)
{
        if (dev->agp) {
                arch_phys_wc_del(dev->agp->agp_mtrr);
                drm_legacy_agp_clear(dev);
                kfree(dev->agp);
                dev->agp = NULL;
        }
}

static void drm_legacy_pci_agp_init(struct drm_device *dev)
{
        if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
                if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
                        dev->agp = drm_legacy_agp_init(dev);
                if (dev->agp) {
                        dev->agp->agp_mtrr = arch_phys_wc_add(
                                dev->agp->agp_info.aper_base,
                                dev->agp->agp_info.aper_size *
                                1024 * 1024);
                }
        }
}

static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
                                  const struct pci_device_id *ent,
                                  const struct drm_driver *driver)
{
        struct drm_device *dev;
        int ret;

        DRM_DEBUG("\n");

        dev = drm_dev_alloc(driver, &pdev->dev);
        if (IS_ERR(dev))
                return PTR_ERR(dev);

        ret = pci_enable_device(pdev);
        if (ret)
                goto err_free;

#ifdef __alpha__
        dev->hose = pdev->sysdata;
#endif

        if (drm_core_check_feature(dev, DRIVER_MODESET))
                pci_set_drvdata(pdev, dev);

        drm_legacy_pci_agp_init(dev);

        ret = drm_dev_register(dev, ent->driver_data);
        if (ret)
                goto err_agp;

        if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
                mutex_lock(&legacy_dev_list_lock);
                list_add_tail(&dev->legacy_dev_list, &legacy_dev_list);
                mutex_unlock(&legacy_dev_list_lock);
        }

        return 0;

err_agp:
        drm_legacy_pci_agp_destroy(dev);
        pci_disable_device(pdev);
err_free:
        drm_dev_put(dev);
        return ret;
}

/**
 * drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * This is only used by legacy dri1 drivers and is deprecated.
 *
 * Return: 0 on success or a negative error code on failure.
 */
int drm_legacy_pci_init(const struct drm_driver *driver,
                        struct pci_driver *pdriver)
{
        struct pci_dev *pdev = NULL;
        const struct pci_device_id *pid;
        int i;

        DRM_DEBUG("\n");

        if (WARN_ON(!(driver->driver_features & DRIVER_LEGACY)))
                return -EINVAL;

        /* If not using KMS, fall back to stealth mode manual scanning. */
        for (i = 0; pdriver->id_table[i].vendor != 0; i++) {
                pid = &pdriver->id_table[i];

                /* Loop around setting up a DRM device for each PCI device
                 * matching our ID and device class. If we had the internal
                 * function that pci_get_subsys and pci_get_class used, we'd
                 * be able to just pass pid in instead of doing a two-stage
                 * thing.
                 */
                pdev = NULL;
                while ((pdev =
                        pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
                                       pid->subdevice, pdev)) != NULL) {
                        if ((pdev->class & pid->class_mask) != pid->class)
                                continue;

                        /* stealth mode requires a manual probe */
                        pci_dev_get(pdev);
                        drm_legacy_get_pci_dev(pdev, pid, driver);
                }
        }
        return 0;
}
EXPORT_SYMBOL(drm_legacy_pci_init);
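
/*
 * Illustrative use (hypothetical driver names, assumed for the sketch): a
 * dri1 driver shadow-attaches from its module init instead of calling
 * pci_register_driver() itself, e.g.:
 *
 *        static int __init foo_init(void)
 *        {
 *                return drm_legacy_pci_init(&foo_driver, &foo_pci_driver);
 *        }
 *        module_init(foo_init);
 */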

/**
 * drm_legacy_pci_exit - unregister a shadow-attached legacy DRM driver
 * @driver: DRM device driver
 * @pdriver: PCI device driver
 *
 * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This
 * is deprecated and only used by dri1 drivers.
 */
void drm_legacy_pci_exit(const struct drm_driver *driver,
                         struct pci_driver *pdriver)
{
        struct drm_device *dev, *tmp;

        DRM_DEBUG("\n");

        if (!(driver->driver_features & DRIVER_LEGACY)) {
                WARN_ON(1);
        } else {
                mutex_lock(&legacy_dev_list_lock);
                list_for_each_entry_safe(dev, tmp, &legacy_dev_list,
                                         legacy_dev_list) {
                        if (dev->driver == driver) {
                                list_del(&dev->legacy_dev_list);
                                drm_put_dev(dev);
                        }
                }
                mutex_unlock(&legacy_dev_list_lock);
        }
        DRM_INFO("Module unloaded\n");
}
EXPORT_SYMBOL(drm_legacy_pci_exit);
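
/*
 * Illustrative counterpart to the init sketch above (hypothetical driver
 * names): the same dri1 driver would tear down its shadow-attached devices
 * from module exit, e.g.:
 *
 *        static void __exit foo_exit(void)
 *        {
 *                drm_legacy_pci_exit(&foo_driver, &foo_pci_driver);
 *        }
 *        module_exit(foo_exit);
 */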