// SPDX-License-Identifier: GPL-2.0
/*
 * Procfs interface for the PCI bus
 *
 * Copyright (c) 1997--1999 Martin Mares <mj@ucw.cz>
 */

#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/capability.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <asm/byteorder.h>
#include "pci.h"

static int proc_initialized;	/* = 0 */

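/*
 * Per-device /proc/bus/pci/<bus>/<slot.fn> entries expose the device's raw
 * configuration space; seeking is bounded by dev->cfg_size.
 */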
static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
	struct pci_dev *dev = pde_data(file_inode(file));
	return fixed_size_llseek(file, off, whence, dev->cfg_size);
}

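/*
 * Reads return config space as a little-endian byte stream regardless of
 * host endianness, using the widest naturally aligned accesses possible.
 */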
static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
				 size_t nbytes, loff_t *ppos)
{
	struct pci_dev *dev = pde_data(file_inode(file));
	unsigned int pos = *ppos;
	unsigned int cnt, size;

	/*
	 * Normal users can read only the standardized portion of the
	 * configuration space as several chips lock up when trying to read
	 * undefined locations (think of Intel PIIX4 as a typical example).
	 */
	if (capable(CAP_SYS_ADMIN))
		size = dev->cfg_size;
	else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
		size = 128;
	else
		size = 64;
	if (pos >= size)
		return 0;
	if (nbytes >= size)
		nbytes = size;
	if (pos + nbytes > size)
		nbytes = size - pos;
	cnt = nbytes;

	if (!access_ok(buf, cnt))
		return -EINVAL;

	pci_config_pm_runtime_get(dev);

	if ((pos & 1) && cnt) {
		unsigned char val;
		pci_user_read_config_byte(dev, pos, &val);
		__put_user(val, buf);
		buf++;
		pos++;
		cnt--;
	}

	if ((pos & 3) && cnt > 2) {
		unsigned short val;
		pci_user_read_config_word(dev, pos, &val);
		__put_user(cpu_to_le16(val), (__le16 __user *) buf);
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	while (cnt >= 4) {
		unsigned int val;
		pci_user_read_config_dword(dev, pos, &val);
		__put_user(cpu_to_le32(val), (__le32 __user *) buf);
		buf += 4;
		pos += 4;
		cnt -= 4;
		cond_resched();
	}

	if (cnt >= 2) {
		unsigned short val;
		pci_user_read_config_word(dev, pos, &val);
		__put_user(cpu_to_le16(val), (__le16 __user *) buf);
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	if (cnt) {
		unsigned char val;
		pci_user_read_config_byte(dev, pos, &val);
		__put_user(val, buf);
		pos++;
	}

	pci_config_pm_runtime_put(dev);

	*ppos = pos;
	return nbytes;
}

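/*
 * Writes mirror the read path (leading byte/word, dword bulk, trailing
 * word/byte) but are additionally gated by the kernel lockdown check.
 */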
static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
				  size_t nbytes, loff_t *ppos)
{
	struct inode *ino = file_inode(file);
	struct pci_dev *dev = pde_data(ino);
	int pos = *ppos;
	int size = dev->cfg_size;
	int cnt, ret;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	if (pos >= size)
		return 0;
	if (nbytes >= size)
		nbytes = size;
	if (pos + nbytes > size)
		nbytes = size - pos;
	cnt = nbytes;

	if (!access_ok(buf, cnt))
		return -EINVAL;

	pci_config_pm_runtime_get(dev);

	if ((pos & 1) && cnt) {
		unsigned char val;
		__get_user(val, buf);
		pci_user_write_config_byte(dev, pos, val);
		buf++;
		pos++;
		cnt--;
	}

	if ((pos & 3) && cnt > 2) {
		__le16 val;
		__get_user(val, (__le16 __user *) buf);
		pci_user_write_config_word(dev, pos, le16_to_cpu(val));
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	while (cnt >= 4) {
		__le32 val;
		__get_user(val, (__le32 __user *) buf);
		pci_user_write_config_dword(dev, pos, le32_to_cpu(val));
		buf += 4;
		pos += 4;
		cnt -= 4;
	}

	if (cnt >= 2) {
		__le16 val;
		__get_user(val, (__le16 __user *) buf);
		pci_user_write_config_word(dev, pos, le16_to_cpu(val));
		buf += 2;
		pos += 2;
		cnt -= 2;
	}

	if (cnt) {
		unsigned char val;
		__get_user(val, buf);
		pci_user_write_config_byte(dev, pos, val);
		pos++;
	}

	pci_config_pm_runtime_put(dev);

	*ppos = pos;
	i_size_write(ino, dev->cfg_size);
	return nbytes;
}

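/* Per-open state selecting how a later mmap() of this file is performed. */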
#ifdef HAVE_PCI_MMAP
struct pci_filp_private {
	enum pci_mmap_state mmap_state;
	int write_combine;
};
#endif /* HAVE_PCI_MMAP */

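/*
 * PCIIOC_* ioctls report which domain (controller) the device lives on and
 * let the caller choose between I/O-port and memory mappings and request
 * write-combining for a subsequent mmap().
 */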
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct pci_dev *dev = pde_data(file_inode(file));
#ifdef HAVE_PCI_MMAP
	struct pci_filp_private *fpriv = file->private_data;
#endif /* HAVE_PCI_MMAP */
	int ret = 0;

	ret = security_locked_down(LOCKDOWN_PCI_ACCESS);
	if (ret)
		return ret;

	switch (cmd) {
	case PCIIOC_CONTROLLER:
		ret = pci_domain_nr(dev->bus);
		break;

#ifdef HAVE_PCI_MMAP
	case PCIIOC_MMAP_IS_IO:
		if (!arch_can_pci_mmap_io())
			return -EINVAL;
		fpriv->mmap_state = pci_mmap_io;
		break;

	case PCIIOC_MMAP_IS_MEM:
		fpriv->mmap_state = pci_mmap_mem;
		break;

	case PCIIOC_WRITE_COMBINE:
		if (arch_can_pci_mmap_wc()) {
			if (arg)
				fpriv->write_combine = 1;
			else
				fpriv->write_combine = 0;
			break;
		}
		/* If arch decided it can't, fall through... */
		fallthrough;
#endif /* HAVE_PCI_MMAP */
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

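/*
 * mmap() maps one of the device's standard BARs; the requested range must
 * fit entirely within a BAR of the type selected via the ioctls above.
 */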
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pci_dev *dev = pde_data(file_inode(file));
	struct pci_filp_private *fpriv = file->private_data;
	resource_size_t start, end;
	int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;

	if (!capable(CAP_SYS_RAWIO) ||
	    security_locked_down(LOCKDOWN_PCI_ACCESS))
		return -EPERM;

	if (fpriv->mmap_state == pci_mmap_io) {
		if (!arch_can_pci_mmap_io())
			return -EINVAL;
		res_bit = IORESOURCE_IO;
	}

	/* Make sure the caller is mapping a real resource for this device */
	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (dev->resource[i].flags & res_bit &&
		    pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS))
			break;
	}

	if (i >= PCI_STD_NUM_BARS)
		return -ENODEV;

	if (fpriv->mmap_state == pci_mmap_mem &&
	    fpriv->write_combine) {
		if (dev->resource[i].flags & IORESOURCE_PREFETCH)
			write_combine = 1;
		else
			return -EINVAL;
	}

	if (dev->resource[i].flags & IORESOURCE_MEM &&
	    iomem_is_exclusive(dev->resource[i].start))
		return -EINVAL;

	pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);

	/* Adjust vm_pgoff to be the offset within the resource */
	vma->vm_pgoff -= start >> PAGE_SHIFT;
	ret = pci_mmap_resource_range(dev, i, vma,
				      fpriv->mmap_state, write_combine);
	if (ret < 0)
		return ret;

	return 0;
}

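/* open() allocates the per-file mmap state above; release() frees it. */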
static int proc_bus_pci_open(struct inode *inode, struct file *file)
{
	struct pci_filp_private *fpriv = kmalloc(sizeof(*fpriv), GFP_KERNEL);

	if (!fpriv)
		return -ENOMEM;

	fpriv->mmap_state = pci_mmap_io;
	fpriv->write_combine = 0;

	file->private_data = fpriv;
	file->f_mapping = iomem_get_mapping();

	return 0;
}

static int proc_bus_pci_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	file->private_data = NULL;

	return 0;
}
#endif /* HAVE_PCI_MMAP */

static const struct proc_ops proc_bus_pci_ops = {
	.proc_lseek	= proc_bus_pci_lseek,
	.proc_read	= proc_bus_pci_read,
	.proc_write	= proc_bus_pci_write,
	.proc_ioctl	= proc_bus_pci_ioctl,
#ifdef CONFIG_COMPAT
	.proc_compat_ioctl = proc_bus_pci_ioctl,
#endif
#ifdef HAVE_PCI_MMAP
	.proc_open	= proc_bus_pci_open,
	.proc_release	= proc_bus_pci_release,
	.proc_mmap	= proc_bus_pci_mmap,
#ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA
	.proc_get_unmapped_area = get_pci_unmapped_area,
#endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */
#endif /* HAVE_PCI_MMAP */
};

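/*
 * Iterator for /proc/bus/pci/devices: walks the global device list and
 * keeps a reference on the device currently being shown.
 */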
static void *pci_seq_start(struct seq_file *m, loff_t *pos)
{
	struct pci_dev *dev = NULL;
	loff_t n = *pos;

	for_each_pci_dev(dev) {
		if (!n--)
			break;
	}
	return dev;
}

static void *pci_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct pci_dev *dev = v;

	(*pos)++;
	dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
	return dev;
}

static void pci_seq_stop(struct seq_file *m, void *v)
{
	if (v) {
		struct pci_dev *dev = v;
		pci_dev_put(dev);
	}
}

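/*
 * One tab-separated line per device: bus/devfn, vendor/device ID, IRQ,
 * the base address of each standard and ROM resource, their sizes, and
 * finally the name of the bound driver (if any).
 */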
static int show_device(struct seq_file *m, void *v)
{
	const struct pci_dev *dev = v;
	const struct pci_driver *drv;
	int i;

	if (dev == NULL)
		return 0;

	drv = pci_dev_driver(dev);
	seq_printf(m, "%02x%02x\t%04x%04x\t%x",
			dev->bus->number,
			dev->devfn,
			dev->vendor,
			dev->device,
			dev->irq);

	/* only print standard and ROM resources to preserve compatibility */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		resource_size_t start, end;
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		seq_printf(m, "\t%16llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		resource_size_t start, end;
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		seq_printf(m, "\t%16llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	seq_putc(m, '\t');
	if (drv)
		seq_puts(m, drv->name);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations proc_bus_pci_devices_op = {
	.start	= pci_seq_start,
	.next	= pci_seq_next,
	.stop	= pci_seq_stop,
	.show	= show_device
};

static struct proc_dir_entry *proc_bus_pci_dir;

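/*
 * Create the /proc/bus/pci/<bus>/<slot.fn> entry for a device, creating
 * the per-bus directory on first use.
 */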
int pci_proc_attach_device(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;
	struct proc_dir_entry *e;
	char name[16];

	if (!proc_initialized)
		return -EACCES;

	if (!bus->procdir) {
		if (pci_proc_domain(bus)) {
			sprintf(name, "%04x:%02x", pci_domain_nr(bus),
					bus->number);
		} else {
			sprintf(name, "%02x", bus->number);
		}
		bus->procdir = proc_mkdir(name, proc_bus_pci_dir);
		if (!bus->procdir)
			return -ENOMEM;
	}

	sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
	e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
			     &proc_bus_pci_ops, dev);
	if (!e)
		return -ENOMEM;
	proc_set_size(e, dev->cfg_size);
	dev->procent = e;

	return 0;
}

int pci_proc_detach_device(struct pci_dev *dev)
{
	proc_remove(dev->procent);
	dev->procent = NULL;
	return 0;
}

int pci_proc_detach_bus(struct pci_bus *bus)
{
	proc_remove(bus->procdir);
	return 0;
}

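/*
 * Create /proc/bus/pci and the "devices" summary at boot, then attach
 * entries for every device discovered so far.
 */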
static int __init pci_proc_init(void)
{
	struct pci_dev *dev = NULL;
	proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
	proc_create_seq("devices", 0, proc_bus_pci_dir,
			&proc_bus_pci_devices_op);
	proc_initialized = 1;
	for_each_pci_dev(dev)
		pci_proc_attach_device(dev);

	return 0;
}
device_initcall(pci_proc_init);