// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/of_address.h>

enum devm_ioremap_type {
	DEVM_IOREMAP = 0,
	DEVM_IOREMAP_UC,
	DEVM_IOREMAP_WC,
	DEVM_IOREMAP_NP,
};

void devm_ioremap_release(struct device *dev, void *res)
{
	iounmap(*(void __iomem **)res);
}

static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
				    resource_size_t size,
				    enum devm_ioremap_type type)
{
	void __iomem **ptr, *addr = NULL;

	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	switch (type) {
	case DEVM_IOREMAP:
		addr = ioremap(offset, size);
		break;
	case DEVM_IOREMAP_UC:
		addr = ioremap_uc(offset, size);
		break;
	case DEVM_IOREMAP_WC:
		addr = ioremap_wc(offset, size);
		break;
	case DEVM_IOREMAP_NP:
		addr = ioremap_np(offset, size);
		break;
	}

	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}

/**
 * devm_ioremap - Managed ioremap()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
			   resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap);
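
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * probe path.  The physical base, size, and FOO_CTRL offset below are
 * hypothetical.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		void __iomem *base;
 *
 *		base = devm_ioremap(&pdev->dev, 0xfe000000, SZ_4K);
 *		if (!base)
 *			return -ENOMEM;
 *		writel(1, base + FOO_CTRL);
 *		return 0;
 *	}
 */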

/**
 * devm_ioremap_uc - Managed ioremap_uc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_uc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_UC);
}
EXPORT_SYMBOL_GPL(devm_ioremap_uc);

/**
 * devm_ioremap_wc - Managed ioremap_wc()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_wc().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_WC);
}
EXPORT_SYMBOL(devm_ioremap_wc);

/**
 * devm_ioremap_np - Managed ioremap_np()
 * @dev: Generic device to remap IO address for
 * @offset: Resource address to map
 * @size: Size of map
 *
 * Managed ioremap_np().  Map is automatically unmapped on driver detach.
 */
void __iomem *devm_ioremap_np(struct device *dev, resource_size_t offset,
			      resource_size_t size)
{
	return __devm_ioremap(dev, offset, size, DEVM_IOREMAP_NP);
}
EXPORT_SYMBOL(devm_ioremap_np);

/**
 * devm_iounmap - Managed iounmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
 */
void devm_iounmap(struct device *dev, void __iomem *addr)
{
	/*
	 * devres_destroy() removes the devres entry without invoking its
	 * release function, so the mapping must be torn down explicitly.
	 */
	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
			       (__force void *)addr));
	iounmap(addr);
}
EXPORT_SYMBOL(devm_iounmap);
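
/*
 * Usage sketch (illustrative, not part of the original file): dropping a
 * managed mapping early, before driver detach, e.g. to remap the same
 * region with different attributes.
 *
 *	base = devm_ioremap(dev, start, len);
 *	...
 *	devm_iounmap(dev, base);
 *	base = devm_ioremap_wc(dev, start, len);
 */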

static void __iomem *
__devm_ioremap_resource(struct device *dev, const struct resource *res,
			enum devm_ioremap_type type)
{
	resource_size_t size;
	void __iomem *dest_ptr;
	char *pretty_name;

	BUG_ON(!dev);

	if (!res || resource_type(res) != IORESOURCE_MEM) {
		dev_err(dev, "invalid resource\n");
		return IOMEM_ERR_PTR(-EINVAL);
	}

	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
		type = DEVM_IOREMAP_NP;

	size = resource_size(res);

	if (res->name)
		pretty_name = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
					     dev_name(dev), res->name);
	else
		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
	if (!pretty_name)
		return IOMEM_ERR_PTR(-ENOMEM);

	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
		dev_err(dev, "can't request region for resource %pR\n", res);
		return IOMEM_ERR_PTR(-EBUSY);
	}

	dest_ptr = __devm_ioremap(dev, res->start, size, type);
	if (!dest_ptr) {
		dev_err(dev, "ioremap failed for resource %pR\n", res);
		devm_release_mem_region(dev, res->start, size);
		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
	}

	return dest_ptr;
}

/**
 * devm_ioremap_resource() - check, request region, and ioremap resource
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach.
 *
 * Usage example:
 *
 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 *	base = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource(struct device *dev,
				    const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP);
}
EXPORT_SYMBOL(devm_ioremap_resource);

/**
 * devm_ioremap_resource_wc() - write-combined variant of
 *				devm_ioremap_resource()
 * @dev: generic device to handle the resource for
 * @res: resource to be handled
 *
 * Return: a pointer to the remapped memory or an ERR_PTR() encoded error code
 * on failure.
 */
void __iomem *devm_ioremap_resource_wc(struct device *dev,
				       const struct resource *res)
{
	return __devm_ioremap_resource(dev, res, DEVM_IOREMAP_WC);
}

/**
 * devm_of_iomap - Requests a resource and maps the memory mapped IO
 *		   for a given device_node managed by a given device
 *
 * Checks that a resource is a valid memory region, requests the memory
 * region and ioremaps it. All operations are managed and will be undone
 * on driver detach of the device.
 *
 * This is to be used when a device requests/maps resources described
 * by other device tree nodes (children or otherwise).
 *
 * @dev:	The device "managing" the resource
 * @node:	The device-tree node where the resource resides
 * @index:	index of the MMIO range in the "reg" property
 * @size:	Returns the size of the resource (pass NULL if not needed)
 *
 * Usage example:
 *
 *	base = devm_of_iomap(&pdev->dev, node, 0, NULL);
 *	if (IS_ERR(base))
 *		return PTR_ERR(base);
 *
 * Please Note: This is not a one-to-one replacement for of_iomap() because the
 * of_iomap() function does not track whether the region is already mapped. If
 * two drivers try to map the same memory, the of_iomap() function will succeed
 * but the devm_of_iomap() function will return -EBUSY.
 *
 * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
 * error code on failure.
 */
void __iomem *devm_of_iomap(struct device *dev, struct device_node *node, int index,
			    resource_size_t *size)
{
	struct resource res;

	if (of_address_to_resource(node, index, &res))
		return IOMEM_ERR_PTR(-EINVAL);
	if (size)
		*size = resource_size(&res);
	return devm_ioremap_resource(dev, &res);
}
EXPORT_SYMBOL(devm_of_iomap);
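
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * register range that belongs to a child node of the probed device.  The
 * node name "syscon" is hypothetical.
 *
 *	struct device_node *child;
 *	void __iomem *regs;
 *
 *	child = of_get_child_by_name(pdev->dev.of_node, "syscon");
 *	if (!child)
 *		return -ENODEV;
 *	regs = devm_of_iomap(&pdev->dev, child, 0, NULL);
 *	of_node_put(child);
 *	if (IS_ERR(regs))
 *		return PTR_ERR(regs);
 */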

#ifdef CONFIG_HAS_IOPORT_MAP
/*
 * Generic iomap devres
 */
static void devm_ioport_map_release(struct device *dev, void *res)
{
	ioport_unmap(*(void __iomem **)res);
}

static int devm_ioport_map_match(struct device *dev, void *res,
				 void *match_data)
{
	return *(void **)res == match_data;
}

/**
 * devm_ioport_map - Managed ioport_map()
 * @dev: Generic device to map ioport for
 * @port: Port to map
 * @nr: Number of ports to map
 *
 * Managed ioport_map().  Map is automatically unmapped on driver
 * detach.
 *
 * Return: a pointer to the remapped memory or NULL on failure.
 */
void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
			      unsigned int nr)
{
	void __iomem **ptr, *addr;

	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	addr = ioport_map(port, nr);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else
		devres_free(ptr);

	return addr;
}
EXPORT_SYMBOL(devm_ioport_map);
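
/*
 * Usage sketch (illustrative, not part of the original file): mapping a
 * legacy I/O port range.  The port base and length below are hypothetical.
 *
 *	void __iomem *io;
 *
 *	io = devm_ioport_map(dev, 0x3f8, 8);
 *	if (!io)
 *		return -ENOMEM;
 *	status = ioread8(io + 5);
 */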

/**
 * devm_ioport_unmap - Managed ioport_unmap()
 * @dev: Generic device to unmap for
 * @addr: Address to unmap
 *
 * Managed ioport_unmap().  @addr must have been mapped using
 * devm_ioport_map().
 */
void devm_ioport_unmap(struct device *dev, void __iomem *addr)
{
	ioport_unmap(addr);
	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
			       devm_ioport_map_match, (__force void *)addr));
}
EXPORT_SYMBOL(devm_ioport_unmap);
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifdef CONFIG_PCI
/*
 * PCI iomap devres
 */
#define PCIM_IOMAP_MAX	PCI_STD_NUM_BARS

struct pcim_iomap_devres {
	void __iomem *table[PCIM_IOMAP_MAX];
};

static void pcim_iomap_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pcim_iomap_devres *this = res;
	int i;

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (this->table[i])
			pci_iounmap(dev, this->table[i]);
}

/**
 * pcim_iomap_table - access iomap allocation table
 * @pdev: PCI device to access iomap table for
 *
 * Access iomap allocation table for @pdev.  If iomap table doesn't
 * exist and @pdev is managed, it will be allocated.  All iomaps
 * recorded in the iomap table are automatically unmapped on driver
 * detach.
 *
 * This function might sleep when the table is first allocated but can
 * be safely called without context and is guaranteed to succeed once
 * allocated.
 */
void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
	if (dr)
		return dr->table;

	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
	return dr->table;
}
EXPORT_SYMBOL(pcim_iomap_table);

/**
 * pcim_iomap - Managed pci_iomap()
 * @pdev: PCI device to iomap for
 * @bar: BAR to iomap
 * @maxlen: Maximum length of iomap
 *
 * Managed pci_iomap().  Map is automatically unmapped on driver
 * detach.
 */
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	void __iomem **tbl;

	BUG_ON(bar >= PCIM_IOMAP_MAX);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
		return NULL;

	tbl[bar] = pci_iomap(pdev, bar, maxlen);
	return tbl[bar];
}
EXPORT_SYMBOL(pcim_iomap);
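
/*
 * Usage sketch (illustrative, not part of the original file): mapping BAR 0
 * in a managed PCI probe.  The function and BAR number are hypothetical.
 *
 *	static int foo_pci_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		void __iomem *bar0;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		bar0 = pcim_iomap(pdev, 0, 0);
 *		if (!bar0)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */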

/**
 * pcim_iounmap - Managed pci_iounmap()
 * @pdev: PCI device to iounmap for
 * @addr: Address to unmap
 *
 * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
 */
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	void __iomem **tbl;
	int i;

	pci_iounmap(pdev, addr);

	tbl = (void __iomem **)pcim_iomap_table(pdev);
	BUG_ON(!tbl);

	for (i = 0; i < PCIM_IOMAP_MAX; i++)
		if (tbl[i] == addr) {
			tbl[i] = NULL;
			return;
		}
	WARN_ON(1);
}
EXPORT_SYMBOL(pcim_iounmap);

/**
 * pcim_iomap_regions - Request and iomap PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to request and iomap
 * @name: Name used when requesting regions
 *
 * Request and iomap regions specified by @mask.
 */
int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name)
{
	void __iomem * const *iomap;
	int i, rc;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return -ENOMEM;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long len;

		if (!(mask & (1 << i)))
			continue;

		rc = -EINVAL;
		len = pci_resource_len(pdev, i);
		if (!len)
			goto err_inval;

		rc = pci_request_region(pdev, i, name);
		if (rc)
			goto err_inval;

		rc = -ENOMEM;
		if (!pcim_iomap(pdev, i, 0))
			goto err_region;
	}

	return 0;

 err_region:
	pci_release_region(pdev, i);
 err_inval:
	/* unwind the BARs already requested and mapped in earlier iterations */
	while (--i >= 0) {
		if (!(mask & (1 << i)))
			continue;
		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}

	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions);
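
/*
 * Usage sketch (illustrative, not part of the original file): requesting and
 * mapping BARs 0 and 2 in one call.  The BAR numbers and the name string are
 * hypothetical.
 *
 *	rc = pcim_iomap_regions(pdev, BIT(0) | BIT(2), "foo_driver");
 *	if (rc)
 *		return rc;
 *	bar0 = pcim_iomap_table(pdev)[0];
 *	bar2 = pcim_iomap_table(pdev)[2];
 */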

/**
 * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to iomap
 * @name: Name used when requesting regions
 *
 * Request all PCI BARs and iomap regions specified by @mask.
 */
int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask,
				   const char *name)
{
	/*
	 * Request the standard BARs (0-5) that are not in @mask here;
	 * pcim_iomap_regions() below requests and maps the ones that are.
	 */
	int request_mask = ((1 << 6) - 1) & ~mask;
	int rc;

	rc = pci_request_selected_regions(pdev, request_mask, name);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, mask, name);
	if (rc)
		pci_release_selected_regions(pdev, request_mask);
	return rc;
}
EXPORT_SYMBOL(pcim_iomap_regions_request_all);
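
/*
 * Usage sketch (illustrative, not part of the original file): map only BAR 0
 * but keep all other standard BARs reserved so no other driver can claim
 * them.  The name string is hypothetical.
 *
 *	rc = pcim_iomap_regions_request_all(pdev, BIT(0), "foo_driver");
 *	if (rc)
 *		return rc;
 *	regs = pcim_iomap_table(pdev)[0];
 */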

/**
 * pcim_iounmap_regions - Unmap and release PCI BARs
 * @pdev: PCI device to map IO resources for
 * @mask: Mask of BARs to unmap and release
 *
 * Unmap and release regions specified by @mask.
 */
void pcim_iounmap_regions(struct pci_dev *pdev, int mask)
{
	void __iomem * const *iomap;
	int i;

	iomap = pcim_iomap_table(pdev);
	if (!iomap)
		return;

	for (i = 0; i < PCIM_IOMAP_MAX; i++) {
		if (!(mask & (1 << i)))
			continue;

		pcim_iounmap(pdev, iomap[i]);
		pci_release_region(pdev, i);
	}
}
EXPORT_SYMBOL(pcim_iounmap_regions);
#endif /* CONFIG_PCI */