// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ppc-pci.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
enum {
	DDW_QUERY_PE_DMA_WIN  = 0,
	DDW_CREATE_PE_DMA_WIN = 1,
	DDW_REMOVE_PE_DMA_WIN = 2,

	DDW_APPLICABLE_SIZE
};

enum {
	DDW_EXT_SIZE = 0,
	DDW_EXT_RESET_DMA_WIN = 1,
	DDW_EXT_QUERY_OUT_SIZE = 2
};
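/*
 * The RTAS tokens for ibm,query-pe-dma-window, ibm,create-pe-dma-window and
 * ibm,remove-pe-dma-window come from the "ibm,ddw-applicable" device-tree
 * property and are indexed with the first enum above; the optional
 * "ibm,ddw-extensions" property additionally carries the reset-window token
 * and the query output size, indexed with the second enum.
 */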
static struct iommu_table *iommu_pseries_alloc_table(int node)
{
	struct iommu_table *tbl;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		return NULL;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	return tbl;
}
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group;

	table_group = kzalloc_node(sizeof(*table_group), GFP_KERNEL, node);
	if (!table_group)
		return NULL;

#ifdef CONFIG_IOMMU_API
	table_group->ops = &spapr_tce_table_group_ops;
	table_group->pgsizes = SZ_4K;
#endif

	table_group->tables[0] = iommu_pseries_alloc_table(node);
	if (table_group->tables[0])
		return table_group;

	kfree(table_group);
	return NULL;
}
static void iommu_pseries_free_group(struct iommu_table_group *table_group,
		const char *node_name)
{
	if (!table_group)
		return;

#ifdef CONFIG_IOMMU_API
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif

	/* Default DMA window table is at index 0, while DDW at 1. SR-IOV
	 * adapters only have table on index 1.
	 */
	if (table_group->tables[0])
		iommu_tce_table_put(table_group->tables[0]);

	if (table_group->tables[1])
		iommu_tce_table_put(table_group->tables[1]);

	kfree(table_group);
}
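/*
 * A TCE (translation control entry) is one 64-bit entry of a DMA window's
 * translation table: the real page number of the backing memory combined
 * with the TCE_PCI_READ/TCE_PCI_WRITE permission bits.  On non-LPAR systems
 * the table lives at tbl->it_base and is written directly; under an LPAR the
 * table is owned by the hypervisor and must be updated through hcalls (see
 * the *_pSeriesLP variants below).
 */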
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	const unsigned long tceshift = tbl->it_page_shift;
	const unsigned long pagesize = IOMMU_PAGE_SIZE(tbl);

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> tceshift;
		*tcep = cpu_to_be64(proto_tce | rpn << tceshift);

		uaddr += pagesize;
		tcep++;
	}

	return 0;
}
static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}
static void tce_free_pSeriesLP(unsigned long liobn, long, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | rpn << tceshift;
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(liobn, tcenum_start, tceshift,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
		}

		tcenum++;
		rpn++;
	}

	return ret;
}
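/*
 * Per-CPU page used to batch TCE updates: instead of one H_PUT_TCE hcall per
 * page, tce_buildmulti_pSeriesLP() fills this page with up to 512 TCEs and
 * hands it to the hypervisor with a single H_PUT_TCE_INDIRECT call.
 */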
static DEFINE_PER_CPU(__be64 *, tce_page);

static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;
	const unsigned long tceshift = tbl->it_page_shift;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		return tce_build_pSeriesLP(tbl->it_index, tcenum,
					   tceshift, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
						   tceshift,
						   npages, uaddr, direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096 / TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | rpn << tceshift);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << tceshift,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
	}

	return ret;
}
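/*
 * Clearing entries follows the same pattern: one H_PUT_TCE hcall per entry
 * when H_STUFF_TCE is not available, otherwise a single H_STUFF_TCE call that
 * writes the same (zero) TCE value across the whole range.
 */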
static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		}

		tcenum++;
	}
}

static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum,
					  tbl->it_page_shift, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index,
			     (u64)tcenum << tbl->it_page_shift, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index,
			   (u64)tcenum << tbl->it_page_shift, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
	}

	return tce_ret;
}
/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct dma_win {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u64 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(dma_win_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(dma_win_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(dma_win_init_mutex);

#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"
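/*
 * A window described by DIRECT64_PROPNAME linearly maps all of system RAM at
 * a fixed DMA offset, so the device can bypass dynamic IOMMU mappings
 * entirely; DMA64_PROPNAME marks a large 64-bit window that is still used
 * through the IOMMU (as tables[1] of the table group).
 */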
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
					  unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	long rc = 0;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset, 0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
					unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
				be64_to_cpu(maprange->dma_base);
		unsigned long tcenum = dmastart >> tceshift;
		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
		void *uaddr = __va(start_pfn << PAGE_SHIFT);

		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
				tcenum, tceshift, npages, (unsigned long) uaddr,
				DMA_BIDIRECTIONAL, 0);
	}

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096 / TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}
static void iommu_table_setparms_common(struct iommu_table *tbl, unsigned long busno,
					unsigned long liobn, unsigned long win_addr,
					unsigned long window_size, unsigned long page_shift,
					void *base, struct iommu_table_ops *table_ops)
{
	tbl->it_busno = busno;
	tbl->it_index = liobn;
	tbl->it_offset = win_addr >> page_shift;
	tbl->it_size = window_size >> page_shift;
	tbl->it_page_shift = page_shift;
	tbl->it_base = (unsigned long)base;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_ops = table_ops;
}
struct iommu_table_ops iommu_table_pseries_ops;

static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > SZ_2G) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	node = phb->dn;
	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	iommu_table_setparms_common(tbl, phb->bus->number, 0, phb->dma_window_base_cur,
				    phb->dma_window_size, IOMMU_PAGE_SHIFT_4K,
				    __va(*basep), &iommu_table_pseries_ops);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	phb->dma_window_base_cur += phb->dma_window_size;
}
struct iommu_table_ops iommu_table_lpar_multi_ops;

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size, liobn;

	of_parse_dma_window(dn, dma_window, &liobn, &offset, &size);

	iommu_table_setparms_common(tbl, phb->bus->number, liobn, offset, size,
				    IOMMU_PAGE_SHIFT_4K, NULL,
				    &iommu_table_lpar_multi_ops);

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}

struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);

	if (!iommu_init_table(tbl, pci->phb->node, 0, 0))
		panic("Failed to initialize iommu table");

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}
#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};
/*
 * Find nearest ibm,dma-window (default DMA window) or direct DMA window or
 * dynamic 64bit DMA window, walking up the device tree.
 */
static struct device_node *pci_dma_find(struct device_node *dn,
					const __be32 **dma_window)
{
	const __be32 *dw = NULL;

	for ( ; dn && PCI_DN(dn); dn = dn->parent) {
		dw = of_get_property(dn, "ibm,dma-window", NULL);
		if (dw) {
			if (dma_window)
				*dma_window = dw;
			return dn;
		}
		dw = of_get_property(dn, DIRECT64_PROPNAME, NULL);
		if (dw)
			return dn;
		dw = of_get_property(dn, DMA64_PROPNAME, NULL);
		if (dw)
			return dn;
	}

	return NULL;
}
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	pdn = pci_dma_find(dn, &dma_window);

	if (dma_window == NULL)
		pr_debug("  no ibm,dma-window property !\n");

	ppci = PCI_DN(pdn);

	pr_debug("  parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];

		iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
				ppci->table_group, dma_window);

		if (!iommu_init_table(tbl, ppci->phb->node, 0, 0))
			panic("Failed to initialize iommu table");

		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug("  created table: %p\n", ppci->table_group);
	}
}
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);

		if (!iommu_init_table(tbl, phb->node, 0, 0))
			panic("Failed to initialize iommu table");

		set_iommu_table_base(&dev->dev, tbl);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */
	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev,
				PCI_DN(dn)->table_group->tables[0]);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}
static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);
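/*
 * Booting with "disable_ddw" on the kernel command line disables Dynamic DMA
 * Window support (see iommu_init_early_pSeries()), so devices stay on their
 * default 32-bit DMA window instead of getting a dynamic 64-bit one.
 */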
static void clean_dma_window(struct device_node *np, struct dynamic_dma_window_prop *dwp)
{
	int ret;

	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warn("%pOF failed to clear tces in window.\n",
			np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);
}

/*
 * Call only if DMA window is clean.
 */
static void __remove_dma_window(struct device_node *np, u32 *ddw_avail, u64 liobn)
{
	int ret;

	ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn);
	if (ret)
		pr_warn("%pOF: failed to remove DMA window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
	else
		pr_debug("%pOF: successfully removed DMA window: rtas returned "
			 "%d to ibm,remove-pe-dma-window(%x) %llx\n",
			 np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
}

static void remove_dma_window(struct device_node *np, u32 *ddw_avail,
			      struct property *win)
{
	struct dynamic_dma_window_prop *dwp;
	u64 liobn;

	dwp = win->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	clean_dma_window(np, dwp);
	__remove_dma_window(np, ddw_avail, liobn);
}
static int remove_ddw(struct device_node *np, bool remove_prop, const char *win_name)
{
	struct property *win;
	u32 ddw_avail[DDW_APPLICABLE_SIZE];
	int ret = 0;

	win = of_find_property(np, win_name, NULL);
	if (!win)
		return -EINVAL;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], DDW_APPLICABLE_SIZE);
	if (ret)
		return 0;

	if (win->length >= sizeof(struct dynamic_dma_window_prop))
		remove_dma_window(np, ddw_avail, win);

	if (!remove_prop)
		return 0;

	ret = of_remove_property(np, win);
	if (ret)
		pr_warn("%pOF: failed to remove DMA window property: %d\n",
			np, ret);
	return 0;
}
static bool find_existing_ddw(struct device_node *pdn, u64 *dma_addr, int *window_shift)
{
	struct dma_win *window;
	const struct dynamic_dma_window_prop *dma64;
	bool found = false;

	spin_lock(&dma_win_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &dma_win_list, list) {
		if (window->device == pdn) {
			dma64 = window->prop;
			*dma_addr = be64_to_cpu(dma64->dma_base);
			*window_shift = be32_to_cpu(dma64->window_shift);
			found = true;
			break;
		}
	}
	spin_unlock(&dma_win_list_lock);

	return found;
}

static struct dma_win *ddw_list_new_entry(struct device_node *pdn,
					  const struct dynamic_dma_window_prop *dma64)
{
	struct dma_win *window;

	window = kzalloc(sizeof(*window), GFP_KERNEL);
	if (!window)
		return NULL;

	window->device = pdn;
	window->prop = dma64;

	return window;
}
static void find_existing_ddw_windows_named(const char *name)
{
	int len;
	struct device_node *pdn;
	struct dma_win *window;
	const struct dynamic_dma_window_prop *dma64;

	for_each_node_with_property(pdn, name) {
		dma64 = of_get_property(pdn, name, &len);
		if (!dma64 || len < sizeof(*dma64)) {
			remove_ddw(pdn, true, name);
			continue;
		}

		window = ddw_list_new_entry(pdn, dma64);
		if (!window) {
			of_node_put(pdn);
			break;
		}

		spin_lock(&dma_win_list_lock);
		list_add(&window->list, &dma_win_list);
		spin_unlock(&dma_win_list_lock);
	}
}

static int find_existing_ddw_windows(void)
{
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	find_existing_ddw_windows_named(DIRECT64_PROPNAME);
	find_existing_ddw_windows_named(DMA64_PROPNAME);

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);
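/*
 * Windows found by the initcall above were typically created by an earlier
 * kernel (e.g. before a kexec) and left described in the device tree, so
 * they are re-registered on the list here rather than created again.
 */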
/*
 * ddw_read_ext - Get the value of a DDW extension
 * @np: device node from which the extension value is to be read.
 * @extnum: index number of the extension.
 * @value: pointer to return value, modified when extension is available.
 *
 * Checks if "ibm,ddw-extensions" exists for this node, and gets the value
 * at index 'extnum'.
 * It can also be used only to check if an extension exists, passing value == NULL.
 *
 * Returns:
 *	0 if extension successfully read
 *	-EINVAL if the "ibm,ddw-extensions" does not exist,
 *	-ENODATA if "ibm,ddw-extensions" does not have a value, and
 *	-EOVERFLOW if "ibm,ddw-extensions" does not contain this extension.
 */
static inline int ddw_read_ext(const struct device_node *np, int extnum,
			       u32 *value)
{
	static const char propname[] = "ibm,ddw-extensions";
	u32 count;
	int ret;

	ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
	if (ret)
		return ret;

	if (count < extnum)
		return -EOVERFLOW;

	if (!value)
		value = &count;

	return of_property_read_u32_index(np, propname, extnum, value);
}
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query,
		     struct device_node *parent)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr, ext_query, query_out[5];
	u64 buid;
	int ret, out_sz;

	/*
	 * From LoPAR level 2.8, "ibm,ddw-extensions" index 3 can rule how many
	 * output parameters ibm,query-pe-dma-windows will have, ranging from
	 * 5 to 6.
	 */
	ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query);
	if (!ret && ext_query == 1)
		out_sz = 6;
	else
		out_sz = 5;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));

	switch (out_sz) {
	case 5:
		query->windows_available = query_out[0];
		query->largest_available_block = query_out[1];
		query->page_size = query_out[2];
		query->migration_capable = query_out[3];
		break;
	case 6:
		query->windows_available = query_out[0];
		query->largest_available_block = ((u64)query_out[1] << 32) |
						 query_out[2];
		query->page_size = query_out[3];
		query->migration_capable = query_out[4];
		break;
	}

	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d, lb=%llx ps=%x wn=%d\n",
		 ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret, query->largest_available_block,
		 query->page_size, query->windows_available);

	return ret;
}
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4,
				(u32 *)create, cfg_addr, BUID_HI(buid),
				BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));

	dev_info(&dev->dev,
		 "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		 "(liobn = 0x%x starting addr = %x %x)\n",
		 ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), page_shift, window_shift, ret, create->liobn,
		 create->addr_hi, create->addr_lo);

	return ret;
}
struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

static phys_addr_t ddw_memory_hotplug_max(void)
{
	resource_size_t max_addr = memory_hotplug_max();
	struct device_node *memory;

	for_each_node_by_type(memory, "memory") {
		struct resource res;

		if (of_address_to_resource(memory, 0, &res))
			continue;

		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
	}

	return max_addr;
}
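/*
 * A direct-mapped window has to reach every address that RAM may ever
 * occupy, including not-yet-added hotplug memory, so window sizing in
 * enable_ddw() uses this upper bound rather than only the memory that is
 * currently present.
 */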
/*
 * Platforms supporting the DDW option starting with LoPAR level 2.7 implement
 * ibm,ddw-extensions, which carries the rtas token for
 * ibm,reset-pe-dma-windows.
 * That rtas-call can be used to restore the default DMA window for the device.
 */
static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
{
	int ret;
	u32 cfg_addr, reset_dma_win;
	u64 buid;
	struct device_node *dn;
	struct pci_dn *pdn;

	ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
	if (ret)
		return;

	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);

	ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid),
			BUID_LO(buid));
	if (ret)
		dev_info(&dev->dev,
			 "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ",
			 reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
			 ret);
}
/* Return largest page shift based on "IO Page Sizes" output of ibm,query-pe-dma-window. */
static int iommu_get_page_shift(u32 query_page_size)
{
	/* Supported IO page-sizes according to LoPAR, note that 2M is out of order */
	const int shift[] = {
		__builtin_ctzll(SZ_4K),   __builtin_ctzll(SZ_64K), __builtin_ctzll(SZ_16M),
		__builtin_ctzll(SZ_32M),  __builtin_ctzll(SZ_64M), __builtin_ctzll(SZ_128M),
		__builtin_ctzll(SZ_256M), __builtin_ctzll(SZ_16G), __builtin_ctzll(SZ_2M)
	};

	int i = ARRAY_SIZE(shift) - 1;
	int ret = 0;

	/*
	 * On LoPAR, ibm,query-pe-dma-window outputs "IO Page Sizes" using a bit field:
	 * - bit 31 means 4k pages are supported,
	 * - bit 30 means 64k pages are supported, and so on.
	 * Larger pagesizes map more memory with the same amount of TCEs, so start probing them.
	 */
	for (; i >= 0; i--) {
		if (query_page_size & (1 << i))
			ret = max(ret, shift[i]);
	}

	return ret;
}
static struct property *ddw_property_create(const char *propname, u32 liobn, u64 dma_addr,
					    u32 page_shift, u32 window_shift)
{
	struct dynamic_dma_window_prop *ddwprop;
	struct property *win64;

	win64 = kzalloc(sizeof(*win64), GFP_KERNEL);
	if (!win64)
		return NULL;

	win64->name = kstrdup(propname, GFP_KERNEL);
	ddwprop = kzalloc(sizeof(*ddwprop), GFP_KERNEL);
	win64->value = ddwprop;
	win64->length = sizeof(*ddwprop);
	if (!win64->name || !win64->value) {
		kfree(win64->name);
		kfree(win64->value);
		kfree(win64);
		return NULL;
	}

	ddwprop->liobn = cpu_to_be32(liobn);
	ddwprop->dma_base = cpu_to_be64(dma_addr);
	ddwprop->tce_shift = cpu_to_be32(page_shift);
	ddwprop->window_shift = cpu_to_be32(window_shift);

	return win64;
}
/*
 * If the PE supports dynamic dma windows, and there is space for a table
 * that can map all pages in a linear offset, then setup such a table,
 * and record the dma-offset in the struct device.
 *
 * dev: the pci device we are checking
 * pdn: the parent pe node with the ibm,dma-window property
 * Future: also check if we can remap the base window for our base page size
 *
 * returns true if it can map all pages (direct mapping), false otherwise.
 */
static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
{
	int len = 0, ret;
	int max_ram_len = order_base_2(ddw_memory_hotplug_max());
	struct ddw_query_response query;
	struct ddw_create_response create;
	int page_shift;
	u64 win_addr;
	const char *win_name;
	struct device_node *dn;
	u32 ddw_avail[DDW_APPLICABLE_SIZE];
	struct dma_win *window;
	struct property *win64;
	struct failed_ddw_pdn *fpdn;
	bool default_win_removed = false, direct_mapping = false;
	bool pmem_present;
	struct pci_dn *pci = PCI_DN(pdn);
	struct property *default_win = NULL;

	dn = of_find_node_by_type(NULL, "ibm,pmemory");
	pmem_present = dn != NULL;
	of_node_put(dn);

	mutex_lock(&dma_win_init_mutex);

	if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
		direct_mapping = (len >= max_ram_len);
		goto out_unlock;
	}

	/*
	 * If we already went through this for a previous function of
	 * the same device and failed, we don't want to muck with the
	 * DMA window again, as it will race with in-flight operations
	 * and can lead to EEHs. The above mutex protects access to the
	 * list.
	 */
	list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
		if (fpdn->pdn == pdn)
			goto out_unlock;
	}

	/*
	 * the ibm,ddw-applicable property holds the tokens for:
	 * ibm,query-pe-dma-window
	 * ibm,create-pe-dma-window
	 * ibm,remove-pe-dma-window
	 * for the given node in that order.
	 * the property is actually in the parent, not the PE
	 */
	ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
					 &ddw_avail[0], DDW_APPLICABLE_SIZE);
	if (ret)
		goto out_failed;

	/*
	 * Query if there is a second window of size to map the
	 * whole partition. Query returns number of windows, largest
	 * block assigned to PE (partition endpoint), and two bitmasks
	 * of page sizes: supported and supported for migrate-dma.
	 */
	dn = pci_device_to_OF_node(dev);
	ret = query_ddw(dev, ddw_avail, &query, pdn);
	if (ret != 0)
		goto out_failed;

	/*
	 * If there is no window available, remove the default DMA window,
	 * if it's present. This will make all the resources available to the
	 * new DDW window.
	 * If anything fails after this, we need to restore it, so also check
	 * for extensions presence.
	 */
	if (query.windows_available == 0) {
		int reset_win_ext;

		/* DDW + IOMMU on single window may fail if there is any allocation */
		if (iommu_table_in_use(pci->table_group->tables[0])) {
			dev_warn(&dev->dev, "current IOMMU table in use, can't be replaced.\n");
			goto out_failed;
		}

		default_win = of_find_property(pdn, "ibm,dma-window", NULL);
		if (!default_win)
			goto out_failed;

		reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL);
		if (reset_win_ext)
			goto out_failed;

		remove_dma_window(pdn, ddw_avail, default_win);
		default_win_removed = true;

		/* Query again, to check if the window is available */
		ret = query_ddw(dev, ddw_avail, &query, pdn);
		if (ret != 0)
			goto out_failed;

		if (query.windows_available == 0) {
			/* no windows are available for this device. */
			dev_dbg(&dev->dev, "no free dynamic windows");
			goto out_failed;
		}
	}

	page_shift = iommu_get_page_shift(query.page_size);
	if (!page_shift) {
		dev_dbg(&dev->dev, "no supported page size in mask %x",
			query.page_size);
		goto out_failed;
	}

	/*
	 * The "ibm,pmemory" can appear anywhere in the address space.
	 * Assuming it is still backed by page structs, try MAX_PHYSMEM_BITS
	 * for the upper limit and fallback to max RAM otherwise but this
	 * disables device::dma_ops_bypass.
	 */
	len = max_ram_len;
	if (pmem_present) {
		if (query.largest_available_block >=
		    (1ULL << (MAX_PHYSMEM_BITS - page_shift)))
			len = MAX_PHYSMEM_BITS;
		else
			dev_info(&dev->dev, "Skipping ibm,pmemory");
	}

	/* check if the available block * number of ptes will map everything */
	if (query.largest_available_block < (1ULL << (len - page_shift))) {
		dev_dbg(&dev->dev,
			"can't map partition max 0x%llx with %llu %llu-sized pages\n",
			1ULL << len,
			query.largest_available_block,
			1ULL << page_shift);

		len = order_base_2(query.largest_available_block << page_shift);
		win_name = DMA64_PROPNAME;
	} else {
		direct_mapping = !default_win_removed ||
			(len == MAX_PHYSMEM_BITS) ||
			(!pmem_present && (len == max_ram_len));
		win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
	}

	ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
	if (ret != 0)
		goto out_failed;

	dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
		create.liobn, dn);

	win_addr = ((u64)create.addr_hi << 32) | create.addr_lo;
	win64 = ddw_property_create(win_name, create.liobn, win_addr, page_shift, len);
	if (!win64) {
		dev_info(&dev->dev,
			 "couldn't allocate property, property name, or value\n");
		goto out_remove_win;
	}

	ret = of_add_property(pdn, win64);
	if (ret) {
		dev_err(&dev->dev, "unable to add DMA window property for %pOF: %d",
			pdn, ret);
		goto out_free_prop;
	}

	window = ddw_list_new_entry(pdn, win64->value);
	if (!window)
		goto out_del_prop;

	if (direct_mapping) {
		/* DDW maps the whole partition, so enable direct DMA mapping */
		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
					    win64->value, tce_setrange_multi_pSeriesLP_walk);
		if (ret) {
			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
				 dn, ret);

			/* Make sure to clean DDW if any TCE was set */
			clean_dma_window(pdn, win64->value);
			goto out_del_list;
		}
	} else {
		struct iommu_table *newtbl;
		int i;
		unsigned long start = 0, end = 0;

		for (i = 0; i < ARRAY_SIZE(pci->phb->mem_resources); i++) {
			const unsigned long mask = IORESOURCE_MEM_64 | IORESOURCE_MEM;

			/* Look for MMIO32 */
			if ((pci->phb->mem_resources[i].flags & mask) == IORESOURCE_MEM) {
				start = pci->phb->mem_resources[i].start;
				end = pci->phb->mem_resources[i].end;
				break;
			}
		}

		/* New table for using DDW instead of the default DMA window */
		newtbl = iommu_pseries_alloc_table(pci->phb->node);
		if (!newtbl) {
			dev_dbg(&dev->dev, "couldn't create new IOMMU table\n");
			goto out_del_list;
		}

		iommu_table_setparms_common(newtbl, pci->phb->bus->number, create.liobn, win_addr,
					    1UL << len, page_shift, NULL, &iommu_table_lpar_multi_ops);
		iommu_init_table(newtbl, pci->phb->node, start, end);

		pci->table_group->tables[1] = newtbl;

		set_iommu_table_base(&dev->dev, newtbl);
	}

	if (default_win_removed) {
		iommu_tce_table_put(pci->table_group->tables[0]);
		pci->table_group->tables[0] = NULL;

		/* default_win is valid here because default_win_removed == true */
		of_remove_property(pdn, default_win);
		dev_info(&dev->dev, "Removed default DMA window for %pOF\n", pdn);
	}

	spin_lock(&dma_win_list_lock);
	list_add(&window->list, &dma_win_list);
	spin_unlock(&dma_win_list_lock);

	dev->dev.archdata.dma_offset = win_addr;
	goto out_unlock;

out_del_list:
	kfree(window);

out_del_prop:
	of_remove_property(pdn, win64);

out_free_prop:
	kfree(win64->name);
	kfree(win64->value);
	kfree(win64);

out_remove_win:
	/* DDW is clean, so it's ok to call this directly. */
	__remove_dma_window(pdn, ddw_avail, create.liobn);

out_failed:
	if (default_win_removed)
		reset_dma_window(dev, pdn);

	fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
	if (!fpdn)
		goto out_unlock;
	fpdn->pdn = pdn;
	list_add(&fpdn->list, &failed_ddw_pdn_list);

out_unlock:
	mutex_unlock(&dma_win_init_mutex);

	/*
	 * If we have persistent memory and the window size is only as big
	 * as RAM, then we failed to create a window to cover persistent
	 * memory and need to set the DMA limit.
	 */
	if (pmem_present && direct_mapping && len == max_ram_len)
		dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);

	return direct_mapping;
}
static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
{
	struct device_node *pdn, *dn;
	struct iommu_table *tbl;
	const __be32 *dma_window = NULL;
	struct pci_dn *pci;

	pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));

	/* dev setup for LPAR is a little tricky, since the device tree might
	 * contain the dma-window properties per-device and not necessarily
	 * for the bus. So we need to search upwards in the tree until we
	 * either hit a dma-window property, OR find a parent with a table
	 * already allocated.
	 */
	dn = pci_device_to_OF_node(dev);
	pr_debug("  node is %pOF\n", dn);

	pdn = pci_dma_find(dn, &dma_window);
	if (!pdn || !PCI_DN(pdn)) {
		printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
		       "no DMA window found for pci dev=%s dn=%pOF\n",
		       pci_name(dev), dn);
		return;
	}
	pr_debug("  parent is %pOF\n", pdn);

	pci = PCI_DN(pdn);
	if (!pci->table_group) {
		pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
		tbl = pci->table_group->tables[0];
		iommu_table_setparms_lpar(pci->phb, pdn, tbl,
				pci->table_group, dma_window);

		iommu_init_table(tbl, pci->phb->node, 0, 0);
		iommu_register_group(pci->table_group,
				pci_domain_nr(pci->phb->bus), 0);
		pr_debug("  created table: %p\n", pci->table_group);
	} else {
		pr_debug("  found DMA window, table: %p\n", pci->table_group);
	}

	set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
	iommu_add_device(pci->table_group, &dev->dev);
}
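/*
 * Called through the controller ops when a device asks for a 64-bit DMA
 * mask: if enable_ddw() manages to direct-map all of memory, returning true
 * here lets dma_iommu_ops bypass the TCE table for this device's regular DMA.
 */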
static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
{
	struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;

	/* only attempt to use a new window if 64-bit DMA is requested */
	if (dma_mask < DMA_BIT_MASK(64))
		return false;

	dev_dbg(&pdev->dev, "node is %pOF\n", dn);

	/*
	 * the device tree might contain the dma-window properties
	 * per-device and not necessarily for the bus. So we need to
	 * search upwards in the tree until we either hit a dma-window
	 * property, OR find a parent with a table already allocated.
	 */
	pdn = pci_dma_find(dn, NULL);
	if (pdn && PCI_DN(pdn))
		return enable_ddw(pdev, pdn);

	return false;
}
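/*
 * Memory hotplug: every direct-mapped window on dma_win_list has to keep
 * covering all of RAM, so TCEs are added when a memory block goes online and
 * cleared again if the onlining is cancelled.
 */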
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
		void *data)
{
	struct dma_win *window;
	struct memory_notify *arg = data;
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&dma_win_list_lock);
		break;
	case MEM_CANCEL_ONLINE:
		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
					arg->nr_pages, window->prop);
		}
		spin_unlock(&dma_win_list_lock);
		break;
	default:
		break;
	}
	if (ret && action != MEM_CANCEL_ONLINE)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block iommu_mem_nb = {
	.notifier_call = iommu_mem_notifier,
};
static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
	int err = NOTIFY_OK;
	struct of_reconfig_data *rd = data;
	struct device_node *np = rd->dn;
	struct pci_dn *pci = PCI_DN(np);
	struct dma_win *window;

	switch (action) {
	case OF_RECONFIG_DETACH_NODE:
		/*
		 * Removing the property will invoke the reconfig
		 * notifier again, which causes dead-lock on the
		 * read-write semaphore of the notifier chain. So
		 * we have to remove the property when releasing
		 * the device node.
		 */
		if (remove_ddw(np, false, DIRECT64_PROPNAME))
			remove_ddw(np, false, DMA64_PROPNAME);

		if (pci && pci->table_group)
			iommu_pseries_free_group(pci->table_group,
					np->full_name);

		spin_lock(&dma_win_list_lock);
		list_for_each_entry(window, &dma_win_list, list) {
			if (window->device == np) {
				list_del(&window->list);
				kfree(window);
				break;
			}
		}
		spin_unlock(&dma_win_list_lock);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block iommu_reconfig_nb = {
	.notifier_call = iommu_reconfig_notifier,
};
/* These are called very early. */
void __init iommu_init_early_pSeries(void)
{
	if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
		return;

	if (firmware_has_feature(FW_FEATURE_LPAR)) {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
		if (!disable_ddw)
			pseries_pci_controller_ops.iommu_bypass_supported =
				iommu_bypass_supported_pSeriesLP;
	} else {
		pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
		pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
	}

	of_reconfig_notifier_register(&iommu_reconfig_nb);
	register_memory_notifier(&iommu_mem_nb);

	set_pci_dma_ops(&dma_iommu_ops);
}
static int __init disable_multitce(char *str)
{
	if (strcmp(str, "off") == 0 &&
	    firmware_has_feature(FW_FEATURE_LPAR) &&
	    (firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
	     firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
		printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
		powerpc_firmware_features &=
			~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
	}
	return 1;
}

__setup("multitce=", disable_multitce);
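/*
 * "multitce=off" clears FW_FEATURE_PUT_TCE_IND and FW_FEATURE_STUFF_TCE, so
 * the LPAR TCE code above falls back to one H_PUT_TCE hcall per entry instead
 * of the batched H_PUT_TCE_INDIRECT/H_STUFF_TCE calls; mainly useful for
 * debugging firmware problems.
 */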
#ifdef CONFIG_SPAPR_TCE_IOMMU
struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
					     struct pci_dev *pdev)
{
	struct device_node *pdn, *dn = pdev->dev.of_node;
	struct iommu_group *grp;
	struct pci_dn *pci;

	pdn = pci_dma_find(dn, NULL);
	if (!pdn || !PCI_DN(pdn))
		return ERR_PTR(-ENODEV);

	pci = PCI_DN(pdn);
	if (!pci->table_group)
		return ERR_PTR(-ENODEV);

	grp = pci->table_group->group;
	if (!grp)
		return ERR_PTR(-ENODEV);

	return iommu_group_ref_get(grp);
}
#endif