From: Joerg Roedel
Date: Fri, 1 Sep 2017 09:31:42 +0000 (+0200)
Subject: Merge branches 'arm/exynos', 'arm/renesas', 'arm/rockchip', 'arm/omap', 'arm/mediatek...
X-Git-Tag: v4.14-rc1~72^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=47b59d8e40850a05370ee9198ea5e505d89489f1;p=platform%2Fkernel%2Flinux-exynos.git

Merge branches 'arm/exynos', 'arm/renesas', 'arm/rockchip', 'arm/omap', 'arm/mediatek', 'arm/tegra', 'arm/qcom', 'arm/smmu', 'ppc/pamu', 'x86/vt-d', 'x86/amd', 's390' and 'core' into next
---

47b59d8e40850a05370ee9198ea5e505d89489f1

diff --cc MAINTAINERS
index 1c3feff,f66488d,f66488d,f66488d,f66488d,f66488d,6f7721d,c7a6ac0,6f7721d,6f7721d,f7d568b,6f7721d,6f7721d,f66488d..48ebccb
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@@@@@@@@@@@@@@ -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1156,18 -1117,6 -1156,18 -1156,18 -1156,18 +1156,18 @@@@@@@@@@@@@@@ F: drivers/clk/axi
  F: drivers/pinctrl/pinctrl-artpec*
  F: Documentation/devicetree/bindings/pinctrl/axis,artpec6-pinctrl.txt
+ ARM/ASPEED I2C DRIVER
+ M: Brendan Higgins
+ R: Benjamin Herrenschmidt
+ R: Joel Stanley
+ L: linux-i2c@vger.kernel.org
----- -L: openbmc@lists.ozlabs.org
+++++ + +L: openbmc@lists.ozlabs.org (moderated for non-subscribers)
+ S: Maintained
+ F: drivers/irqchip/irq-aspeed-i2c-ic.c
+ F: drivers/i2c/busses/i2c-aspeed.c
+ F: Documentation/devicetree/bindings/interrupt-controller/aspeed,ast2400-i2c-ic.txt
+ F: Documentation/devicetree/bindings/i2c/i2c-aspeed.txt
+
  ARM/ASPEED MACHINE SUPPORT
  M: Joel Stanley
  S: Maintained
@@@@@@@@@@@@@@@ -5834,12 -5826,12 -5826,12 -5826,12 -5826,12 -5826,12 -5834,12 -5834,12 -5834,12 -5834,12 -5748,11 -5834,12 -5834,12 -5826,12 +5834,12 @@@@@@@@@@@@@@@ F: drivers/staging/greybus/spi.
  F: drivers/staging/greybus/spilib.c
  F: drivers/staging/greybus/spilib.h
----- -GREYBUS LOOBACK/TIME PROTOCOLS DRIVERS
- GREYBUS PROTOCOLS DRIVERS
- M: David Lin
+++++ + +GREYBUS LOOPBACK/TIME PROTOCOLS DRIVERS
+ M: Bryan O'Donoghue
  S: Maintained
- F: drivers/staging/greybus/uart.c
- F: drivers/staging/greybus/log.c
+ F: drivers/staging/greybus/loopback.c
+ F: drivers/staging/greybus/timesync.c
+ F: drivers/staging/greybus/timesync_platform.c
  GREYBUS PLATFORM DRIVERS
  M: Vaibhav Hiremath
@@@@@@@@@@@@@@@ -10940,14 -10933,14 -10933,14 -10933,14 -10933,14 -10933,14 -10941,14 -10941,21 -10941,14 -10941,14 -10581,6 -10941,14 -10941,14 -10933,14 +10940,21 @@@@@@@@@@@@@@@ T: git git://git.kernel.org/pub/scm/lin
  S: Supported
  F: arch/hexagon/
+++++++ ++++++QUALCOMM IOMMU
+++++++ ++++++M: Rob Clark
+++++++ ++++++L: iommu@lists.linux-foundation.org
+++++++ ++++++L: linux-arm-msm@vger.kernel.org
+++++++ ++++++S: Maintained
+++++++ ++++++F: drivers/iommu/qcom_iommu.c
+++++++ ++++++
+ QUALCOMM VENUS VIDEO ACCELERATOR DRIVER
+ M: Stanimir Varbanov
+ L: linux-media@vger.kernel.org
+ L: linux-arm-msm@vger.kernel.org
+ T: git git://linuxtv.org/media_tree.git
+ S: Maintained
+ F: drivers/media/platform/qcom/venus/
+
  QUALCOMM WCN36XX WIRELESS DRIVER
  M: Eugene Krasnikov
  L: wcn36xx@lists.infradead.org
@@@@@@@@@@@@@@@ -14003,18 -13996,17 -13996,17 -13996,17 -13996,17 -13996,17 -14004,18 -14011,18 -14004,18 -14004,18 -13565,9 -14004,18 -14004,18 -13996,17 +14010,18 @@@@@@@@@@@@@@@ F: drivers/block/virtio_blk.
  F: include/linux/virtio*.h
  F: include/uapi/linux/virtio_*.h
  F: drivers/crypto/virtio/
+++++ + +F: mm/balloon_compaction.c
+
+ VIRTIO CRYPTO DRIVER
+ M: Gonglei
+ L: virtualization@lists.linux-foundation.org
+ L: linux-crypto@vger.kernel.org
+ S: Maintained
+ F: drivers/crypto/virtio/
+ F: include/uapi/linux/virtio_crypto.h
  VIRTIO DRIVERS FOR S390
- M: Cornelia Huck
+ M: Cornelia Huck
  M: Halil Pasic
  L: linux-s390@vger.kernel.org
  L: virtualization@lists.linux-foundation.org
diff --cc drivers/iommu/amd_iommu.c
index 354cbd6,688e775,688e775,688e775,688e775,354cbd6,5a61cf5,354cbd6,354cbd6,354cbd6,63cacf5,b531307,354cbd6,7798fcf..538c16f
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@@@@@@@@@@@@@@ -103,29 -103,29 -103,29 -103,29 -103,29 -103,29 -103,29 -103,29 -103,29 -103,29 -120,27 -103,6 -103,29 -103,29 +103,6 @@@@@@@@@@@@@@@ int amd_iommu_max_glx_val = -1
  static const struct dma_map_ops amd_iommu_dma_ops;
  /*
----------- -- * This struct contains device specific data for the IOMMU
----------- -- */
----------- --struct iommu_dev_data {
----------- -- struct list_head list; /* For domain->dev_list */
----------- -- struct list_head dev_data_list; /* For global dev_data_list */
----------- -- struct protection_domain *domain; /* Domain the device is bound to */
----------- -- u16 devid; /* PCI Device ID */
----------- -- u16 alias; /* Alias Device ID */
----------- -- bool iommu_v2; /* Device can make use of IOMMUv2 */
----------- -- bool passthrough; /* Device is identity mapped */
----------- -- struct {
----------- -- bool enabled;
----------- -- int qdep;
----------- -- } ats; /* ATS state */
----------- -- bool pri_tlp; /* PASID TLB required for
----------- -- PPR completions */
----------- -- u32 errata; /* Bitmap for errata to apply */
----------- -- bool use_vapic; /* Enable device to use vapic mode */
---------- --
---------- -- struct ratelimit_state rs; /* Ratelimit IOPF messages */
----------- --};
----------- --
----------- --/*
   * general struct to manage commands send to an IOMMU
   */
  struct iommu_cmd {
@@@@@@@@@@@@@@@ -137,20 -137,20 -137,20 -137,20 -137,20 -137,20 -137,7 -137,20 -137,20 -137,20 -152,6 -114,20 -137,20 -137,7 +114,7 @@@@@@@@@@@@@@@ struct kmem_cache *amd_iommu_irq_cache
  static void update_domain(struct protection_domain *domain);
  static int protection_domain_init(struct protection_domain *domain);
  static void detach_device(struct device *dev);
------ --- --
------ --- -- #define FLUSH_QUEUE_SIZE 256
------ --- --
------ --- -- struct flush_queue_entry {
------ --- -- unsigned long iova_pfn;
------ --- -- unsigned long pages;
------ --- -- u64 counter; /* Flush counter when this entry was added to the queue */
------ --- -- };
------ --- --
------ --- -- struct flush_queue {
------ --- -- struct flush_queue_entry *entries;
------ --- -- unsigned head, tail;
------ --- -- spinlock_t lock;
------ --- -- };
++++++ ++++++ static void iova_domain_flush_tlb(struct iova_domain *iovad);
  /*
   * Data container for a dma_ops specific protection domain
@@@@@@@@@@@@@@@ -1788,180 -1788,180 -1788,180 -1788,180 -1788,180 -1788,180 -1745,21 -1788,180 -1788,180 -1788,180 -1733,6 -1771,180 -1788,180 -1745,21 +1728,21 @@@@@@@@@@@@@@@ static void free_gcr3_table(struct prot
  free_page((unsigned long)domain->gcr3_tbl);
  }
------ --- -- static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom)
------ --- -- {
------ --- -- int cpu;
------ --- --
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- kfree(queue->entries);
------ --- -- }
------ --- --
------ --- -- free_percpu(dom->flush_queue);
------ --- --
------ --- -- dom->flush_queue = NULL;
------ --- -- }
------ --- --
------ --- -- static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
------ --- -- {
------ --- -- int cpu;
------ --- --
------ --- -- atomic64_set(&dom->flush_start_cnt, 0);
------ --- -- atomic64_set(&dom->flush_finish_cnt, 0);
------ --- --
------ --- -- dom->flush_queue = alloc_percpu(struct flush_queue);
------ --- -- if (!dom->flush_queue)
------ --- -- return -ENOMEM;
------ --- --
------ --- -- /* First make sure everything is cleared */
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- queue->head = 0;
------ --- -- queue->tail = 0;
------ --- -- queue->entries = NULL;
------ --- -- }
------ --- --
------ --- -- /* Now start doing the allocation */
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries),
------ --- -- GFP_KERNEL);
------ --- -- if (!queue->entries) {
------ --- -- dma_ops_domain_free_flush_queue(dom);
------ --- -- return -ENOMEM;
------ --- -- }
------ --- --
------ --- -- spin_lock_init(&queue->lock);
------ --- -- }
------ --- --
------ --- -- return 0;
------ --- -- }
------ --- --
+ static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom)
+ {
------ --- -- atomic64_inc(&dom->flush_start_cnt);
+ domain_flush_tlb(&dom->domain);
+ domain_flush_complete(&dom->domain);
------ --- -- atomic64_inc(&dom->flush_finish_cnt);
------ --- - }
------ --- -
------ --- - static inline bool queue_ring_full(struct flush_queue *queue)
------ --- - {
------ --- - assert_spin_locked(&queue->lock);
------ --- -
------ --- - return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
---- }
----
---- #define queue_ring_for_each(i, q) \
---- for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
----
---- static inline unsigned queue_ring_add(struct flush_queue *queue)
---- {
---- unsigned idx = queue->tail;
----
---- assert_spin_locked(&queue->lock);
---- queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
----
---- return idx;
---- }
----
---- static inline void queue_ring_remove_head(struct flush_queue *queue)
---- {
---- assert_spin_locked(&queue->lock);
---- queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE;
---- }
----
---- static void queue_ring_free_flushed(struct dma_ops_domain *dom,
---- struct flush_queue *queue)
---- {
---- u64 counter = atomic64_read(&dom->flush_finish_cnt);
---- int idx;
----
---- queue_ring_for_each(idx, queue) {
---- /*
---- * This assumes that counter values in the ring-buffer are
---- * monotonously rising.
---- */
---- if (queue->entries[idx].counter >= counter)
---- break;
----
---- free_iova_fast(&dom->iovad,
---- queue->entries[idx].iova_pfn,
---- queue->entries[idx].pages);
----
---- queue_ring_remove_head(queue);
---- }
---- }
----
---- static void queue_add(struct dma_ops_domain *dom,
---- unsigned long address, unsigned long pages)
---- {
---- struct flush_queue *queue;
---- unsigned long flags;
---- int idx;
----
---- pages = __roundup_pow_of_two(pages);
---- address >>= PAGE_SHIFT;
----
---- queue = get_cpu_ptr(dom->flush_queue);
---- spin_lock_irqsave(&queue->lock, flags);
----
---- /*
---- * First remove the enries from the ring-buffer that are already
---- * flushed to make the below queue_ring_full() check less likely
---- */
---- queue_ring_free_flushed(dom, queue);
----
---- /*
---- * When ring-queue is full, flush the entries from the IOTLB so
---- * that we can free all entries with queue_ring_free_flushed()
---- * below.
---- */
---- if (queue_ring_full(queue)) {
---- dma_ops_domain_flush_tlb(dom);
---- queue_ring_free_flushed(dom, queue);
---- }
----
---- idx = queue_ring_add(queue);
----
---- queue->entries[idx].iova_pfn = address;
---- queue->entries[idx].pages = pages;
---- queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt);
----
---- spin_unlock_irqrestore(&queue->lock, flags);
----
---- if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)
---- mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));
----
---- put_cpu_ptr(dom->flush_queue);
+ }
+
- - --- - #define queue_ring_for_each(i, q) \
- - --- - for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
- - --- -
- - --- - static inline unsigned queue_ring_add(struct flush_queue *queue)
---- static void queue_flush_timeout(unsigned long data)
- static inline bool queue_ring_full(struct flush_queue *queue)
++++++ ++++++ static void iova_domain_flush_tlb(struct iova_domain *iovad)
+ {
- - --- - unsigned idx = queue->tail;
---- struct dma_ops_domain *dom = (struct dma_ops_domain *)data;
---- int cpu;
- assert_spin_locked(&queue->lock);
-
- return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
- }
++++++ ++++++ struct dma_ops_domain *dom;
+
- #define queue_ring_for_each(i, q) \
- for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE)
-
- static inline unsigned queue_ring_add(struct flush_queue *queue)
- {
- unsigned idx = queue->tail;
-
- - --- -- assert_spin_locked(&queue->lock);
- - --- -- queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
- - --- --
- - --- -- return idx;
- - --- -- }
- - --- --
- - --- -- static inline void queue_ring_remove_head(struct flush_queue *queue)
- - --- -- {
- - --- -- assert_spin_locked(&queue->lock);
- - --- -- queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE;
- - --- -- }
- - --- --
- - --- -- static void queue_ring_free_flushed(struct dma_ops_domain *dom,
- - --- -- struct flush_queue *queue)
- - --- -- {
- - --- -- u64 counter = atomic64_read(&dom->flush_finish_cnt);
- - --- -- int idx;
- - --- --
- - --- -- queue_ring_for_each(idx, queue) {
- - --- -- /*
- - --- -- * This assumes that counter values in the ring-buffer are
- - --- -- * monotonously rising.
- - --- -- */
- - --- -- if (queue->entries[idx].counter >= counter)
- - --- -- break;
- - --- --
- - --- -- free_iova_fast(&dom->iovad,
- - --- -- queue->entries[idx].iova_pfn,
- - --- -- queue->entries[idx].pages);
- - --- --
- - --- -- queue_ring_remove_head(queue);
- - --- -- }
- - --- -- }
- - --- --
- - --- -- static void queue_add(struct dma_ops_domain *dom,
- - --- -- unsigned long address, unsigned long pages)
- - --- -- {
- - --- -- struct flush_queue *queue;
- - --- -- unsigned long flags;
- - --- -- int idx;
- - --- --
- - --- -- pages = __roundup_pow_of_two(pages);
- - --- -- address >>= PAGE_SHIFT;
- - --- --
- - --- -- queue = get_cpu_ptr(dom->flush_queue);
- - --- -- spin_lock_irqsave(&queue->lock, flags);
- - --- --
- - --- -- /*
- - --- -- * First remove the enries from the ring-buffer that are already
- - --- -- * flushed to make the below queue_ring_full() check less likely
- - --- -- */
- - --- -- queue_ring_free_flushed(dom, queue);
- - --- --
- - --- -- /*
- - --- -- * When ring-queue is full, flush the entries from the IOTLB so
- - --- -- * that we can free all entries with queue_ring_free_flushed()
- - --- -- * below.
- - --- -- */
- - --- -- if (queue_ring_full(queue)) {
- - --- -- dma_ops_domain_flush_tlb(dom);
- - --- -- queue_ring_free_flushed(dom, queue);
- - --- -- }
- - --- --
- - --- -- idx = queue_ring_add(queue);
- - --- --
- - --- -- queue->entries[idx].iova_pfn = address;
- - --- -- queue->entries[idx].pages = pages;
- - --- -- queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt);
- - --- --
- - --- -- spin_unlock_irqrestore(&queue->lock, flags);
- - --- --
- - --- -- if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)
- - --- -- mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));
- - --- --
- - --- -- put_cpu_ptr(dom->flush_queue);
- - --- -- }
- - --- --
- - --- -- static void queue_flush_timeout(unsigned long data)
- - --- -- {
- - --- -- struct dma_ops_domain *dom = (struct dma_ops_domain *)data;
- - --- -- int cpu;
- - --- --
------ --- -- atomic_set(&dom->flush_timer_on, 0);
++++++ ++++++ dom = container_of(iovad, struct dma_ops_domain, iovad);
+
+ dma_ops_domain_flush_tlb(dom);
------ --- --
------ --- -- for_each_possible_cpu(cpu) {
------ --- -- struct flush_queue *queue;
------ --- -- unsigned long flags;
------ --- --
------ --- -- queue = per_cpu_ptr(dom->flush_queue, cpu);
------ --- -- spin_lock_irqsave(&queue->lock, flags);
------ --- -- queue_ring_free_flushed(dom, queue);
------ --- -- spin_unlock_irqrestore(&queue->lock, flags);
------ --- -- }
+ }
+
  /*
   * Free a domain, only used if something went wrong in the
   * allocation path and we need to free an already allocated page table
@@@@@@@@@@@@@@@ -2013,16 -2013,16 -2013,16 -2013,16 -2013,16 -2013,16 -1806,11 -2013,16 -2013,16 -2013,16 -1779,8 -1996,16 -2013,16 -1806,11 +1789,11 @@@@@@@@@@@@@@@ static struct dma_ops_domain *dma_ops_d
  init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN,
  DMA_32BIT_PFN);
------ --- -- /* Initialize reserved ranges */
------ --- -- copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
------ --- --
------ --- -- if (dma_ops_domain_alloc_flush_queue(dma_dom))
++++++ ++++++ if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
+ goto free_dma_dom;
+
------ --- -- setup_timer(&dma_dom->flush_timer, queue_flush_timeout,
------ --- -- (unsigned long)dma_dom);
------ --- --
------ --- -- atomic_set(&dma_dom->flush_timer_on, 0);
++++++ +++ ++ /* Initialize reserved ranges */
++++++ +++ ++ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
  add_domain_to_list(&dma_dom->domain);
@@@@@@@@@@@@@@@ -2086,8 -2086,8 -2086,8 -2086,8 -2086,8 -2086,8 -1874,8 -2086,8 -2086,8 -2086,8 -1844,7 -2069,7 -2086,8 -1874,8 +1857,7 @@@@@@@@@@@@@@@ static void set_dte_entry(u16 devid, st
  flags |= tmp;
  }
---------- --
---------- -- flags &= ~(DTE_FLAG_SA | 0xffffULL);
- flags &= ~(0xffffUL);
+++++++++++ ++ flags &= ~DEV_DOMID_MASK;
  flags |= domain->id;
  amd_iommu_dev_table[devid].data[1] = flags;
diff --cc drivers/iommu/amd_iommu_types.h
index d6b873b,294a409,294a409,294a409,294a409,d6b873b,294a409,294a409,294a409,294a409,4de8f41,9e5af13,294a409,294a409..5f775fe
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@@@@@@@@@@@@@@ -321,8 -321,8 -321,8 -321,8 -321,8 -321,8 -321,8 -321,8 -321,8 -321,8 -321,7 -331,15 -321,8 -321,8 +331,15 @@@@@@@@@@@@@@@
  #define IOMMU_PTE_IR (1ULL << 61)
  #define IOMMU_PTE_IW (1ULL << 62)
+++++++++++ ++/*
+++++++++++ ++ * Bit value definition for DTE fields
+++++++++++ ++ */
+++++++++++ ++#define DTE_FLAG_V (1ULL << 0)
+++++++++++ ++#define DTE_FLAG_TV (1ULL << 1)
+++++++++++ ++#define DTE_FLAG_IR (1ULL << 61)
+++++++++++ ++#define DTE_FLAG_IW (1ULL << 62)
+++++++++++ ++
  #define DTE_FLAG_IOTLB (1ULL << 32)
---------- --#define DTE_FLAG_SA (1ULL << 34)
  #define DTE_FLAG_GV (1ULL << 55)
  #define DTE_FLAG_MASK (0x3ffULL << 32)
  #define DTE_GLX_SHIFT (56)
diff --cc drivers/iommu/arm-smmu.c
index 2d80fa8,bc89b4d,bc89b4d,bc89b4d,bc89b4d,2d80fa8,2d80fa8,262e1a3,beeb042,2d80fa8,7ec30b0,2d80fa8,2d80fa8,bc89b4d..3bdb799
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@@@@@@@@@@@@@@ -438,7 -436,7 -436,7 -436,7 -436,7 -438,7 -438,7 -245,7 -446,7 -438,7 -428,7 -438,7 -438,7 -436,7 +253,7 @@@@@@@@@@@@@@@ struct arm_smmu_domain
  struct arm_smmu_cfg cfg;
  enum arm_smmu_domain_stage stage;
  struct mutex init_mutex; /* Protects smmu pointer */
---- - spinlock_t cb_lock; /* Serialises ATS1* ops */
++++ + + spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
  struct iommu_domain domain;
  };
diff --cc drivers/iommu/iommu.c
index 3f6ea16,3f6ea16,3f6ea16,3f6ea16,3f6ea16,3f6ea16,af69bf7,3f6ea16,3f6ea16,3f6ea16,cf7ca7e,86581b1,3f6ea16,31c2b1d..3de5c0b
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@@@@@@@@@@@@@@ -1005,13 -1005,13 -1005,13 -1005,13 -1005,13 -1005,13 -1005,12 -1005,13 -1005,13 -1005,13 -1015,10 -1005,13 -1005,13 -1007,12 +1007,12 @@@@@@@@@@@@@@@ struct iommu_group *iommu_group_get_for
  if (group)
  return group;
------ ------ group = ERR_PTR(-EINVAL);
------ --- --
------ --- -- if (ops && ops->device_group)
------ --- -- group = ops->device_group(dev);
++++++ ++++++ if (!ops)
++++++ ++++++ return ERR_PTR(-EINVAL);
- if (ops && ops->device_group)
- group = ops->device_group(dev);
++++++ ++++++ group = ops->device_group(dev);
+ if (WARN_ON_ONCE(group == NULL))
+ return ERR_PTR(-EINVAL);
  if (IS_ERR(group))
  return group;
@@@@@@@@@@@@@@@ -1360,9 -1360,9 -1360,9 -1360,9 -1360,9 -1360,9 -1352,6 -1360,9 -1360,9 -1360,9 -1367,9 -1368,9 -1360,9 -1354,8 +1362,8 @@@@@@@@@@@@@@@ struct iommu_domain *iommu_get_domain_f
  struct iommu_group *group;
  group = iommu_group_get(dev);
------ ------ /* FIXME: Remove this when groups a mandatory for iommu drivers */
------ ------ if (group == NULL)
+++++++++++++ if (!group)
+ return NULL;
  domain = group->domain;
diff --cc include/linux/iommu.h
index 176f756,2cb54ad,2cb54ad,2cb54ad,2cb54ad,176f756,f1ce8e5,2cb54ad,2cb54ad,2cb54ad,2cb54ad,63983c9,2cb54ad,50be4fd..a7f2ac6
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@@@@@@@@@@@@@@ -436,11 -431,11 -431,11 -431,11 -431,11 -436,11 -431,11 -431,11 -431,11 -431,11 -431,11 -432,11 -431,11 -460,17 +466,17 @@@@@@@@@@@@@@@ static inline int iommu_map(struct iomm
  }
  static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova,
------ ------ int gfp_order)
++++++ ++++++ size_t size)
++++++ ++++++ {
++++++ ++++++ return -ENODEV;
++++++ ++++++ }
++++++ ++++++
+++++++++++++ static inline int iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova,
+++++++++++++ int gfp_order)
+ {
+ return -ENODEV;
+ }
+
  static inline size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
  struct scatterlist *sg, unsigned int nents, int prot)
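
Note on the amd_iommu.c hunks above: the merge drops the driver-private per-CPU flush queue (struct flush_queue, queue_add(), queue_flush_timeout() and the 10ms flush timer) and instead registers a flush callback with the generic flush-queue support in the IOVA allocator. The following is a condensed, non-buildable sketch of that pattern, using only identifiers that appear in the diff (init_iova_flush_queue(), iova_domain_flush_tlb(), dma_ops_domain_flush_tlb(), struct dma_ops_domain); the surrounding driver code is omitted and the exact callback signatures should be taken from include/linux/iova.h in this tree rather than from this sketch.

    /* Flush callback invoked by the IOVA flush queue: recover the
     * dma_ops_domain that embeds this iova_domain and flush its IOTLB. */
    static void iova_domain_flush_tlb(struct iova_domain *iovad)
    {
    	struct dma_ops_domain *dom;

    	dom = container_of(iovad, struct dma_ops_domain, iovad);
    	dma_ops_domain_flush_tlb(dom);
    }

    /* During dma_ops domain creation the driver now only registers the
     * callback; queuing, timing and freeing of flushed IOVAs are handled
     * by the iova layer instead of the removed per-CPU ring buffers. */
    	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
    		goto free_dma_dom;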