Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Jan 2015 22:34:55 +0000 (14:34 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Jan 2015 22:34:55 +0000 (14:34 -0800)
Pull perf fixes from Ingo Molnar:
 "Mostly tooling fixes, but also an event groups fix, two PMU driver
  fixes and a CPU model variant addition"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Tighten (and fix) the grouping condition
  perf/x86/intel: Add model number for Airmont
  perf/rapl: Fix crash in rapl_scale()
  perf/x86/intel/uncore: Move uncore_box_init() out of driver initialization
  perf probe: Fix probing kretprobes
  perf symbols: Introduce 'for' method to iterate over the symbols with a given name
  perf probe: Do not rely on map__load() filter to find symbols
  perf symbols: Introduce method to iterate symbols ordered by name
  perf symbols: Return the first entry with a given name in find_by_name method
  perf annotate: Fix memory leaks in LOCK handling
  perf annotate: Handle ins parsing failures
  perf scripting perl: Force to use stdbool
  perf evlist: Remove extraneous 'was' on error message
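
For readers unfamiliar with perf event groups (the subject of the grouping fix above): a group is formed at perf_event_open() time by passing the group leader's fd as group_fd, and the kernel then schedules the whole group onto the PMU as a unit; the fix tightens the check for which events may legally share a group. A minimal user-space sketch, illustrative only and not part of this merge:

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <sys/types.h>
	#include <string.h>
	#include <unistd.h>

	/* thin wrapper; glibc provides no perf_event_open() stub */
	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int open_cycle_instr_group(void)
	{
		struct perf_event_attr attr;
		int leader, sibling;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;		/* start stopped; enable via ioctl */

		/* group_fd == -1: this event becomes a group leader */
		leader = perf_event_open(&attr, 0, -1, -1, 0);
		if (leader < 0)
			return -1;

		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 0;		/* siblings follow the leader */

		/* passing the leader's fd schedules both events as a unit */
		sibling = perf_event_open(&attr, 0, -1, leader, 0);

		return sibling < 0 ? -1 : leader;
	}
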

161 files changed:
arch/alpha/mm/fault.c
arch/arc/mm/fault.c
arch/arm/boot/dts/imx6sx-sdb.dts
arch/arm/include/asm/kvm_emulate.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/kvm/arm.c
arch/arm/kvm/coproc.c
arch/arm/kvm/coproc.h
arch/arm/kvm/coproc_a15.c
arch/arm/kvm/coproc_a7.c
arch/arm/kvm/mmu.c
arch/arm/kvm/trace.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/sys_regs.c
arch/avr32/mm/fault.c
arch/cris/mm/fault.c
arch/frv/mm/fault.c
arch/ia64/mm/fault.c
arch/m32r/mm/fault.c
arch/m68k/mm/fault.c
arch/metag/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/mm/fault.c
arch/mn10300/mm/fault.c
arch/nios2/mm/fault.c
arch/openrisc/mm/fault.c
arch/parisc/mm/fault.c
arch/powerpc/mm/copro_fault.c
arch/powerpc/mm/fault.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/xmon/xmon.c
arch/s390/mm/fault.c
arch/score/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/tile/mm/fault.c
arch/um/kernel/trap.c
arch/x86/kvm/lapic.c
arch/x86/mm/fault.c
arch/xtensa/mm/fault.c
drivers/block/rbd.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/radeon/cik_sdma.c
drivers/gpu/drm/radeon/ni_dma.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rs400.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/si_dma.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/iommu/tegra-gart.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/net/can/c_can/c_can.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/mvm/tx.c
drivers/pinctrl/pinctrl-at91.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/scsi.c
drivers/staging/lustre/lustre/llite/vvp_io.c
fs/btrfs/scrub.c
fs/gfs2/quota.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4client.c
fs/quota/dquot.c
fs/quota/quota.c
fs/udf/file.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
include/linux/mm.h
include/linux/quota.h
include/linux/quotaops.h
include/net/ip.h
kernel/bpf/syscall.c
mm/gup.c
mm/ksm.c
mm/memory.c
net/dsa/slave.c
net/ipv4/ip_forward.c
net/ipv4/ping.c
net/ipv4/route.c
net/ipv4/udp_diag.c
net/ipv6/ip6_fib.c
net/ipv6/route.c
net/ipv6/xfrm6_policy.c
net/llc/sysctl_net_llc.c
net/mac80211/pm.c
net/mac80211/rx.c
net/sched/cls_bpf.c
net/sctp/associola.c
net/socket.c
net/wireless/nl80211.c
net/wireless/util.c
samples/bpf/test_maps.c
sound/core/seq/seq_dummy.c
sound/soc/adi/axi-i2s.c
sound/soc/codecs/pcm512x.c
sound/soc/codecs/rt286.c
sound/soc/codecs/rt5677.c
sound/soc/codecs/ts3a227e.c
sound/soc/codecs/wm8904.c
sound/soc/codecs/wm8960.c
sound/soc/fsl/fsl_esai.h
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-wm8962.c
sound/soc/generic/simple-card.c
sound/soc/intel/sst-firmware.c
sound/soc/intel/sst-haswell-ipc.c
sound/soc/omap/omap-mcbsp.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/soc-compress.c

index 98838a0..9d0ac09 100644
@@ -156,6 +156,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
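
The next several dozen hunks apply the same mechanical change to the page-fault handler of every architecture: handle_mm_fault() can now return VM_FAULT_SIGSEGV (e.g. when a stack guard-page check fails), and each caller must route it to its segfault path instead of falling through to BUG(). A composite sketch of the pattern (illustrative; the label names vary per architecture):

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)	/* the new case */
			goto bad_area;			/* deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();					/* unknown error bit */
	}
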
index 6f7e3a6..563cb27 100644
@@ -161,6 +161,8 @@ good_area:
 
        if (fault & VM_FAULT_OOM)
                goto out_of_memory;
+       else if (fault & VM_FAULT_SIGSEGV)
+               goto bad_area;
        else if (fault & VM_FAULT_SIGBUS)
                goto do_sigbus;
 
index 8c1febd..c108bb4 100644
                #address-cells = <1>;
                #size-cells = <0>;
 
-               ethphy1: ethernet-phy@0 {
-                       reg = <0>;
+               ethphy1: ethernet-phy@1 {
+                       reg = <1>;
                };
 
-               ethphy2: ethernet-phy@1 {
-                       reg = <1>;
+               ethphy2: ethernet-phy@2 {
+                       reg = <2>;
                };
        };
 };
index 66ce176..7b01523 100644
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
        vcpu->arch.hcr = HCR_GUEST_MASK;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr = hcr;
+}
+
 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
 {
        return 1;
index 254e065..04b4ea0 100644
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest on this vcpu */
        bool pause;
index 63e0ecc..1bca8f8 100644
@@ -44,6 +44,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
 #include <asm/pgalloc.h>
 
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
-       if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
-       
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
         *
         * VIVT caches are tagged using both the ASID and the VMID and don't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
+        *
+        * We need to do this through a kernel mapping (using the
+        * user-space mapping has proved to be the wrong
+        * solution). For that, we need to kmap one page at a time,
+        * and iterate over the range.
         */
-       if (icache_is_pipt()) {
-               __cpuc_coherent_user_range(hva, hva + size);
-       } else if (!icache_is_vivt_asid_tagged()) {
+
+       bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+       VM_BUG_ON(size & ~PAGE_MASK);
+
+       if (!need_flush && !icache_is_pipt())
+               goto vipt_cache;
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               if (need_flush)
+                       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               if (icache_is_pipt())
+                       __cpuc_coherent_user_range((unsigned long)va,
+                                                  (unsigned long)va + PAGE_SIZE);
+
+               size -= PAGE_SIZE;
+               pfn++;
+
+               kunmap_atomic(va);
+       }
+
+vipt_cache:
+       if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       void *va = kmap_atomic(pte_page(pte));
+
+       kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+       kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       unsigned long size = PMD_SIZE;
+       pfn_t pfn = pmd_pfn(pmd);
+
+       while (size) {
+               void *va = kmap_atomic_pfn(pfn);
+
+               kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+               pfn++;
+               size -= PAGE_SIZE;
+
+               kunmap_atomic(va);
+       }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
 #define kvm_virt_to_phys(x)            virt_to_idmap((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* !__ASSEMBLY__ */
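
The kmap-based flush idiom above is worth calling out: on 32-bit ARM the guest page may live in highmem with no permanent kernel mapping, so both __coherent_cache_guest_page() and __kvm_flush_dcache_pmd() walk the range one page at a time through a transient atomic mapping. The core loop, reduced to a sketch:

	while (size) {
		void *va = kmap_atomic_pfn(pfn);	/* short-lived kernel mapping */

		kvm_flush_dcache_to_poc(va, PAGE_SIZE);	/* clean+invalidate to PoC */

		kunmap_atomic(va);
		pfn++;
		size -= PAGE_SIZE;
	}
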
 
index 2d6d910..0b0d58a 100644
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
 
-       /*
-        * Check whether this vcpu requires the cache to be flushed on
-        * this physical CPU. This is a consequence of doing dcache
-        * operations by set/way on this vcpu. We do it here to be in
-        * a non-preemptible section.
-        */
-       if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
-               flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
        kvm_arm_set_running_vcpu(vcpu);
 }
 
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 
                vcpu->mode = OUTSIDE_GUEST_MODE;
-               vcpu->arch.last_pcpu = smp_processor_id();
                kvm_guest_exit();
                trace_kvm_exit(*vcpu_pc(vcpu));
                /*
index 7928dbd..f3d88dc 100644
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
        return true;
 }
 
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct coproc_params *p,
                        const struct coproc_reg *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt1);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
-               break;
-
-       case 10:                /* DCCSW */
-               asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set.  If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
  */
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
-                         const struct coproc_params *p,
-                         const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r)
 {
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
        BUG_ON(!p->is_write);
 
        vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
        if (p->is_64bit)
                vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
 
-       return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
 
index 1a44bbe..88d24a3 100644
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define is64           .is_64 = true
 #define is32           .is_64 = false
 
-bool access_sctlr(struct kvm_vcpu *vcpu,
-                 const struct coproc_params *p,
-                 const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+                  const struct coproc_params *p,
+                  const struct coproc_reg *r);
 
 #endif /* __ARM_KVM_COPROC_LOCAL_H__ */
index e6f4ae4..a713675 100644
@@ -34,7 +34,7 @@
 static const struct coproc_reg a15_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
 };
 
 static struct kvm_coproc_target_table a15_target_table = {
index 17fc7cd..b19e46d 100644
@@ -37,7 +37,7 @@
 static const struct coproc_reg a7_regs[] = {
        /* SCTLR: swapped by interrupt.S. */
        { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
-                       access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+                       access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
 };
 
 static struct kvm_coproc_target_table a7_target_table = {
index 1dc9778..1366625 100644
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
                kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 }
 
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+       __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+       __kvm_flush_dcache_pud(pud);
+}
+
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
 {
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
        put_page(virt_to_page(pmd));
 }
 
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM.  However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
 {
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
        start_pte = pte = pte_offset_kernel(pmd, addr);
        do {
                if (!pte_none(*pte)) {
+                       pte_t old_pte = *pte;
+
                        kvm_set_pte(pte, __pte(0));
-                       put_page(virt_to_page(pte));
                        kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                       /* No need to invalidate the cache for device mappings */
+                       if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                               kvm_flush_dcache_pte(old_pte);
+
+                       put_page(virt_to_page(pte));
                }
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
                        if (kvm_pmd_huge(*pmd)) {
+                               pmd_t old_pmd = *pmd;
+
                                pmd_clear(pmd);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pmd(old_pmd);
+
                                put_page(virt_to_page(pmd));
                        } else {
                                unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
                        if (pud_huge(*pud)) {
+                               pud_t old_pud = *pud;
+
                                pud_clear(pud);
                                kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+                               kvm_flush_dcache_pud(old_pud);
+
                                put_page(virt_to_page(pud));
                        } else {
                                unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
        pte = pte_offset_kernel(pmd, addr);
        do {
-               if (!pte_none(*pte)) {
-                       hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                       kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
-               }
+               if (!pte_none(*pte) &&
+                   (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+                       kvm_flush_dcache_pte(*pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
        do {
                next = kvm_pmd_addr_end(addr, end);
                if (!pmd_none(*pmd)) {
-                       if (kvm_pmd_huge(*pmd)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
-                       } else {
+                       if (kvm_pmd_huge(*pmd))
+                               kvm_flush_dcache_pmd(*pmd);
+                       else
                                stage2_flush_ptes(kvm, pmd, addr, next);
-                       }
                }
        } while (pmd++, addr = next, addr != end);
 }
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
        do {
                next = kvm_pud_addr_end(addr, end);
                if (!pud_none(*pud)) {
-                       if (pud_huge(*pud)) {
-                               hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
-                               kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
-                       } else {
+                       if (pud_huge(*pud))
+                               kvm_flush_dcache_pud(*pud);
+                       else
                                stage2_flush_pmds(kvm, pud, addr, next);
-                       }
                }
        } while (pud++, addr = next, addr != end);
 }
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
  * Go through the stage 2 page tables and invalidate any cache lines
  * backing memory already mapped to the VM.
  */
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
 {
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
        return !pfn_valid(pfn);
 }
 
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                     unsigned long size, bool uncached)
+{
+       __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                          struct kvm_memory_slot *memslot, unsigned long hva,
                          unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pmd_writable(&new_pmd);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
                ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
        } else {
                pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                        kvm_set_s2pte_writable(&new_pte);
                        kvm_set_pfn_dirty(pfn);
                }
-               coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
-                                         fault_ipa_uncached);
+               coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
                ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
                        pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
        }
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
        unmap_stage2_range(kvm, gpa, size);
        spin_unlock(&kvm->mmu_lock);
 }
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ *   caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+       unsigned long hcr = vcpu_get_hcr(vcpu);
+
+       /*
+        * If this is the first time we do a S/W operation
+        * (i.e. HCR_TVM not set) flush the whole memory, and set the
+        * VM trapping.
+        *
+        * Otherwise, rely on the VM trapping to wait for the MMU +
+        * Caches to be turned off. At that point, we'll be able to
+        * clean the caches again.
+        */
+       if (!(hcr & HCR_TVM)) {
+               trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+                                       vcpu_has_cache_enabled(vcpu));
+               stage2_flush_vm(vcpu->kvm);
+               vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+       }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+       bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+       /*
+        * If switching the MMU+caches on, need to invalidate the caches.
+        * If switching it off, need to clean the caches.
+        * Clean + invalidate does the trick always.
+        */
+       if (now_enabled != was_enabled)
+               stage2_flush_vm(vcpu->kvm);
+
+       /* Caches are now on, stop trapping VM ops (until a S/W op) */
+       if (now_enabled)
+               vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+       trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
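
Putting the two helpers together: access_dcsw() calls kvm_set_way_flush() and the VM-register accessors call kvm_toggle_cache(), as seen in the coproc.c and sys_regs.c hunks elsewhere in this merge. A hypothetical composite handler, purely for illustration:

	static bool handle_cache_trap(struct kvm_vcpu *vcpu, bool is_set_way)
	{
		bool was_enabled = vcpu_has_cache_enabled(vcpu);

		if (is_set_way) {
			/* S/W op: flush stage-2 once, start trapping VM regs */
			kvm_set_way_flush(vcpu);
			return true;
		}

		/* ... emulate the trapped VM-register write here ... */

		/* flush on cache on/off transitions; untrap once caches are on */
		kvm_toggle_cache(vcpu, was_enabled);
		return true;
	}
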
index b1d640f..b6a6e71 100644
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
                  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_set_way_flush,
+           TP_PROTO(unsigned long vcpu_pc, bool cache),
+           TP_ARGS(vcpu_pc, cache),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           cache           )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->cache              = cache;
+           ),
+
+           TP_printk("S/W flush at 0x%016lx (cache %s)",
+                     __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+           TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+           TP_ARGS(vcpu_pc, was, now),
+
+           TP_STRUCT__entry(
+                   __field(    unsigned long,  vcpu_pc         )
+                   __field(    bool,           was             )
+                   __field(    bool,           now             )
+           ),
+
+           TP_fast_assign(
+                   __entry->vcpu_pc            = vcpu_pc;
+                   __entry->was                = was;
+                   __entry->now                = now;
+           ),
+
+           TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+                     __entry->vcpu_pc, __entry->was ? "on" : "off",
+                     __entry->now ? "on" : "off")
+);
+
 #endif /* _TRACE_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
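
For reference, these tracepoints fire from the helpers added in arch/arm/kvm/mmu.c earlier in this merge:

	/* in kvm_set_way_flush(), on the first S/W op while HCR.TVM is clear */
	trace_kvm_set_way_flush(*vcpu_pc(vcpu), vcpu_has_cache_enabled(vcpu));

	/* in kvm_toggle_cache(), on every trapped VM-register write */
	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
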
index 865a7e2..3cb4c85 100644
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
                vcpu->arch.hcr_el2 &= ~HCR_RW;
 }
 
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+       vcpu->arch.hcr_el2 = hcr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
index 0b7dfdb..acd101a 100644
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
         * Anything that is not used directly from assembly code goes
         * here.
         */
-       /* dcache set/way operation pending */
-       int last_pcpu;
-       cpumask_t require_dcache_flush;
 
        /* Don't run the guest */
        bool pause;
index 14a74f1..adcf495 100644
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
        return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
 }
 
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
-                                            unsigned long size,
-                                            bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+                                              unsigned long size,
+                                              bool ipa_uncached)
 {
+       void *va = page_address(pfn_to_page(pfn));
+
        if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-               kvm_flush_dcache_to_poc((void *)hva, size);
+               kvm_flush_dcache_to_poc(va, size);
 
        if (!icache_is_aliasing()) {            /* PIPT */
-               flush_icache_range(hva, hva + size);
+               flush_icache_range((unsigned long)va,
+                                  (unsigned long)va + size);
        } else if (!icache_is_aivivt()) {       /* non ASID-tagged VIVT */
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
 }
 
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+       struct page *page = pte_page(pte);
+       kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+       struct page *page = pmd_page(pmd);
+       kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+       struct page *page = pud_page(pud);
+       kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
 #define kvm_virt_to_phys(x)            __virt_to_phys((unsigned long)(x))
 
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
index 3d7c2df..f31e8bb 100644
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
        return ccsidr;
 }
 
-static void do_dc_cisw(u32 val)
-{
-       asm volatile("dc cisw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
-       asm volatile("dc csw, %x0" : : "r" (val));
-       dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
 static bool access_dcsw(struct kvm_vcpu *vcpu,
                        const struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
 {
-       unsigned long val;
-       int cpu;
-
        if (!p->is_write)
                return read_from_write_only(vcpu, p);
 
-       cpu = get_cpu();
-
-       cpumask_setall(&vcpu->arch.require_dcache_flush);
-       cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
-       /* If we were already preempted, take the long way around */
-       if (cpu != vcpu->arch.last_pcpu) {
-               flush_cache_all();
-               goto done;
-       }
-
-       val = *vcpu_reg(vcpu, p->Rt);
-
-       switch (p->CRm) {
-       case 6:                 /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
-       case 14:                /* DCCISW */
-               do_dc_cisw(val);
-               break;
-
-       case 10:                /* DCCSW */
-               do_dc_csw(val);
-               break;
-       }
-
-done:
-       put_cpu();
-
+       kvm_set_way_flush(vcpu);
        return true;
 }
 
 /*
  * Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
  */
 static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          const struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
 {
        unsigned long val;
+       bool was_enabled = vcpu_has_cache_enabled(vcpu);
 
        BUG_ON(!p->is_write);
 
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
                vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
        }
 
-       return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
-                        const struct sys_reg_params *p,
-                        const struct sys_reg_desc *r)
-{
-       access_vm_reg(vcpu, p, r);
-
-       if (vcpu_has_cache_enabled(vcpu)) {     /* MMU+Caches enabled? */
-               vcpu->arch.hcr_el2 &= ~HCR_TVM;
-               stage2_flush_vm(vcpu->kvm);
-       }
-
+       kvm_toggle_cache(vcpu, was_enabled);
        return true;
 }
 
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          NULL, reset_mpidr, MPIDR_EL1 },
        /* SCTLR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
-         access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+         access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        /* CPACR_EL1 */
        { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
          NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
-       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
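
Each row of these tables keys on the ARM coprocessor encoding (Op1, CRn, CRm, Op2). For example, the SCTLR entry above matches a trapped guest write of the form (illustrative):

	/* 32-bit guest write to SCTLR; trapped while HCR.TVM is set */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
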
index 0eca933..d223a8b 100644
@@ -142,6 +142,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 1790f22..2686a7a 100644
@@ -176,6 +176,8 @@ retry:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 9a66372..ec4917d 100644
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 7225dad..ba5ba7a 100644
@@ -172,6 +172,8 @@ retry:
                 */
                if (fault & VM_FAULT_OOM) {
                        goto out_of_memory;
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       goto bad_area;
                } else if (fault & VM_FAULT_SIGBUS) {
                        signal = SIGBUS;
                        goto bad_area;
index e9c6a80..e3d4d48 100644
@@ -200,6 +200,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 2bd7487..b2f04ae 100644
@@ -145,6 +145,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto map_err;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bus_err;
                BUG();
index 332680e..2de5dc6 100644
@@ -141,6 +141,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index fa4cf52..d46a5eb 100644
@@ -224,6 +224,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index becc42b..70ab5d6 100644
@@ -158,6 +158,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 3516cbd..0c2cc5d 100644
@@ -262,6 +262,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 15a0bb5..34429d5 100644
@@ -135,6 +135,8 @@ survive:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 0703acf..230ac20 100644
@@ -171,6 +171,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 3ca9c11..e5120e6 100644
@@ -256,6 +256,8 @@ good_area:
                 */
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto bad_area;
                BUG();
index 5a236f0..1b5305d 100644
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
                if (*flt & VM_FAULT_OOM) {
                        ret = -ENOMEM;
                        goto out_unlock;
-               } else if (*flt & VM_FAULT_SIGBUS) {
+               } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                        ret = -EFAULT;
                        goto out_unlock;
                }
index eb79907..6154b0a 100644
@@ -437,6 +437,8 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+               if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                rc = mm_fault_error(regs, address, fault);
                if (rc >= MM_FAULT_RETURN)
                        goto bail;
index b700a32..d2de7d5 100644
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
         * all cpus at boot. Get these reg values of current cpu and use the
         * same across all cpus.
         */
-       uint64_t lpcr_val = mfspr(SPRN_LPCR);
+       uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
        uint64_t hid0_val = mfspr(SPRN_HID0);
        uint64_t hid1_val = mfspr(SPRN_HID1);
        uint64_t hid4_val = mfspr(SPRN_HID4);
index 5b150f0..13c6e20 100644
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
+       args.token = cpu_to_be32(args.token);
        args.nargs = cpu_to_be32(3);
        args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
index 811937b..9065d5a 100644
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
                                do_no_context(regs);
                        else
                                pagefault_out_of_memory();
+               } else if (fault & VM_FAULT_SIGSEGV) {
+                       /* Kernel mode? Handle exceptions or die */
+                       if (!user_mode(regs))
+                               do_no_context(regs);
+                       else
+                               do_sigsegv(regs, SEGV_MAPERR);
                } else if (fault & VM_FAULT_SIGBUS) {
                        /* Kernel mode? Handle exceptions or die */
                        if (!user_mode(regs))
index 5223898..6860beb 100644
@@ -114,6 +114,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 541dc61..a58fec9 100644
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
        } else {
                if (fault & VM_FAULT_SIGBUS)
                        do_sigbus(regs, error_code, address);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area(regs, error_code, address);
                else
                        BUG();
        }
index 908e8c1..70d8171 100644
@@ -249,6 +249,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 18fcd71..4798232 100644
@@ -446,6 +446,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 565e25a..0f61a73 100644
@@ -442,6 +442,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 5678c35..2096173 100644
@@ -80,6 +80,8 @@ good_area:
                if (unlikely(fault & VM_FAULT_ERROR)) {
                        if (fault & VM_FAULT_OOM) {
                                goto out_of_memory;
+                       } else if (fault & VM_FAULT_SIGSEGV) {
+                               goto out;
                        } else if (fault & VM_FAULT_SIGBUS) {
                                err = -EACCES;
                                goto out;
index 4f0c0b9..d52dcf0 100644
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
                u16 cid, lid;
                u32 ldr, aid;
 
+               if (!kvm_apic_present(vcpu))
+                       continue;
+
                aid = kvm_apic_id(apic);
                ldr = kvm_apic_get_reg(apic, APIC_LDR);
                cid = apic_cluster_id(new, ldr);
index 38dcec4..e3ff27a 100644
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
                             VM_FAULT_HWPOISON_LARGE))
                        do_sigbus(regs, error_code, address, fault);
+               else if (fault & VM_FAULT_SIGSEGV)
+                       bad_area_nosemaphore(regs, error_code, address);
                else
                        BUG();
        }
index b57c4f9..9e3571a 100644
@@ -117,6 +117,8 @@ good_area:
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
+               else if (fault & VM_FAULT_SIGSEGV)
+                       goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
index 3ec85df..8a86b62 100644
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
  * If an image has a non-zero parent overlap, get a reference to its
  * parent.
  *
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened.  We
- * drop it again if there is no overlap.
- *
  * Returns true if the rbd device has a parent with a non-zero
  * overlap and a reference for it was successfully taken, or
  * false otherwise.
  */
 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
 {
-       int counter;
+       int counter = 0;
 
        if (!rbd_dev->parent_spec)
                return false;
 
-       counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
-       if (counter > 0 && rbd_dev->parent_overlap)
-               return true;
-
-       /* Image was flattened, but parent is not yet torn down */
+       down_read(&rbd_dev->header_rwsem);
+       if (rbd_dev->parent_overlap)
+               counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+       up_read(&rbd_dev->header_rwsem);
 
        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow");
 
-       return false;
+       return counter > 0;
 }
 
 /*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
                 */
                if (rbd_dev->parent_overlap) {
                        rbd_dev->parent_overlap = 0;
-                       smp_mb();
                        rbd_dev_parent_put(rbd_dev);
                        pr_info("%s: clone image has been flattened\n",
                                rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
         * treat it specially.
         */
        rbd_dev->parent_overlap = overlap;
-       smp_mb();
        if (!overlap) {
 
                /* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
 {
        struct rbd_image_header *header;
 
-       /* Drop parent reference unless it's already been done (or none) */
-
-       if (rbd_dev->parent_overlap)
-               rbd_dev_parent_put(rbd_dev);
+       rbd_dev_parent_put(rbd_dev);
 
        /* Free dynamic fields from the header, then zero it out */
 
index 633532a..25bc47f 100644
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include "kfd_priv.h"
 #include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
 
 #define MQD_SIZE_ALIGNED 768
 
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
        kfd->shared_resources = *gpu_resources;
 
        /* calculate max size of mqds needed for queues */
-       size = max_num_of_processes *
-               max_num_of_queues_per_process *
-               kfd->device_info->mqd_size_aligned;
+       size = max_num_of_queues_per_device *
+                       kfd->device_info->mqd_size_aligned;
 
        /* add another 512KB for all other allocations on gart */
        size += 512 * 1024;
index 30c8fda..0d8694f 100644
@@ -183,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval != 0) {
@@ -207,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
        list_add(&q->list, &qpd->queues_list);
        dqm->queue_count++;
 
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
        return 0;
 }
@@ -326,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
        if (list_empty(&qpd->queues_list))
                deallocate_vmid(dqm, qpd, q);
        dqm->queue_count--;
+
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -541,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
 
        for (i = 0; i < pipes_num; i++) {
                inx = i + first_pipe;
+               /*
+                * HPD buffer on GTT is allocated by amdkfd, no need to waste
+                * space in GTT for pipelines we don't initialize
+                */
                pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
                pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
                /* = log2(bytes/4)-1 */
-               kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+               kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
                                CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
        }
 
@@ -560,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
 
        pr_debug("kfd: In %s\n", __func__);
 
-       retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+       retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
        if (retval != 0)
                return retval;
 
@@ -752,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
        pr_debug("kfd: In func %s\n", __func__);
 
        mutex_lock(&dqm->lock);
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               mutex_unlock(&dqm->lock);
+               return -EPERM;
+       }
+
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
@@ -775,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
        dqm->queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, false);
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type.
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
        mutex_unlock(&dqm->lock);
 }
 
@@ -793,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
 
        mutex_lock(&dqm->lock);
 
+       if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+               pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+                               dqm->total_queue_count);
+               retval = -EPERM;
+               goto out;
+       }
+
        mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
        if (mqd == NULL) {
                mutex_unlock(&dqm->lock);
@@ -810,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                retval = execute_queues_cpsch(dqm, false);
        }
 
+       /*
+        * Unconditionally increment this counter, regardless of the queue's
+        * type or whether the queue is active.
+        */
+       dqm->total_queue_count++;
+
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
 out:
        mutex_unlock(&dqm->lock);
        return retval;
@@ -930,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
 
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
 
+       /*
+        * Unconditionally decrement this counter, regardless of the queue's
+        * type
+        */
+       dqm->total_queue_count--;
+       pr_debug("Total of %d queues are accountable so far\n",
+                       dqm->total_queue_count);
+
        mutex_unlock(&dqm->lock);
 
        return 0;
index c3f189e..52035bf 100644
@@ -130,6 +130,7 @@ struct device_queue_manager {
        struct list_head        queues;
        unsigned int            processes_count;
        unsigned int            queue_count;
+       unsigned int            total_queue_count;
        unsigned int            next_pipe_to_allocate;
        unsigned int            *allocated_queues;
        unsigned int            vmid_bitmap;
index 95d5af1..a8be6df 100644
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
 MODULE_PARM_DESC(sched_policy,
        "Kernel cmdline parameter that defines the amdkfd scheduling policy");
 
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
-       "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+       "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
 
 bool kgd2kfd_init(unsigned interface_version,
                  const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
        }
 
        /* Verify module parameters */
-       if ((max_num_of_processes < 0) ||
-               (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
-               pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
-               return -1;
-       }
-
-       if ((max_num_of_queues_per_process < 0) ||
-               (max_num_of_queues_per_process >
-                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
-               pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+       if ((max_num_of_queues_per_device < 0) ||
+               (max_num_of_queues_per_device >
+                       KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+               pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
                return -1;
        }
 
index 4c25ef5..6cfe7f1 100644 (file)
@@ -30,7 +30,7 @@ static DEFINE_MUTEX(pasid_mutex);
 
 int kfd_pasid_init(void)
 {
-       pasid_limit = max_num_of_processes;
+       pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
 
        pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
        if (!pasid_bitmap)
index b3dc13c..96dc10e 100644 (file)
 #define kfd_alloc_struct(ptr_to_struct)        \
        ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
 
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
 #define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
 
 /*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
  */
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
 
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE               \
+       (KFD_MAX_NUM_OF_PROCESSES *                     \
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
 
 #define KFD_KERNEL_QUEUE_SIZE 2048
 
index 4752678..f37cf5e 100644 (file)
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
        pr_debug("kfd: in %s\n", __func__);
 
        found = find_first_zero_bit(pqm->queue_slot_bitmap,
-                       max_num_of_queues_per_process);
+                       KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
 
        pr_debug("kfd: the new slot id %lu\n", found);
 
-       if (found >= max_num_of_queues_per_process) {
+       if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
                                pqm->process->pasid);
                return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
 
        INIT_LIST_HEAD(&pqm->queues);
        pqm->queue_slot_bitmap =
-                       kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+                       kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
                                        BITS_PER_BYTE), GFP_KERNEL);
        if (pqm->queue_slot_bitmap == NULL)
                return -ENOMEM;
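
With the module parameter gone, the slot bitmap now has a fixed size: DIV_ROUND_UP(1024, 8) = 128 bytes per process. A sketch of the claim/release pairing, using the bitmap helpers already visible in this file (slot is a hypothetical local):

    slot = find_first_zero_bit(pqm->queue_slot_bitmap,
                               KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
    if (slot >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
            return -ENOMEM;                         /* all 1024 slots in use */
    set_bit(slot, pqm->queue_slot_bitmap);          /* claim the slot */
    /* ... queue lives here ... */
    clear_bit(slot, pqm->queue_slot_bitmap);        /* release on destroy or error */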
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
                pqn->kq = NULL;
                retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
                                                &q->properties.vmid);
+               pr_debug("DQM returned %d for create_queue\n", retval);
                print_queue(q);
                break;
        case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
        }
 
        if (retval != 0) {
-               pr_err("kfd: error dqm create queue\n");
+               pr_debug("Error dqm create queue\n");
                goto err_create_queue;
        }
 
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
 err_create_queue:
        kfree(pqn);
 err_allocate_pqn:
+       /* if the queues list is now empty, unregister the process from the device */
        clear_bit(*qid, pqm->queue_slot_bitmap);
+       if (list_empty(&pqm->queues))
+               dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
        return retval;
 }
 
index cf775a4..dc386eb 100644 (file)
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
 }
 EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
 
+static void remove_from_modeset(struct drm_mode_set *set,
+               struct drm_connector *connector)
+{
+       int i, j;
+
+       for (i = 0; i < set->num_connectors; i++) {
+               if (set->connectors[i] == connector)
+                       break;
+       }
+
+       if (i == set->num_connectors)
+               return;
+
+       for (j = i + 1; j < set->num_connectors; j++) {
+               set->connectors[j - 1] = set->connectors[j];
+       }
+       set->num_connectors--;
+
+       /* because i915 is pissy about this...
+        * TODO: maybe we need to make sure we set it back to != NULL somewhere?
+        */
+       if (set->num_connectors == 0)
+               set->fb = NULL;
+}
+
 int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
                                       struct drm_connector *connector)
 {
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
        }
        fb_helper->connector_count--;
        kfree(fb_helper_connector);
+
+       /* also clean up dangling references to the connector: */
+       for (i = 0; i < fb_helper->crtc_count; i++)
+               remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
        return 0;
 }
 EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
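
A worked example of the compaction above: with num_connectors = 3 and the target at index 1, the loop copies connectors[2] down into connectors[1] and the count drops to 2. Only when the count reaches 0 is set->fb cleared, since (per the comment in remove_from_modeset) i915 objects to a modeset that still carries a framebuffer but no connectors.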
index d476279..a9041d1 100644 (file)
@@ -32,6 +32,8 @@
 struct tda998x_priv {
        struct i2c_client *cec;
        struct i2c_client *hdmi;
+       struct mutex mutex;
+       struct delayed_work dwork;
        uint16_t rev;
        uint8_t current_page;
        int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        uint8_t addr = REG2ADDR(reg);
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return ret;
+               goto out;
 
        ret = i2c_master_send(client, &addr, sizeof(addr));
        if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
        if (ret < 0)
                goto fail;
 
-       return ret;
+       goto out;
 
 fail:
        dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
        return ret;
 }
 
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
        buf[0] = REG2ADDR(reg);
        memcpy(&buf[1], p, cnt);
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, cnt + 1);
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
        uint8_t buf[] = {REG2ADDR(reg), val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
        uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
        int ret;
 
+       mutex_lock(&priv->mutex);
        ret = set_page(priv, reg);
        if (ret < 0)
-               return;
+               goto out;
 
        ret = i2c_master_send(client, buf, sizeof(buf));
        if (ret < 0)
                dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+       mutex_unlock(&priv->mutex);
 }
 
 static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
        reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
 }
 
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct tda998x_priv *priv =
+                       container_of(dwork, struct tda998x_priv, dwork);
+
+       if (priv->encoder && priv->encoder->dev)
+               drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
 /*
  * only 2 interrupts may occur: screen plug/unplug and EDID read
  */
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
                priv->wq_edid_wait = 0;
                wake_up(&priv->wq_edid);
        } else if (cec != 0) {                  /* HPD change */
-               if (priv->encoder && priv->encoder->dev)
-                       drm_helper_hpd_irq_event(priv->encoder->dev);
+               schedule_delayed_work(&priv->dwork, HZ/10);
        }
        return IRQ_HANDLED;
 }
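
Pushing the hotplug notification into a delayed work item moves drm_kms_helper_hotplug_event() out of the IRQ thread and debounces it: schedule_delayed_work() is a no-op while the work is already pending, so a burst of plug/unplug edges collapses into one callback roughly 100 ms (HZ/10) after the first edge. The lifecycle the rest of this patch wires up, in sketch form:

    INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);   /* at create time, with the IRQ */
    schedule_delayed_work(&priv->dwork, HZ/10);     /* from the IRQ thread on HPD change */
    cancel_delayed_work_sync(&priv->dwork);         /* at destroy, after free_irq() */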
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
        reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-       if (priv->hdmi->irq)
+       if (priv->hdmi->irq) {
                free_irq(priv->hdmi->irq, priv);
+               cancel_delayed_work_sync(&priv->dwork);
+       }
 
        i2c_unregister_device(priv->cec);
 }
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        struct device_node *np = client->dev.of_node;
        u32 video;
        int rev_lo, rev_hi, ret;
+       unsigned short cec_addr;
 
        priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
        priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
 
        priv->current_page = 0xff;
        priv->hdmi = client;
-       priv->cec = i2c_new_dummy(client->adapter, 0x34);
+       /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+       cec_addr = 0x34 + (client->addr & 0x03);
+       priv->cec = i2c_new_dummy(client->adapter, cec_addr);
        if (!priv->cec)
                return -ENODEV;
 
        priv->dpms = DRM_MODE_DPMS_OFF;
 
+       mutex_init(&priv->mutex);       /* protect the page access */
+
        /* wake up the device: */
        cec_write(priv, REG_CEC_ENAMODS,
                        CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
        if (client->irq) {
                int irqf_trigger;
 
-               /* init read EDID waitqueue */
+               /* init read EDID waitqueue and HPD work */
                init_waitqueue_head(&priv->wq_edid);
+               INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
 
                /* clear pending interrupts */
                reg_read(priv, REG_INT_FLAGS_0);
index 574057c..7643300 100644 (file)
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
                        } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(IS_HSW_ULT(dev));
-                       } else if (IS_BROADWELL(dev)) {
-                               dev_priv->pch_type = PCH_LPT;
-                               dev_priv->pch_id =
-                                       INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
-                               DRM_DEBUG_KMS("This is Broadwell, assuming "
-                                             "LynxPoint LP PCH\n");
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_LPT;
                                DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
-                               WARN_ON(!IS_HASWELL(dev));
-                               WARN_ON(!IS_HSW_ULT(dev));
+                               WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+                               WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
                        } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
                                dev_priv->pch_type = PCH_SPT;
                                DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
index e9f891c..9d7a715 100644 (file)
@@ -2159,8 +2159,7 @@ struct drm_i915_cmd_table {
 #define IS_HSW_EARLY_SDV(dev)  (IS_HASWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev)                (IS_BROADWELL(dev) && \
-                                ((INTEL_DEVID(dev) & 0xf) == 0x2  || \
-                                (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+                                ((INTEL_DEVID(dev) & 0xf) == 0x6 ||    \
                                 (INTEL_DEVID(dev) & 0xf) == 0xe))
 #define IS_BDW_GT3(dev)                (IS_BROADWELL(dev) && \
                                 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
index 76354d3..5f61482 100644 (file)
@@ -3148,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
                u32 size = i915_gem_obj_ggtt_size(obj);
                uint64_t val;
 
+               /* Adjust fence size to match tiled area */
+               if (obj->tiling_mode != I915_TILING_NONE) {
+                       uint32_t row_size = obj->stride *
+                               (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+                       size = (size / row_size) * row_size;
+               }
+
                val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
                                 0xfffff000) << 32;
                val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
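
The adjustment above truncates the fence to whole tile rows. A worked example with assumed values (not from the patch): Y tiling with a 512-byte stride gives row_size = 512 * 32 = 16384 bytes, so an object of 1052672 bytes (1 MiB + 4 KiB) gets its fence clamped to (1052672 / 16384) * 16384 = 1048576 bytes, keeping the fenced region from covering memory beyond the tiled area.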
@@ -4884,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
        for (i = 0; i < NUM_L3_SLICES(dev); i++)
                i915_gem_l3_remap(&dev_priv->ring[RCS], i);
 
-       /*
-        * XXX: Contexts should only be initialized once. Doing a switch to the
-        * default context switch however is something we'd like to do after
-        * reset or thaw (the latter may not actually be necessary for HW, but
-        * goes with our code better). Context switching requires rings (for
-        * the do_switch), but before enabling PPGTT. So don't move this.
-        */
-       ret = i915_gem_context_enable(dev_priv);
+       ret = i915_ppgtt_init_hw(dev);
        if (ret && ret != -EIO) {
-               DRM_ERROR("Context enable failed %d\n", ret);
+               DRM_ERROR("PPGTT enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
-
-               return ret;
        }
 
-       ret = i915_ppgtt_init_hw(dev);
+       ret = i915_gem_context_enable(dev_priv);
        if (ret && ret != -EIO) {
-               DRM_ERROR("PPGTT enable failed %d\n", ret);
+               DRM_ERROR("Context enable failed %d\n", ret);
                i915_gem_cleanup_ringbuffer(dev);
+
+               return ret;
        }
 
        return ret;
index 4d63839..dfb783a 100644 (file)
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
 
        WARN_ON(panel->backlight.max == 0);
 
-       if (panel->backlight.level == 0) {
+       if (panel->backlight.level <= panel->backlight.min) {
                panel->backlight.level = panel->backlight.max;
                if (panel->backlight.device)
                        panel->backlight.device->props.brightness =
index a0133c7..42cd0cf 100644 (file)
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 4be2bb7..ce787a9 100644 (file)
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 74f06d5..279801c 100644 (file)
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
                return r;
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
        rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        return radeon_gart_table_ram_alloc(rdev);
 }
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
        WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+       return addr;
+}
+
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags)
+                           uint64_t entry)
 {
        u32 *gtt = rdev->gart.ptr;
-       gtt[i] = cpu_to_le32(lower_32_bits(addr));
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
index 064ad55..08d68f3 100644 (file)
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
 #define R300_PTE_WRITEABLE (1 << 2)
 #define R300_PTE_READABLE  (1 << 3)
 
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                             uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = rdev->gart.ptr;
-
        addr = (lower_32_bits(addr) >> 8) |
                ((upper_32_bits(addr) & 0xff) << 24);
        if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R300_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                addr |= R300_PTE_UNSNOOPED;
+       return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+                             uint64_t entry)
+{
+       void __iomem *ptr = rdev->gart.ptr;
+
        /* on x86 we want this to be CPU endian; on powerpc
         * without HW swappers, it'll get swapped on the way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
-       writel(addr, ((void __iomem *)ptr) + (i * 4));
+       writel(entry, ((void __iomem *)ptr) + (i * 4));
 }
 
 int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+       rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
        rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
 }
index 54529b8..3f2a8d3 100644 (file)
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
  * Dummy page
  */
 struct radeon_dummy_page {
+       uint64_t        entry;
        struct page     *page;
        dma_addr_t      addr;
 };
@@ -645,7 +646,7 @@ struct radeon_gart {
        unsigned                        num_cpu_pages;
        unsigned                        table_size;
        struct page                     **pages;
-       dma_addr_t                      *pages_addr;
+       uint64_t                        *pages_entry;
        bool                            ready;
 };
 
@@ -1847,8 +1848,9 @@ struct radeon_asic {
        /* gart */
        struct {
                void (*tlb_flush)(struct radeon_device *rdev);
+               uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
                void (*set_page)(struct radeon_device *rdev, unsigned i,
-                                uint64_t addr, uint32_t flags);
+                                uint64_t entry);
        } gart;
        struct {
                int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
 #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
 #define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
 #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
 #define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
 #define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
 #define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
index 121aff6..ed0e10e 100644 (file)
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
                DRM_INFO("Forcing AGP to PCIE mode\n");
                rdev->flags |= RADEON_IS_PCIE;
                rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
                rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
        } else {
                DRM_INFO("Forcing AGP to PCI mode\n");
                rdev->flags |= RADEON_IS_PCI;
                rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+               rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
                rdev->asic->gart.set_page = &r100_pci_gart_set_page;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
        .mc_wait_for_idle = &r100_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -359,6 +363,7 @@ static struct radeon_asic r300_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &r100_pci_gart_tlb_flush,
+               .get_page_entry = &r100_pci_gart_get_page_entry,
                .set_page = &r100_pci_gart_set_page,
        },
        .ring = {
@@ -425,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -491,6 +497,7 @@ static struct radeon_asic r420_asic = {
        .mc_wait_for_idle = &r300_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -557,6 +564,7 @@ static struct radeon_asic rs400_asic = {
        .mc_wait_for_idle = &rs400_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -623,6 +631,7 @@ static struct radeon_asic rs600_asic = {
        .mc_wait_for_idle = &rs600_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs600_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -691,6 +700,7 @@ static struct radeon_asic rs690_asic = {
        .mc_wait_for_idle = &rs690_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rs400_gart_tlb_flush,
+               .get_page_entry = &rs400_gart_get_page_entry,
                .set_page = &rs400_gart_set_page,
        },
        .ring = {
@@ -759,6 +769,7 @@ static struct radeon_asic rv515_asic = {
        .mc_wait_for_idle = &rv515_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -825,6 +836,7 @@ static struct radeon_asic r520_asic = {
        .mc_wait_for_idle = &r520_mc_wait_for_idle,
        .gart = {
                .tlb_flush = &rv370_pcie_gart_tlb_flush,
+               .get_page_entry = &rv370_pcie_gart_get_page_entry,
                .set_page = &rv370_pcie_gart_set_page,
        },
        .ring = {
@@ -919,6 +931,7 @@ static struct radeon_asic r600_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1004,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1095,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1199,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &r600_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1317,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1409,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1500,6 +1518,7 @@ static struct radeon_asic btc_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &evergreen_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .ring = {
@@ -1635,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1738,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
        .get_gpu_clock_counter = &r600_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cayman_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -1871,6 +1892,7 @@ static struct radeon_asic si_asic = {
        .get_gpu_clock_counter = &si_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &si_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2032,6 +2054,7 @@ static struct radeon_asic ci_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
@@ -2139,6 +2162,7 @@ static struct radeon_asic kv_asic = {
        .get_gpu_clock_counter = &cik_get_gpu_clock_counter,
        .gart = {
                .tlb_flush = &cik_pcie_gart_tlb_flush,
+               .get_page_entry = &rs600_gart_get_page_entry,
                .set_page = &rs600_gart_set_page,
        },
        .vm = {
index 2a45d54..8d787d1 100644 (file)
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
 int r100_asic_reset(struct radeon_device *rdev);
 u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-                           uint64_t addr, uint32_t flags);
+                           uint64_t entry);
 void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
 int r100_irq_set(struct radeon_device *rdev);
 int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
                                struct radeon_fence *fence);
 extern int r300_cs_parse(struct radeon_cs_parser *p);
 extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
 extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
-                                    uint64_t addr, uint32_t flags);
+                                    uint64_t entry);
 extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
 extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
 extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
 extern int rs400_suspend(struct radeon_device *rdev);
 extern int rs400_resume(struct radeon_device *rdev);
 void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
 void rs600_irq_disable(struct radeon_device *rdev);
 u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
 void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
 void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags);
+                        uint64_t entry);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
index 0ec6516..bd7519f 100644 (file)
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
+       rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+                                                           RADEON_GART_PAGE_DUMMY);
        return 0;
 }
 
index 84146d5..5450fa9 100644 (file)
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
                radeon_bo_unpin(rdev->gart.robj);
        radeon_bo_unreserve(rdev->gart.robj);
        rdev->gart.table_addr = gpu_addr;
+
+       if (!r) {
+               int i;
+
+               /* We might have dropped some GART table updates while it wasn't
+                * mapped; restore all entries
+                */
+               for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+                       radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+               mb();
+               radeon_gart_tlb_flush(rdev);
+       }
+
        return r;
 }
 
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        unsigned t;
        unsigned p;
        int i, j;
-       u64 page_base;
 
        if (!rdev->gart.ready) {
                WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
        for (i = 0; i < pages; i++, p++) {
                if (rdev->gart.pages[p]) {
                        rdev->gart.pages[p] = NULL;
-                       rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-                       page_base = rdev->gart.pages_addr[p];
                        for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                               rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
                                if (rdev->gart.ptr) {
-                                       radeon_gart_set_page(rdev, t, page_base,
-                                                            RADEON_GART_PAGE_DUMMY);
+                                       radeon_gart_set_page(rdev, t,
+                                                            rdev->dummy_page.entry);
                                }
-                               page_base += RADEON_GPU_PAGE_SIZE;
                        }
                }
        }
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 {
        unsigned t;
        unsigned p;
-       uint64_t page_base;
+       uint64_t page_base, page_entry;
        int i, j;
 
        if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
        p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
        for (i = 0; i < pages; i++, p++) {
-               rdev->gart.pages_addr[p] = dma_addr[i];
                rdev->gart.pages[p] = pagelist[i];
-               if (rdev->gart.ptr) {
-                       page_base = rdev->gart.pages_addr[p];
-                       for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-                               radeon_gart_set_page(rdev, t, page_base, flags);
-                               page_base += RADEON_GPU_PAGE_SIZE;
+               page_base = dma_addr[i];
+               for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+                       page_entry = radeon_gart_get_page_entry(page_base, flags);
+                       rdev->gart.pages_entry[t] = page_entry;
+                       if (rdev->gart.ptr) {
+                               radeon_gart_set_page(rdev, t, page_entry);
                        }
+                       page_base += RADEON_GPU_PAGE_SIZE;
                }
        }
        mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages);
-       if (rdev->gart.pages_addr == NULL) {
+       rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+                                        rdev->gart.num_gpu_pages);
+       if (rdev->gart.pages_entry == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
        /* set GART entry to point to the dummy page by default */
-       for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-               rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-       }
+       for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+               rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
        return 0;
 }
 
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-       if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+       if (rdev->gart.ready) {
                /* unbind pages */
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
        vfree(rdev->gart.pages);
-       vfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages_entry);
        rdev->gart.pages = NULL;
-       rdev->gart.pages_addr = NULL;
+       rdev->gart.pages_entry = NULL;
 
        radeon_dummy_page_fini(rdev);
 }
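
The radeon refactor running through these files splits every GART update into a pure encode step (get_page_entry: address + flags -> PTE) and a raw write step (set_page: PTE -> table), with the encoded value cached in gart.pages_entry. That cache is what makes the replay in radeon_gart_table_vram_pin() possible: entries computed while the table was unmapped are simply written back once it is pinned again. The pattern, in sketch form, using the callbacks this patch introduces:

    uint64_t entry = radeon_gart_get_page_entry(dma_addr, flags); /* pure encode */
    rdev->gart.pages_entry[t] = entry;            /* always cached, even if unmapped */
    if (rdev->gart.ptr)
            radeon_gart_set_page(rdev, t, entry); /* hardware write only while mapped */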
index 8bf87f1..bef9a09 100644 (file)
@@ -436,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
 static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
                                uint32_t hpd_size, uint64_t hpd_gpu_addr)
 {
-       uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+       uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
        uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
 
        lock_srbm(kgd, mec, pipe, 0, 0);
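
The dropped pre-increment was a subtle bug: ++pipe_id mutated the value before both the division and the modulo, shifting the MEC/pipe pair by one. Assuming CIK_PIPE_PER_MEC is 4 (as in this driver), pipe_id = 3 previously produced mec = (4 / 4) + 1 = 2 and pipe = 4 % 4 = 0; the fixed code yields mec = (3 / 4) + 1 = 1 and pipe = 3, the pipe that was actually requested.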
index cde48c4..06d2246 100644 (file)
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
        uint64_t result;
 
        /* page table offset */
-       result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-       /* in case cpu page size != gpu page size*/
-       result |= addr & (~PAGE_MASK);
+       result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+       result &= ~RADEON_GPU_PAGE_MASK;
 
        return result;
 }
index c5799f1..34e3235 100644 (file)
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
 #define RS400_PTE_WRITEABLE (1 << 2)
 #define RS400_PTE_READABLE  (1 << 3)
 
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
        uint32_t entry;
-       u32 *gtt = rdev->gart.ptr;
 
        entry = (lower_32_bits(addr) & PAGE_MASK) |
                ((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
                entry |= RS400_PTE_WRITEABLE;
        if (!(flags & RADEON_GART_PAGE_SNOOP))
                entry |= RS400_PTE_UNSNOOPED;
-       entry = cpu_to_le32(entry);
-       gtt[i] = entry;
+       return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       u32 *gtt = rdev->gart.ptr;
+       gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 int rs400_mc_wait_for_idle(struct radeon_device *rdev)
index 9acb1c3..74bce91 100644 (file)
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
        radeon_gart_table_vram_free(rdev);
 }
 
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
-                        uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
 {
-       void __iomem *ptr = (void *)rdev->gart.ptr;
-
        addr = addr & 0xFFFFFFFFFFFFF000ULL;
        addr |= R600_PTE_SYSTEM;
        if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
                addr |= R600_PTE_WRITEABLE;
        if (flags & RADEON_GART_PAGE_SNOOP)
                addr |= R600_PTE_SNOOPED;
-       writeq(addr, ptr + (i * 8));
+       return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+                        uint64_t entry)
+{
+       void __iomem *ptr = (void *)rdev->gart.ptr;
+       writeq(entry, ptr + (i * 8));
 }
 
 int rs600_irq_set(struct radeon_device *rdev)
index aa7b872..8320792 100644 (file)
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
                        if (flags & R600_PTE_SYSTEM) {
                                value = radeon_vm_map_gart(rdev, addr);
-                               value &= 0xFFFFFFFFFFFFF000ULL;
                        } else if (flags & R600_PTE_VALID) {
                                value = addr;
                        } else {
index 7b5d221..6c6b655 100644 (file)
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
-       else if (hide_svga) {
-               mutex_lock(&dev_priv->hw_mutex);
+       else if (hide_svga)
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
-               mutex_unlock(&dev_priv->hw_mutex);
-       }
 
        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
-       mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);
+       spin_lock_init(&dev_priv->hw_lock);
+       spin_lock_init(&dev_priv->waiter_lock);
+       spin_lock_init(&dev_priv->cap_lock);
 
        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
        dev_priv->enable_fb = enable_fbdev;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
-               mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }
 
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
        ret = vmw_dma_masks(dev_priv);
-       if (unlikely(ret != 0)) {
-               mutex_unlock(&dev_priv->hw_mutex);
+       if (unlikely(ret != 0))
                goto out_err0;
-       }
 
        /*
         * Limit back buffer size to VRAM size.  Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        if (dev_priv->prim_bb_mem > dev_priv->vram_size)
                dev_priv->prim_bb_mem = dev_priv->vram_size;
 
-       mutex_unlock(&dev_priv->hw_mutex);
-
        vmw_print_capabilities(dev_priv->capabilities);
 
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
        if (!dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
        return ret;
 }
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
-               mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
-               mutex_unlock(&dev_priv->hw_mutex);
        }
 
        dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        /**
         * Reclaim 3d reference held by fbdev and potentially
index 4ee799b..d26a6da 100644 (file)
@@ -399,7 +399,8 @@ struct vmw_private {
        uint32_t memory_size;
        bool has_gmr;
        bool has_mob;
-       struct mutex hw_mutex;
+       spinlock_t hw_lock;
+       spinlock_t cap_lock;
 
        /*
         * VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
        atomic_t marker_seq;
        wait_queue_head_t fence_queue;
        wait_queue_head_t fifo_queue;
-       int fence_queue_waiters; /* Protected by hw_mutex */
-       int goal_queue_waiters; /* Protected by hw_mutex */
+       spinlock_t waiter_lock;
+       int fence_queue_waiters; /* Protected by waiter_lock */
+       int goal_queue_waiters; /* Protected by waiter_lock */
        atomic_t fifo_queue_waiters;
        uint32_t last_read_seqno;
        spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
        return (struct vmw_master *) master->driver_priv;
 }
 
+/*
+ * The locking here is fine-grained: it is taken once for every read
+ * and write operation. This is of course costly, but we don't perform
+ * much register access in the timing-critical paths anyway. In exchange
+ * we get the benefit of never forgetting the hw lock around register
+ * accesses.
+ */
 static inline void vmw_write(struct vmw_private *dev_priv,
                             unsigned int offset, uint32_t value)
 {
+       unsigned long irq_flags;
+
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
 }
 
 static inline uint32_t vmw_read(struct vmw_private *dev_priv,
                                unsigned int offset)
 {
-       uint32_t val;
+       unsigned long irq_flags;
+       u32 val;
 
+       spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
        outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
        val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+       spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
        return val;
 }
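
With hw_mutex gone, each vmw_read()/vmw_write() is individually atomic under hw_lock, but multi-register sequences still need their own serialization; that is the job of the new cap_lock and waiter_lock. A sketch of the composite access the capability code performs in the later hunks (cap_index and result are hypothetical locals):

    spin_lock(&dev_priv->cap_lock);
    vmw_write(dev_priv, SVGA_REG_DEV_CAP, cap_index);   /* select the capability */
    result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);      /* read its value */
    spin_unlock(&dev_priv->cap_lock);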
 
index b7594cb..945f1e0 100644 (file)
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
-       struct work_struct work, ping_work;
+       struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
        return "svga";
 }
 
-static void vmw_fence_ping_func(struct work_struct *work)
-{
-       struct vmw_fence_manager *fman =
-               container_of(work, struct vmw_fence_manager, ping_work);
-
-       vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
 static bool vmw_fence_enable_signaling(struct fence *f)
 {
        struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;
 
-       if (mutex_trylock(&dev_priv->hw_mutex)) {
-               vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
-               mutex_unlock(&dev_priv->hw_mutex);
-       } else
-               schedule_work(&fman->ping_work);
+       vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
 
        return true;
 }
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
-       INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
        bool lists_empty;
 
        (void) cancel_work_sync(&fman->work);
-       (void) cancel_work_sync(&fman->ping_work);
 
        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
index 09e10ae..39f2b03 100644 (file)
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
                if (!dev_priv->has_mob)
                        return false;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
                result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
 
                return (result != 0);
        }
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
        DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
 
-       mutex_lock(&dev_priv->hw_mutex);
        dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
        dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
        dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        mb();
 
        vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        max = ioread32(fifo_mem + SVGA_FIFO_MAX);
        min = ioread32(fifo_mem  + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        return vmw_fifo_send_fence(dev_priv, &dummy);
 }
 
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+       static DEFINE_SPINLOCK(ping_lock);
+       unsigned long irq_flags;
 
+       /*
+        * The ping_lock is needed because we don't have an atomic
+        * test-and-set of the SVGA_FIFO_BUSY register.
+        */
+       spin_lock_irqsave(&ping_lock, irq_flags);
        if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
                iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
                vmw_write(dev_priv, SVGA_REG_SYNC, reason);
        }
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
-       mutex_lock(&dev_priv->hw_mutex);
-
-       vmw_fifo_ping_host_locked(dev_priv, reason);
-
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock_irqrestore(&ping_lock, irq_flags);
 }
 
 void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 {
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
 
-       mutex_lock(&dev_priv->hw_mutex);
-
        vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
        while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
                ;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
        vmw_write(dev_priv, SVGA_REG_TRACES,
                  dev_priv->traces_state);
 
-       mutex_unlock(&dev_priv->hw_mutex);
        vmw_marker_queue_takedown(&fifo->marker_queue);
 
        if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                return vmw_fifo_wait_noirq(dev_priv, bytes,
                                           interruptible, timeout);
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        if (interruptible)
                ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
        else if (likely(ret > 0))
                ret = 0;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
                spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
                dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 
        return ret;
 }
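
The vmwgfx_fifo.c hunks above drop the driver-wide hw_mutex around FIFO pings and replace it with a function-local spinlock: the only invariant that must hold is that the read of SVGA_FIFO_BUSY and the conditional write stay atomic with respect to other pingers. A minimal sketch of the pattern, against a hypothetical device register rather than the vmwgfx API:

    #include <linux/spinlock.h>
    #include <linux/io.h>

    /* Emulate an atomic test-and-set on a device register that only
     * supports plain reads and writes.
     */
    static void ping_device(void __iomem *busy_reg)
    {
            static DEFINE_SPINLOCK(ping_lock);
            unsigned long flags;

            spin_lock_irqsave(&ping_lock, flags);
            if (ioread32(busy_reg) == 0)
                    iowrite32(1, busy_reg);
            spin_unlock_irqrestore(&ping_lock, flags);
    }
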
index 37881ec..69c8ce2 100644 (file)
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
                (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
        compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
 
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->cap_lock);
        for (i = 0; i < max_size; ++i) {
                vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                compat_cap->pairs[i][0] = i;
                compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->cap_lock);
 
        return 0;
 }
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                if (num > SVGA3D_DEVCAP_MAX)
                        num = SVGA3D_DEVCAP_MAX;
 
-               mutex_lock(&dev_priv->hw_mutex);
+               spin_lock(&dev_priv->cap_lock);
                for (i = 0; i < num; ++i) {
                        vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
                        *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
                }
-               mutex_unlock(&dev_priv->hw_mutex);
+               spin_unlock(&dev_priv->cap_lock);
        } else if (gb_objects) {
                ret = vmw_fill_compat_cap(dev_priv, bounce, size);
                if (unlikely(ret != 0))
index 0c42376..9fe9827 100644 (file)
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
 
 static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
 {
-       uint32_t busy;
 
-       mutex_lock(&dev_priv->hw_mutex);
-       busy = vmw_read(dev_priv, SVGA_REG_BUSY);
-       mutex_unlock(&dev_priv->hw_mutex);
-
-       return (busy == 0);
+       return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
 }
 
 void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
 
 void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->fence_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->fence_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 
 void vmw_goal_waiter_add(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (dev_priv->goal_queue_waiters++ == 0) {
                unsigned long irq_flags;
 
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
 {
-       mutex_lock(&dev_priv->hw_mutex);
+       spin_lock(&dev_priv->waiter_lock);
        if (--dev_priv->goal_queue_waiters == 0) {
                unsigned long irq_flags;
 
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
                spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
        }
-       mutex_unlock(&dev_priv->hw_mutex);
+       spin_unlock(&dev_priv->waiter_lock);
 }
 
 int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;
 
-       mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
index 3725b52..8725b79 100644 (file)
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_display_unit *du = vmw_connector_to_du(connector);
 
-       mutex_lock(&dev_priv->hw_mutex);
        num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
-       mutex_unlock(&dev_priv->hw_mutex);
 
        return ((vmw_connector_to_du(connector)->unit < num_displays &&
                 du->pref_active) ?
index f722a0c..c48da05 100644 (file)
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
        .attach_dev     = gart_iommu_attach_dev,
        .detach_dev     = gart_iommu_detach_dev,
        .map            = gart_iommu_map,
+       .map_sg         = default_iommu_map_sg,
        .unmap          = gart_iommu_unmap,
        .iova_to_phys   = gart_iommu_iova_to_phys,
        .pgsize_bitmap  = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
        do_gart_setup(gart, NULL);
 
        gart_handle = gart;
-       bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
        return 0;
 }
 
index 21b1562..c1c0104 100644 (file)
@@ -683,7 +683,7 @@ static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                DMERR("could not allocate metadata struct");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
        atomic_set(&cmd->ref_count, 1);
@@ -745,7 +745,7 @@ static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
                return cmd;
 
        cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
-       if (cmd) {
+       if (!IS_ERR(cmd)) {
                mutex_lock(&table_lock);
                cmd2 = lookup(bdev);
                if (cmd2) {
@@ -780,9 +780,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
 {
        struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
                                                       may_format_device, policy_hint_size);
-       if (cmd && !same_params(cmd, data_block_size)) {
+
+       if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
                dm_cache_metadata_close(cmd);
-               return NULL;
+               return ERR_PTR(-EINVAL);
        }
 
        return cmd;
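
The dm-cache-metadata hunks above convert NULL returns into ERR_PTR() values so that callers can tell -ENOMEM apart from -EINVAL. A hedged sketch of the idiom, using a hypothetical allocator rather than the dm API:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct thing { int id; };   /* hypothetical */

    static struct thing *thing_open(void)
    {
            struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return ERR_PTR(-ENOMEM);   /* encode errno in the pointer */
            return t;
    }

    static int thing_user(void)
    {
            struct thing *t = thing_open();

            if (IS_ERR(t))
                    return PTR_ERR(t);         /* recover the errno */
            /* ... use t ... */
            return 0;
    }
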
index 4934789..07705ee 100644 (file)
@@ -3385,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
+       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+               DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+                     dm_device_name(pool->pool_md));
+               return -EINVAL;
+       }
+
        if (!strcasecmp(argv[0], "create_thin"))
                r = process_create_thin_mesg(argc, argv, pool);
 
index f94a9fa..c672c4d 100644 (file)
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
 
        c_can_irq_control(priv, false);
 
+       /* put ctrl to init on stop to end ongoing transmission */
+       priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
        /* deactivate pins */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
        priv->can.state = CAN_STATE_STOPPED;
index c32cd61..7af379c 100644 (file)
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
                          buf, msg->len,
-                         kvaser_usb_simple_msg_callback, priv);
+                         kvaser_usb_simple_msg_callback, netdev);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (status & M16C_STATE_BUS_RESET) {
-               kvaser_usb_unlink_tx_urbs(priv);
-               return;
-       }
-
        skb = alloc_can_err_skb(priv->netdev, &cf);
        if (!skb) {
                stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
        netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-       if (status & M16C_STATE_BUS_OFF) {
+       if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
                cf->can_id |= CAN_ERR_BUSOFF;
 
                priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
                }
 
                new_state = CAN_STATE_ERROR_PASSIVE;
-       }
-
-       if (status == M16C_STATE_BUS_ERROR) {
+       } else if (status & M16C_STATE_BUS_ERROR) {
                if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
                    ((txerr >= 96) || (rxerr >= 96))) {
                        cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
                        priv->can.can_stats.error_warning++;
                        new_state = CAN_STATE_ERROR_WARNING;
-               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+               } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+                          ((txerr < 96) && (rxerr < 96))) {
                        cf->can_id |= CAN_ERR_PROT;
                        cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1590,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
        struct kvaser_usb *dev;
        int err = -ENOMEM;
-       int i;
+       int i, retry = 3;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
@@ -1608,7 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
-       err = kvaser_usb_get_software_info(dev);
+       /* On some x86 laptops, plugging a Kvaser device again after
+        * an unplug makes the firmware always ignore the very first
+        * command. For such a case, provide some room for retries
+        * instead of completely exiting the driver.
+        */
+       do {
+               err = kvaser_usb_get_software_info(dev);
+       } while (--retry && err == -ETIMEDOUT);
+
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software infos, error %d\n", err);
index 75b08c6..29a0927 100644 (file)
 #define MTL_Q_RQOMR                    0x40
 #define MTL_Q_RQMPOCR                  0x44
 #define MTL_Q_RQDR                     0x4c
+#define MTL_Q_RQFCR                    0x50
 #define MTL_Q_IER                      0x70
 #define MTL_Q_ISR                      0x74
 
 /* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX          1
+#define MTL_Q_RQFCR_RFA_WIDTH          6
+#define MTL_Q_RQFCR_RFD_INDEX          17
+#define MTL_Q_RQFCR_RFD_WIDTH          6
 #define MTL_Q_RQOMR_EHFC_INDEX         7
 #define MTL_Q_RQOMR_EHFC_WIDTH         1
-#define MTL_Q_RQOMR_RFA_INDEX          8
-#define MTL_Q_RQOMR_RFA_WIDTH          3
-#define MTL_Q_RQOMR_RFD_INDEX          13
-#define MTL_Q_RQOMR_RFD_WIDTH          3
 #define MTL_Q_RQOMR_RQS_INDEX          16
 #define MTL_Q_RQOMR_RQS_WIDTH          9
 #define MTL_Q_RQOMR_RSF_INDEX          5
index 53f5f66..4c66cd1 100644 (file)
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 
        for (i = 0; i < pdata->rx_q_count; i++) {
                /* Activate flow control when less than 4k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
 
                /* De-activate flow control when more than 6k left in fifo */
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
        }
 }
 
index 1d1147c..e468ed3 100644 (file)
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
                }
 #endif
                if (!bnx2x_fp_lock_napi(fp))
-                       return work_done;
+                       return budget;
 
                for_each_cos_in_tx_queue(fp, cos)
                        if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
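
Returning 'budget' instead of 'work_done' matters because this early-exit path never calls napi_complete(): a poll handler that returns less than its budget tells the NAPI core it has finished, so a handler that bails out while work may remain must report the full budget to stay scheduled. A hedged sketch of the contract, with hypothetical helpers:

    static int my_poll(struct napi_struct *napi, int budget)
    {
            int work_done;

            if (!my_ring_trylock(napi))     /* hypothetical per-ring lock */
                    return budget;          /* stay scheduled, poll again */

            work_done = my_process_rx(napi, budget);    /* hypothetical */
            my_ring_unlock(napi);

            if (work_done < budget)
                    napi_complete(napi);    /* done: re-enable interrupts */
            return work_done;
    }
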
index b29e027..e356afa 100644 (file)
@@ -1335,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
        int err;
 
        if (!enic_poll_lock_napi(&enic->rq[rq]))
-               return work_done;
+               return budget;
        /* Service RQ
         */
 
index a62fc38..1c75829 100644 (file)
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_dma) && \
         (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
 /*
  * RX/TX descriptors.
  */
@@ -362,6 +366,7 @@ struct tx_queue {
        dma_addr_t tso_hdrs_dma;
 
        struct tx_desc *tx_desc_area;
+       char *tx_desc_mapping; /* array to track the type of the DMA mapping */
        dma_addr_t tx_desc_dma;
        int tx_desc_area_size;
 
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        desc->l4i_chk = 0;
        desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
                skb_frag_t *this_frag;
                int tx_index;
                struct tx_desc *desc;
-               void *addr;
 
                this_frag = &skb_shinfo(skb)->frags[frag];
-               addr = page_address(this_frag->page.p) + this_frag->page_offset;
                tx_index = txq->tx_curr_desc++;
                if (txq->tx_curr_desc == txq->tx_ring_size)
                        txq->tx_curr_desc = 0;
                desc = &txq->tx_desc_area[tx_index];
+               txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
 
                /*
                 * The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 
                desc->l4i_chk = 0;
                desc->byte_cnt = skb_frag_size(this_frag);
-               desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
-                                              desc->byte_cnt, DMA_TO_DEVICE);
+               desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+                                                this_frag, 0, desc->byte_cnt,
+                                                DMA_TO_DEVICE);
        }
 }
 
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
        if (txq->tx_curr_desc == txq->tx_ring_size)
                txq->tx_curr_desc = 0;
        desc = &txq->tx_desc_area[tx_index];
+       txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
 
        if (nr_frags) {
                txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                int tx_index;
                struct tx_desc *desc;
                u32 cmd_sts;
+               char desc_dma_map;
 
                tx_index = txq->tx_used_desc;
                desc = &txq->tx_desc_area[tx_index];
+               desc_dma_map = txq->tx_desc_mapping[tx_index];
+
                cmd_sts = desc->cmd_sts;
 
                if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                reclaimed++;
                txq->tx_desc_count--;
 
-               if (!IS_TSO_HEADER(txq, desc->buf_ptr))
-                       dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
-                                        desc->byte_cnt, DMA_TO_DEVICE);
+               if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+                       if (desc_dma_map == DESC_DMA_MAP_PAGE)
+                               dma_unmap_page(mp->dev->dev.parent,
+                                              desc->buf_ptr,
+                                              desc->byte_cnt,
+                                              DMA_TO_DEVICE);
+                       else
+                               dma_unmap_single(mp->dev->dev.parent,
+                                                desc->buf_ptr,
+                                                desc->byte_cnt,
+                                                DMA_TO_DEVICE);
+               }
 
                if (cmd_sts & TX_ENABLE_INTERRUPT) {
                        struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
        struct tx_queue *txq = mp->txq + index;
        struct tx_desc *tx_desc;
        int size;
+       int ret;
        int i;
 
        txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
                                        nexti * sizeof(struct tx_desc);
        }
 
+       txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+                                      GFP_KERNEL);
+       if (!txq->tx_desc_mapping) {
+               ret = -ENOMEM;
+               goto err_free_desc_area;
+       }
+
        /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
        txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
                                           txq->tx_ring_size * TSO_HEADER_SIZE,
                                           &txq->tso_hdrs_dma, GFP_KERNEL);
        if (txq->tso_hdrs == NULL) {
-               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
-                                 txq->tx_desc_area, txq->tx_desc_dma);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_free_desc_mapping;
        }
        skb_queue_head_init(&txq->tx_skb);
 
        return 0;
+
+err_free_desc_mapping:
+       kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+       if (index == 0 && size <= mp->tx_desc_sram_size)
+               iounmap(txq->tx_desc_area);
+       else
+               dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+                                 txq->tx_desc_area, txq->tx_desc_dma);
+       return ret;
 }
 
 static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
        else
                dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
                                  txq->tx_desc_area, txq->tx_desc_dma);
+       kfree(txq->tx_desc_mapping);
+
        if (txq->tso_hdrs)
                dma_free_coherent(mp->dev->dev.parent,
                                  txq->tx_ring_size * TSO_HEADER_SIZE,
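
The mv643xx_eth driver now remembers, per descriptor, whether the buffer was mapped with dma_map_single() or with skb_frag_dma_map() (a page mapping), so that teardown can pair each mapping with the matching dma_unmap_single() or dma_unmap_page() call. A reduced sketch of the bookkeeping, with a hypothetical ring structure:

    #include <linux/dma-mapping.h>

    #define MAP_SINGLE 0
    #define MAP_PAGE   1

    struct tx_ring {            /* hypothetical */
            dma_addr_t *addr;
            u32        *len;
            char       *map_type;   /* one tag per descriptor */
    };

    static void tx_ring_unmap(struct device *dev, struct tx_ring *r, int i)
    {
            if (r->map_type[i] == MAP_PAGE)
                    dma_unmap_page(dev, r->addr[i], r->len[i],
                                   DMA_TO_DEVICE);
            else
                    dma_unmap_single(dev, r->addr[i], r->len[i],
                                     DMA_TO_DEVICE);
    }
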
index 6130375..c531c8a 100644 (file)
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
 
        work_done = netxen_process_rcv_ring(sds_ring, budget);
 
-       if ((work_done < budget) && tx_complete) {
+       if (!tx_complete)
+               work_done = budget;
+
+       if (work_done < budget) {
                napi_complete(&sds_ring->napi);
                if (test_bit(__NX_DEV_UP, &adapter->state))
                        netxen_nic_enable_int(sds_ring);
index 6576243..04283fe 100644 (file)
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
        [TSU_ADRL31]    = 0x01fc,
 };
 
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1120,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
        int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
        int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        mdp->cur_rx = 0;
        mdp->cur_tx = 0;
@@ -1133,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
                /* skb */
                mdp->rx_skbuff[i] = NULL;
                skb = netdev_alloc_skb(ndev, skbuff_size);
-               mdp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;
                sh_eth_set_receive_align(skb);
@@ -1142,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
                rxdesc = &mdp->rx_ring[i];
                /* The size of the buffer is a multiple of 16 bytes. */
                rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
-               dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
-                              DMA_FROM_DEVICE);
-               rxdesc->addr = virt_to_phys(skb->data);
+               dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                         rxdesc->buffer_length,
+                                         DMA_FROM_DEVICE);
+               if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                       kfree_skb(skb);
+                       break;
+               }
+               mdp->rx_skbuff[i] = skb;
+               rxdesc->addr = dma_addr;
                rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
 
                /* Rx descriptor address set */
@@ -1316,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
                     RFLR);
 
        sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-       if (start)
+       if (start) {
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       }
 
        /* PAUSE Prohibition */
        val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1356,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        return ret;
 }
 
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+       struct sh_eth_private *mdp = netdev_priv(ndev);
+       int i;
+
+       /* Deactivate all TX descriptors, so DMA should stop at next
+        * packet boundary if it's currently running
+        */
+       for (i = 0; i < mdp->num_tx_ring; i++)
+               mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+       /* Disable TX FIFO egress to MAC */
+       sh_eth_rcv_snd_disable(ndev);
+
+       /* Stop RX DMA at next packet boundary */
+       sh_eth_write(ndev, 0, EDRRR);
+
+       /* Aside from TX DMA, we can't tell when the hardware is
+        * really stopped, so we need to reset to make sure.
+        * Before doing that, wait for long enough to *probably*
+        * finish transmitting the last packet and poll stats.
+        */
+       msleep(2); /* max frame time at 10 Mbps < 1250 us */
+       sh_eth_get_stats(ndev);
+       sh_eth_reset(ndev);
+}
+
 /* free Tx skb function */
 static int sh_eth_txfree(struct net_device *ndev)
 {
@@ -1400,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
        u16 pkt_len = 0;
        u32 desc_status;
        int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+       dma_addr_t dma_addr;
 
        boguscnt = min(boguscnt, *quota);
        limit = boguscnt;
@@ -1447,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
                        mdp->rx_skbuff[entry] = NULL;
                        if (mdp->cd->rpadir)
                                skb_reserve(skb, NET_IP_ALIGN);
-                       dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
-                                               ALIGN(mdp->rx_buf_sz, 16),
-                                               DMA_FROM_DEVICE);
+                       dma_unmap_single(&ndev->dev, rxdesc->addr,
+                                        ALIGN(mdp->rx_buf_sz, 16),
+                                        DMA_FROM_DEVICE);
                        skb_put(skb, pkt_len);
                        skb->protocol = eth_type_trans(skb, ndev);
                        netif_receive_skb(skb);
@@ -1469,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 
                if (mdp->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(ndev, skbuff_size);
-                       mdp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
                        sh_eth_set_receive_align(skb);
-                       dma_map_single(&ndev->dev, skb->data,
-                                      rxdesc->buffer_length, DMA_FROM_DEVICE);
+                       dma_addr = dma_map_single(&ndev->dev, skb->data,
+                                                 rxdesc->buffer_length,
+                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&ndev->dev, dma_addr)) {
+                               kfree_skb(skb);
+                               break;
+                       }
+                       mdp->rx_skbuff[entry] = skb;
 
                        skb_checksum_none_assert(skb);
-                       rxdesc->addr = virt_to_phys(skb->data);
+                       rxdesc->addr = dma_addr;
                }
                if (entry >= mdp->num_rx_ring - 1)
                        rxdesc->status |=
@@ -1573,7 +1617,6 @@ ignore_link:
                if (intr_status & EESR_RFRMER) {
                        /* Receive Frame Overflow int */
                        ndev->stats.rx_frame_errors++;
-                       netif_err(mdp, rx_err, ndev, "Receive Abort\n");
                }
        }
 
@@ -1592,13 +1635,11 @@ ignore_link:
        if (intr_status & EESR_RDE) {
                /* Receive Descriptor Empty int */
                ndev->stats.rx_over_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
        }
 
        if (intr_status & EESR_RFE) {
                /* Receive FIFO Overflow int */
                ndev->stats.rx_fifo_errors++;
-               netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
        }
 
        if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1653,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
        if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
                ret = IRQ_HANDLED;
        else
-               goto other_irq;
+               goto out;
+
+       if (unlikely(!mdp->irq_enabled)) {
+               sh_eth_write(ndev, 0, EESIPR);
+               goto out;
+       }
 
        if (intr_status & EESR_RX_CHECK) {
                if (napi_schedule_prep(&mdp->napi)) {
@@ -1684,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
                sh_eth_error(ndev, intr_status);
        }
 
-other_irq:
+out:
        spin_unlock(&mdp->lock);
 
        return ret;
@@ -1712,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
        napi_complete(napi);
 
        /* Reenable Rx interrupts */
-       sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+       if (mdp->irq_enabled)
+               sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 out:
        return budget - quota;
 }
@@ -1968,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
                return -EINVAL;
 
        if (netif_running(ndev)) {
+               netif_device_detach(ndev);
                netif_tx_disable(ndev);
-               /* Disable interrupts by clearing the interrupt mask. */
-               sh_eth_write(ndev, 0x0000, EESIPR);
-               /* Stop the chip's Tx and Rx processes. */
-               sh_eth_write(ndev, 0, EDTRR);
-               sh_eth_write(ndev, 0, EDRRR);
+
+               /* Serialise with the interrupt handler and NAPI, then
+                * disable interrupts.  We have to clear the
+                * irq_enabled flag first to ensure that interrupts
+                * won't be re-enabled.
+                */
+               mdp->irq_enabled = false;
                synchronize_irq(ndev->irq);
-       }
+               napi_synchronize(&mdp->napi);
+               sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Free all the skbuffs in the Rx queue. */
-       sh_eth_ring_free(ndev);
-       /* Free DMA buffer */
-       sh_eth_free_dma_buffer(mdp);
+               sh_eth_dev_exit(ndev);
+
+               /* Free all the skbuffs in the Rx queue. */
+               sh_eth_ring_free(ndev);
+               /* Free DMA buffer */
+               sh_eth_free_dma_buffer(mdp);
+       }
 
        /* Set new parameters */
        mdp->num_rx_ring = ring->rx_pending;
        mdp->num_tx_ring = ring->tx_pending;
 
-       ret = sh_eth_ring_init(ndev);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
-               return ret;
-       }
-       ret = sh_eth_dev_init(ndev, false);
-       if (ret < 0) {
-               netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
-               return ret;
-       }
-
        if (netif_running(ndev)) {
+               ret = sh_eth_ring_init(ndev);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+               ret = sh_eth_dev_init(ndev, false);
+               if (ret < 0) {
+                       netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+                                  __func__);
+                       return ret;
+               }
+
+               mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
                /* Setting the Rx mode will start the Rx process. */
                sh_eth_write(ndev, EDRRR_R, EDRRR);
-               netif_wake_queue(ndev);
+               netif_device_attach(ndev);
        }
 
        return 0;
@@ -2117,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
        spin_unlock_irqrestore(&mdp->lock, flags);
 
+       if (skb_padto(skb, ETH_ZLEN))
+               return NETDEV_TX_OK;
+
        entry = mdp->cur_tx % mdp->num_tx_ring;
        mdp->tx_skbuff[entry] = skb;
        txdesc = &mdp->tx_ring[entry];
@@ -2126,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                 skb->len + 2);
        txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
-       if (skb->len < ETH_ZLEN)
-               txdesc->buffer_length = ETH_ZLEN;
-       else
-               txdesc->buffer_length = skb->len;
+       if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+       txdesc->buffer_length = skb->len;
 
        if (entry >= mdp->num_tx_ring - 1)
                txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2181,14 +2242,17 @@ static int sh_eth_close(struct net_device *ndev)
 
        netif_stop_queue(ndev);
 
-       /* Disable interrupts by clearing the interrupt mask. */
+       /* Serialise with the interrupt handler and NAPI, then disable
+        * interrupts.  We have to clear the irq_enabled flag first to
+        * ensure that interrupts won't be re-enabled.
+        */
+       mdp->irq_enabled = false;
+       synchronize_irq(ndev->irq);
+       napi_disable(&mdp->napi);
        sh_eth_write(ndev, 0x0000, EESIPR);
 
-       /* Stop the chip's Tx and Rx processes. */
-       sh_eth_write(ndev, 0, EDTRR);
-       sh_eth_write(ndev, 0, EDRRR);
+       sh_eth_dev_exit(ndev);
 
-       sh_eth_get_stats(ndev);
        /* PHY Disconnect */
        if (mdp->phydev) {
                phy_stop(mdp->phydev);
@@ -2198,8 +2262,6 @@ static int sh_eth_close(struct net_device *ndev)
 
        free_irq(ndev->irq, ndev);
 
-       napi_disable(&mdp->napi);
-
        /* Free all the skbuffs in the Rx queue. */
        sh_eth_ring_free(ndev);
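
Two patterns recur through the sh_eth hunks: every dma_map_single() result is now checked with dma_mapping_error() before it is used, and the skb pointer is published into rx_skbuff[] only after the mapping has succeeded, so an error path can free the skb without leaving a stale entry behind. A hedged sketch of one refill step, with a hypothetical descriptor type:

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    struct my_rx_desc { dma_addr_t addr; };     /* hypothetical */

    static int rx_refill_one(struct net_device *ndev, struct sk_buff **slot,
                             struct my_rx_desc *rxd, unsigned int len)
    {
            struct sk_buff *skb = netdev_alloc_skb(ndev, len);
            dma_addr_t addr;

            if (!skb)
                    return -ENOMEM;
            addr = dma_map_single(&ndev->dev, skb->data, len,
                                  DMA_FROM_DEVICE);
            if (dma_mapping_error(&ndev->dev, addr)) {
                    kfree_skb(skb);     /* nothing published yet */
                    return -EIO;
            }
            *slot = skb;                /* publish only on success */
            rxd->addr = addr;
            return 0;
    }
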
 
index 71f5de1..332d3c1 100644 (file)
@@ -513,6 +513,7 @@ struct sh_eth_private {
        u32 rx_buf_sz;                  /* Based on MTU+slack. */
        int edmac_endian;
        struct napi_struct napi;
+       bool irq_enabled;
        /* MII transceiver section. */
        u32 phy_id;                     /* PHY ID */
        struct mii_bus *mii_bus;        /* MDIO bus control */
index 8c6b7c1..cf62ff4 100644 (file)
@@ -2778,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
  * @addr: iobase memory address
  * Description: this is the main probe function used to
  * call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * the new private structure on success, otherwise an error pointer.
  */
 struct stmmac_priv *stmmac_dvr_probe(struct device *device,
                                     struct plat_stmmacenet_data *plat_dat,
@@ -2789,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
 
        ndev = alloc_etherdev(sizeof(struct stmmac_priv));
        if (!ndev)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        SET_NETDEV_DEV(ndev, device);
 
index e068d48..a39131f 100644 (file)
@@ -1683,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               /* In dual EMAC, reserved VLAN id should not be used for
+                * creating VLAN interfaces as this can break the dual
+                * EMAC port separation
+                */
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1696,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
        if (ret != 0)
index a14d877..2e19528 100644 (file)
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
        };
 
        dst = ip6_route_output(dev_net(dev), NULL, &fl6);
-       if (IS_ERR(dst))
+       if (dst->error) {
+               ret = dst->error;
+               dst_release(dst);
                goto err;
-
+       }
        skb_dst_drop(skb);
        skb_dst_set(skb, dst);
        err = ip6_local_out(skb);
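
This fix relies on the ip6_route_output() contract: it never returns NULL or an ERR_PTR, but on failure hands back a valid dst whose ->error field is set and which still holds a reference. The corrected pattern, condensed:

    dst = ip6_route_output(dev_net(dev), NULL, &fl6);
    if (dst->error) {           /* never NULL, never IS_ERR() */
            ret = dst->error;
            dst_release(dst);   /* drop the reference we were handed */
            goto err;
    }
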
index 9a72640..62b0bf4 100644 (file)
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
 
        __ath_cancel_work(sc);
 
+       disable_irq(sc->irq);
        tasklet_disable(&sc->intr_tq);
        tasklet_disable(&sc->bcon_tasklet);
        spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
                r = -EIO;
 
 out:
+       enable_irq(sc->irq);
        spin_unlock_bh(&sc->sc_pcu_lock);
        tasklet_enable(&sc->bcon_tasklet);
        tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
        if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
                return IRQ_NONE;
 
-       if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
-               return IRQ_NONE;
-
        /* shared irq, not for us */
        if (!ath9k_hw_intrpend(ah))
                return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
        ath9k_debug_sync_cause(sc, sync_cause);
        status &= ah->imask;    /* discard unasked-for bits */
 
-       if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+       if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
                return IRQ_HANDLED;
 
        /*
index 1bbe4fc..660ddb1 100644 (file)
@@ -246,6 +246,7 @@ enum iwl_ucode_tlv_flag {
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *     regardless of the band or the number of the probes. FW will calculate
  *     the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
  */
 enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID     = BIT(0),
@@ -257,6 +258,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF     = BIT(7),
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
        IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
+       IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
 };
 
 /**
index 201846d..cfc0e65 100644 (file)
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
 };
 
 /* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of scan iterations in which EBS is
+ *     involved:
+ *     1 - EBS is disabled.
+ *     2 - every second scan will be a full scan (and so on).
  */
 struct iwl_scan_channel_opt {
        __le16 flags;
index e880f9d..2091558 100644 (file)
@@ -3343,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
                msk |= mvmsta->tfd_queue_msk;
        }
 
-       if (drop) {
-               if (iwl_mvm_flush_tx_path(mvm, msk, true))
-                       IWL_ERR(mvm, "flush request fail\n");
-               mutex_unlock(&mvm->mutex);
-       } else {
-               mutex_unlock(&mvm->mutex);
+       msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
 
-               /* this can take a while, and we may need/want other operations
-                * to succeed while doing this, so do it without the mutex held
-                */
-               iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
-       }
+       if (iwl_mvm_flush_tx_path(mvm, msk, true))
+               IWL_ERR(mvm, "flush request fail\n");
+       mutex_unlock(&mvm->mutex);
+
+       /* this can take a while, and we may need/want other operations
+        * to succeed while doing this, so do it without the mutex held
+        */
+       iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
 }
 
 const struct ieee80211_ops iwl_mvm_hw_ops = {
index ec9a8e7..844bf7c 100644 (file)
@@ -72,6 +72,8 @@
 
 #define IWL_PLCP_QUIET_THRESH 1
 #define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
 
 struct iwl_mvm_scan_params {
        u32 max_out_time;
@@ -1105,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
                return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
                                          notify);
 
+       if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+               return 0;
+
+       if (iwl_mvm_is_radio_killed(mvm))
+               goto out;
+
        if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
            (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
             mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1141,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
        if (mvm->scan_status == IWL_MVM_SCAN_OS)
                iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
 
+out:
        mvm->scan_status = IWL_MVM_SCAN_NONE;
 
        if (notify) {
@@ -1297,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
        cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
        cmd->iter_num = cpu_to_le32(1);
 
-       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
-           mvm->last_ebs_successful) {
-               cmd->channel_opt[0].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-               cmd->channel_opt[1].flags =
-                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
-                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
-                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
-       }
-
        if (iwl_mvm_rrm_scan_needed(mvm))
                cmd->scan_flags |=
                        cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1383,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
        cmd->schedule[1].iterations = 0;
        cmd->schedule[1].full_scan_mul = 0;
 
+       if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+           mvm->last_ebs_successful) {
+               cmd->channel_opt[0].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[0].non_ebs_ratio =
+                       cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+               cmd->channel_opt[1].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[1].non_ebs_ratio =
+                       cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+       }
+
        for (i = 1; i <= req->req.n_ssids; i++)
                ssid_bitmap |= BIT(i);
 
@@ -1483,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
        cmd->schedule[1].iterations = 0xff;
        cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
 
+       if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+           mvm->last_ebs_successful) {
+               cmd->channel_opt[0].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[0].non_ebs_ratio =
+                       cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+               cmd->channel_opt[1].flags =
+                       cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+                                   IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+                                   IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+               cmd->channel_opt[1].non_ebs_ratio =
+                       cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+       }
+
        iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
                                       ssid_bitmap, cmd);
 
index 4333306..c59d075 100644 (file)
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        if (ieee80211_is_probe_resp(fc))
                tx_flags |= TX_CMD_FLG_TSF;
-       else if (ieee80211_is_back_req(fc))
-               tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
 
        if (ieee80211_has_morefrags(fc))
                tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                u8 *qc = ieee80211_get_qos_ctl(hdr);
                tx_cmd->tid_tspec = qc[0] & 0xf;
                tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+       } else if (ieee80211_is_back_req(fc)) {
+               struct ieee80211_bar *bar = (void *)skb->data;
+               u16 control = le16_to_cpu(bar->control);
+
+               tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+               tx_cmd->tid_tspec = (control &
+                                    IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+                       IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+               WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
        } else {
                tx_cmd->tid_tspec = IWL_TID_NON_QOS;
                if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
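
Rather than flagging BARs early and leaving tid_tspec at its default, the TID is now pulled out of the frame's 16-bit little-endian BAR control field. The mask-and-shift extraction, condensed from the hunk above:

    u16 control = le16_to_cpu(bar->control);
    u8 tid = (control & IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
             IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
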
index dfd021e..f4cd0b9 100644 (file)
@@ -177,7 +177,7 @@ struct at91_pinctrl {
        struct device           *dev;
        struct pinctrl_dev      *pctl;
 
-       int                     nbanks;
+       int                     nactive_banks;
 
        uint32_t                *mux_mask;
        int                     nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
        int mux;
 
        /* check if it's a valid config */
-       if (pin->bank >= info->nbanks) {
+       if (pin->bank >= gpio_banks) {
                dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
-                       name, index, pin->bank, info->nbanks);
+                       name, index, pin->bank, gpio_banks);
                return -EINVAL;
        }
 
+       if (!gpio_chips[pin->bank]) {
+               dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+                       name, index, pin->bank);
+               return -ENXIO;
+       }
+
        if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
                dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
                        name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
 
        for_each_child_of_node(np, child) {
                if (of_device_is_compatible(child, gpio_compat)) {
-                       info->nbanks++;
+                       if (of_device_is_available(child))
+                               info->nactive_banks++;
                } else {
                        info->nfunctions++;
                        info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
        }
 
        size /= sizeof(*list);
-       if (!size || size % info->nbanks) {
-               dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+       if (!size || size % gpio_banks) {
+               dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
                return -EINVAL;
        }
-       info->nmux = size / info->nbanks;
+       info->nmux = size / gpio_banks;
 
        info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
        if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
                of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
        at91_pinctrl_child_count(info, np);
 
-       if (info->nbanks < 1) {
+       if (gpio_banks < 1) {
                dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
                return -EINVAL;
        }
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
 
        dev_dbg(&pdev->dev, "mux-mask\n");
        tmp = info->mux_mask;
-       for (i = 0; i < info->nbanks; i++) {
+       for (i = 0; i < gpio_banks; i++) {
                for (j = 0; j < info->nmux; j++, tmp++) {
                        dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
                }
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
        if (!info->groups)
                return -ENOMEM;
 
-       dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+       dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
        dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
        dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
 
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
 {
        struct at91_pinctrl *info;
        struct pinctrl_pin_desc *pdesc;
-       int ret, i, j, k;
+       int ret, i, j, k, ngpio_chips_enabled = 0;
 
        info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
        if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
         * to obtain references to the struct gpio_chip * for them, and we
         * need this to proceed.
         */
-       for (i = 0; i < info->nbanks; i++) {
-               if (!gpio_chips[i]) {
-                       dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
-                       devm_kfree(&pdev->dev, info);
-                       return -EPROBE_DEFER;
-               }
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       ngpio_chips_enabled++;
+
+       if (ngpio_chips_enabled < info->nactive_banks) {
+               dev_warn(&pdev->dev,
+                        "All GPIO chips are not registered yet (%d/%d)\n",
+                        ngpio_chips_enabled, info->nactive_banks);
+               devm_kfree(&pdev->dev, info);
+               return -EPROBE_DEFER;
        }
 
        at91_pinctrl_desc.name = dev_name(&pdev->dev);
-       at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+       at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
        at91_pinctrl_desc.pins = pdesc =
                devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
 
        if (!at91_pinctrl_desc.pins)
                return -ENOMEM;
 
-       for (i = 0 , k = 0; i < info->nbanks; i++) {
+       for (i = 0, k = 0; i < gpio_banks; i++) {
                for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
                        pdesc->number = k;
                        pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
        }
 
        /* We will handle a range of GPIO pins */
-       for (i = 0; i < info->nbanks; i++)
-               pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+       for (i = 0; i < gpio_banks; i++)
+               if (gpio_chips[i])
+                       pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
 
        dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
 
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
 static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                                  struct at91_gpio_chip *at91_gpio)
 {
+       struct gpio_chip        *gpiochip_prev = NULL;
        struct at91_gpio_chip   *prev = NULL;
        struct irq_data         *d = irq_get_irq_data(at91_gpio->pioc_virq);
-       int ret;
+       int ret, i;
 
        at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
 
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
                return ret;
        }
 
-       /* Setup chained handler */
-       if (at91_gpio->pioc_idx)
-               prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
        /* The top level handler handles one bank of GPIOs, except
         * on some SoCs it can handle up to three...
         * We only set up the handler for the first of the list.
         */
-       if (prev && prev->next == at91_gpio)
+       gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+       if (!gpiochip_prev) {
+               /* Then register the chain on the parent IRQ */
+               gpiochip_set_chained_irqchip(&at91_gpio->chip,
+                                            &gpio_irqchip,
+                                            at91_gpio->pioc_virq,
+                                            gpio_irq_handler);
                return 0;
+       }
 
-       /* Then register the chain on the parent IRQ */
-       gpiochip_set_chained_irqchip(&at91_gpio->chip,
-                                    &gpio_irqchip,
-                                    at91_gpio->pioc_virq,
-                                    gpio_irq_handler);
+       prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
 
-       return 0;
+       /* we can only have 2 banks before */
+       for (i = 0; i < 2; i++) {
+               if (prev->next) {
+                       prev = prev->next;
+               } else {
+                       prev->next = at91_gpio;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
 }
 
 /* This structure is replicated for each GPIO block allocated at probe time */
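
Instead of fixing up shared IRQ lines after the fact (the removed at91_gpio_probe_fixup()), setup now asks the IRQ core whether the parent interrupt already has handler data attached: the first bank on a line registers the chained handler, and each later bank appends itself to the first bank's ->next list, with at most three banks per line on these SoCs. The decision point, condensed:

    gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
    if (!gpiochip_prev) {
            /* first chip on this line: register the chained handler */
            gpiochip_set_chained_irqchip(&at91_gpio->chip, &gpio_irqchip,
                                         at91_gpio->pioc_virq,
                                         gpio_irq_handler);
            return 0;
    }
    /* otherwise walk prev->next and append this bank (max 3 per line) */
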
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
        .ngpio                  = MAX_NB_GPIO_PER_BANK,
 };
 
-static void at91_gpio_probe_fixup(void)
-{
-       unsigned i;
-       struct at91_gpio_chip *at91_gpio, *last = NULL;
-
-       for (i = 0; i < gpio_banks; i++) {
-               at91_gpio = gpio_chips[i];
-
-               /*
-                * GPIO controller are grouped on some SoC:
-                * PIOC, PIOD and PIOE can share the same IRQ line
-                */
-               if (last && last->pioc_virq == at91_gpio->pioc_virq)
-                       last->next = at91_gpio;
-               last = at91_gpio;
-       }
-}
-
 static struct of_device_id at91_gpio_of_match[] = {
        { .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
        { .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
        gpio_chips[alias_idx] = at91_chip;
        gpio_banks = max(gpio_banks, alias_idx + 1);
 
-       at91_gpio_probe_fixup();
-
        ret = at91_gpio_of_irq_setup(pdev, at91_chip);
        if (ret)
                goto irq_setup_err;
index f407e37..642c77c 100644 (file)
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxanswr");
        card = CARD_FROM_CDEV(channel->ccwdev);
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
        channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
        QETH_DBF_TEXT(SETUP, 2, "idxactch");
 
        iob = qeth_get_buffer(channel);
+       if (!iob)
+               return -ENOMEM;
        iob->callback = idx_reply_cb;
        memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
        channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
 
+/**
+ * qeth_send_control_data() -  send control command to the card
+ * @card:                      qeth_card structure pointer
+ * @len:                       size of the command buffer
+ * @iob:                       qeth_cmd_buffer pointer
+ * @reply_cb:                  callback function pointer
+ * @cb_card:                   pointer to the qeth_card structure
+ * @cb_reply:                  pointer to the qeth_reply structure
+ * @cb_cmd:                    pointer to the original iob for non-IPA
+ *                             commands, or to the qeth_ipa_cmd structure
+ *                             for IPA commands
+ * @reply_param:               private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned by the hardware, or another error indication.
+ * A value of zero indicates that the command executed successfully.
+ *
+ * The callback function is called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. It must return
+ * non-zero while more reply blocks are expected, and zero once the
+ * last or only reply block has been received. The callback can read
+ * the reply_param pointer from the 'param' field of struct qeth_reply.
+ */
 int qeth_send_control_data(struct qeth_card *card, int len,
                struct qeth_cmd_buffer *iob,
-               int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
-                       unsigned long),
+               int (*reply_cb)(struct qeth_card *cb_card,
+                               struct qeth_reply *cb_reply,
+                               unsigned long cb_cmd),
                void *reply_param)
 {
        int rc;
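To make the documented contract concrete, here is a minimal sketch of a conforming reply callback, assuming a command answered by a single reply block (the callback name and the int result are hypothetical):

static int example_reply_cb(struct qeth_card *cb_card,
			    struct qeth_reply *cb_reply,
			    unsigned long cb_cmd)
{
	/* cb_cmd points at the response block returned by the hardware */
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
	/* the reply_param passed to qeth_send_control_data() comes back here */
	int *result = cb_reply->param;

	*result = cmd->hdr.return_code;
	return 0;	/* zero: the last (and only) reply block was received */
}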
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
 
-       iob = qeth_wait_for_buffer(&card->write);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       iob = qeth_get_buffer(&card->write);
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+       } else {
+               dev_warn(&card->gdev->dev,
+                        "The qeth driver ran out of channel command buffers\n");
+               QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+                                dev_name(&card->gdev->dev));
+       }
 
        return iob;
 }
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
 }
 EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
 
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for an explanation of the arguments.
+ */
 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
                int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
                        unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
        QETH_DBF_TEXT(SETUP, 2, "strtlan");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
        return rc;
 }
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
                                     QETH_PROT_IPV4);
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
-       cmd->data.setadapterparms.hdr.command_code = command;
-       cmd->data.setadapterparms.hdr.used_total = 1;
-       cmd->data.setadapterparms.hdr.seq_no = 1;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+               cmd->data.setadapterparms.hdr.command_code = command;
+               cmd->data.setadapterparms.hdr.used_total = 1;
+               cmd->data.setadapterparms.hdr.seq_no = 1;
+       }
 
        return iob;
 }
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
        QETH_CARD_TEXT(card, 3, "queryadp");
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
        return rc;
 }
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 
        QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
        return rc;
 }
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
                return -ENOMEDIUM;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
                                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob,
                                qeth_query_switch_attributes_cb, sw_info);
 }
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
 
        QETH_DBF_TEXT(SETUP, 2, "qdiagass");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
 
        QETH_DBF_TEXT(SETUP, 2, "diagtrap");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 80;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
                        sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.mode = mode;
        qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
                                   sizeof(struct qeth_ipacmd_setadpparms));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
        cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_set_access_ctrl));
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
        access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
 
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
                                   QETH_SNMP_SETADP_CMDLENGTH + req_len);
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
        rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
        }
-
+out:
        kfree(ureq);
        kfree(qinfo.udata);
        return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
                                   sizeof(struct qeth_ipacmd_setadpparms_hdr) +
                                   sizeof(struct qeth_query_oat));
+       if (!iob) {
+               rc = -ENOMEM;
+               goto out_free;
+       }
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        oat_req = &cmd->data.setadapterparms.data.query_oat;
        oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
                return -EOPNOTSUPP;
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
                sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       if (!iob)
+               return -ENOMEM;
        return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
                                        (void *)carrier_info);
 }
@@ -5060,11 +5133,23 @@ retriable:
        card->options.adp.supported_funcs = 0;
        card->options.sbp.supported_funcs = 0;
        card->info.diagass_support = 0;
-       qeth_query_ipassists(card, QETH_PROT_IPV4);
-       if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
-               qeth_query_setadapterparms(card);
-       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
-               qeth_query_setdiagass(card);
+       rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+       if (rc == -ENOMEM)
+               goto out;
+       if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+               rc = qeth_query_setadapterparms(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+                       goto out;
+               }
+       }
+       if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+               rc = qeth_query_setdiagass(card);
+               if (rc < 0) {
+                       QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+                       goto out;
+               }
+       }
        return 0;
 out:
        dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
index d02cd1a..ce87ae7 100644 (file)
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
 static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
-                          enum qeth_ipa_cmds,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long));
+                          enum qeth_ipa_cmds);
 static void qeth_l2_set_multicast_list(struct net_device *);
 static int qeth_l2_recover(void *);
 static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
        return ndev;
 }
 
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
 {
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Sgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       /* MAC already registered, needed in couple/uncouple case */
-       if (cmd->hdr.return_code ==  IPA_RC_L2_DUP_MAC) {
-               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
-                         mac, QETH_CARD_IFNAME(card));
-               cmd->hdr.return_code = 0;
+       if (retcode)
+               QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+       switch (retcode) {
+       case IPA_RC_SUCCESS:
+               rc = 0;
+               break;
+       case IPA_RC_L2_UNSUPPORTED_CMD:
+               rc = -ENOSYS;
+               break;
+       case IPA_RC_L2_ADDR_TABLE_FULL:
+               rc = -ENOSPC;
+               break;
+       case IPA_RC_L2_DUP_MAC:
+       case IPA_RC_L2_DUP_LAYER3_MAC:
+               rc = -EEXIST;
+               break;
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+       case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               rc = -EPERM;
+               break;
+       case IPA_RC_L2_MAC_NOT_FOUND:
+               rc = -ENOENT;
+               break;
+       case -ENOMEM:
+               rc = -ENOMEM;
+               break;
+       default:
+               rc = -EIO;
+               break;
        }
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       return rc;
 }
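With this mapping in place, callers branch on ordinary errno values instead of raw IPA return codes; a sketch of the resulting pattern (illustrative only, mirroring the callers that follow):

	rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
							     IPA_CMD_SETGMAC));
	switch (rc) {
	case 0:		/* IPA_RC_SUCCESS */
		break;
	case -EEXIST:	/* duplicate MAC, often benign on couple/uncouple */
		break;
	case -EPERM:	/* not authorized by hypervisor or adapter */
	default:
		/* log and propagate rc */
		break;
	}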
 
 static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
 {
-       QETH_CARD_TEXT(card, 2, "L2Sgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
-                                         qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
-                               struct qeth_reply *reply,
-                               unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-       __u8 *mac;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Dgmacb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       mac = &cmd->data.setdelmac.mac[0];
-       if (cmd->hdr.return_code)
-               QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
-                         mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
-       return 0;
+       QETH_CARD_TEXT(card, 2, "L2Sgmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETGMAC));
+       if (rc == -EEXIST)
+               QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+                       mac, QETH_CARD_IFNAME(card));
+       else if (rc)
+               QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Dgmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
-                                         qeth_l2_send_delgroupmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELGMAC));
+       if (rc)
+               QETH_DBF_MESSAGE(2,
+                       "Could not delete group MAC %pM on %s: %d\n",
+                       mac, QETH_CARD_IFNAME(card), rc);
+       return rc;
 }
 
 static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
        mc->is_vmac = vmac;
 
        if (vmac) {
-               rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                       NULL);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
        } else {
-               rc = qeth_l2_send_setgroupmac(card, mac);
+               rc = qeth_setdel_makerc(card,
+                       qeth_l2_send_setgroupmac(card, mac));
        }
 
        if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
                if (del) {
                        if (mc->is_vmac)
                                qeth_l2_send_setdelmac(card, mc->mc_addr,
-                                       IPA_CMD_DELVMAC, NULL);
+                                       IPA_CMD_DELVMAC);
                        else
                                qeth_l2_send_delgroupmac(card, mc->mc_addr);
                }
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
 
        QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelvlan.vlan_id = i;
        return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
 {
        struct qeth_card *card = dev->ml_priv;
        struct qeth_vlan_vid *id;
+       int rc;
 
        QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
        if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
        id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
        if (id) {
                id->vid = vid;
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+               if (rc) {
+                       kfree(id);
+                       return rc;
+               }
                spin_lock_bh(&card->vlanlock);
                list_add_tail(&id->list, &card->vid_list);
                spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 {
        struct qeth_vlan_vid *id, *tmpid = NULL;
        struct qeth_card *card = dev->ml_priv;
+       int rc = 0;
 
        QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
        if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
        }
        spin_unlock_bh(&card->vlanlock);
        if (tmpid) {
-               qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+               rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
                kfree(tmpid);
        }
        qeth_l2_set_multicast_list(card->dev);
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
 }
 
 static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
-                          enum qeth_ipa_cmds ipacmd,
-                          int (*reply_cb) (struct qeth_card *,
-                                           struct qeth_reply*,
-                                           unsigned long))
+                          enum qeth_ipa_cmds ipacmd)
 {
        struct qeth_ipa_cmd *cmd;
        struct qeth_cmd_buffer *iob;
 
        QETH_CARD_TEXT(card, 2, "L2sdmac");
        iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
        memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
-       return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+       return qeth_send_ipa_cmd(card, iob, NULL, NULL);
 }
 
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 {
-       struct qeth_ipa_cmd *cmd;
+       int rc;
 
-       QETH_CARD_TEXT(card, 2, "L2Smaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+       QETH_CARD_TEXT(card, 2, "L2Setmac");
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_SETVMAC));
+       if (rc == 0) {
+               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+               memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+               dev_info(&card->gdev->dev,
+                       "MAC address %pM successfully registered on device %s\n",
+                       card->dev->dev_addr, card->dev->name);
+       } else {
                card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-               switch (cmd->hdr.return_code) {
-               case IPA_RC_L2_DUP_MAC:
-               case IPA_RC_L2_DUP_LAYER3_MAC:
+               switch (rc) {
+               case -EEXIST:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM already exists\n",
-                               cmd->data.setdelmac.mac);
+                               "MAC address %pM already exists\n", mac);
                        break;
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
-               case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+               case -EPERM:
                        dev_warn(&card->gdev->dev,
-                               "MAC address %pM is not authorized\n",
-                               cmd->data.setdelmac.mac);
-                       break;
-               default:
+                               "MAC address %pM is not authorized\n", mac);
                        break;
                }
-       } else {
-               card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-               memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
-                      OSA_ADDR_LEN);
-               dev_info(&card->gdev->dev,
-                       "MAC address %pM successfully registered on device %s\n",
-                       card->dev->dev_addr, card->dev->name);
-       }
-       return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
-       QETH_CARD_TEXT(card, 2, "L2Setmac");
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
-                                         qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
-                          struct qeth_reply *reply,
-                          unsigned long data)
-{
-       struct qeth_ipa_cmd *cmd;
-
-       QETH_CARD_TEXT(card, 2, "L2Dmaccb");
-       cmd = (struct qeth_ipa_cmd *) data;
-       if (cmd->hdr.return_code) {
-               QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
-               return 0;
        }
-       card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
-       return 0;
+       return rc;
 }
 
 static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
 {
+       int rc;
+
        QETH_CARD_TEXT(card, 2, "L2Delmac");
        if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
                return 0;
-       return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
-                                         qeth_l2_send_delmac_cb);
+       rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+                                       IPA_CMD_DELVMAC));
+       if (rc == 0)
+               card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+       return rc;
 }
 
 static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
                if (rc) {
                        QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
                                "device %s: x%x\n", CARD_BUS_ID(card), rc);
-                       QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
                        return rc;
                }
                QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
        rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
-       if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+       if (!rc || (rc == -ENOENT))
                rc = qeth_l2_send_setmac(card, addr->sa_data);
        return rc ? -EINVAL : 0;
 }
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "brqsuppo");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength =
                sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
        if (rc)
                return rc;
        rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
-       if (rc)
-               return rc;
-       return 0;
+       return rc;
 }
 EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
 
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
        if (!(card->options.sbp.supported_funcs & setcmd))
                return -EOPNOTSUPP;
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.sbp.hdr.cmdlength = cmdlength;
        cmd->data.sbp.hdr.command_code = setcmd;
index 625227a..e2a0ee8 100644 (file)
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "setdelmc");
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
        if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
        QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
 
        iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        if (addr->proto == QETH_PROT_IPV6) {
                memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
 
        QETH_CARD_TEXT(card, 4, "setroutg");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setrtg.type = (type);
        rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
        QETH_CARD_TEXT(card, 4, "getasscm");
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
 
-       cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
-       cmd->data.setassparms.hdr.assist_no = ipa_func;
-       cmd->data.setassparms.hdr.length = 8 + len;
-       cmd->data.setassparms.hdr.command_code = cmd_code;
-       cmd->data.setassparms.hdr.return_code = 0;
-       cmd->data.setassparms.hdr.seq_no = 0;
+       if (iob) {
+               cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+               cmd->data.setassparms.hdr.assist_no = ipa_func;
+               cmd->data.setassparms.hdr.length = 8 + len;
+               cmd->data.setassparms.hdr.command_code = cmd_code;
+               cmd->data.setassparms.hdr.return_code = 0;
+               cmd->data.setassparms.hdr.seq_no = 0;
+       }
 
        return iob;
 }
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
        QETH_CARD_TEXT(card, 4, "simassp6");
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       0, QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, 0, 0,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
                length = sizeof(__u32);
        iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
                                       length, QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob, length, data,
                                   qeth_l3_default_setassparms_cb, NULL);
        return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
                                     QETH_PROT_IPV6);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
                        card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
        QETH_DBF_TEXT(SETUP, 2, "diagtrac");
 
        iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.diagass.subcmd_len = 16;
        cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
                        IPA_CMD_ASS_ARP_QUERY_INFO,
                        sizeof(struct qeth_arp_query_data) - sizeof(char),
                        prot);
+       if (!iob)
+               return -ENOMEM;
        cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
        cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
        cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_ADD_ENTRY,
                                       sizeof(struct qeth_arp_cache_entry),
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   sizeof(struct qeth_arp_cache_entry),
                                   (unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
                                       IPA_CMD_ASS_ARP_REMOVE_ENTRY,
                                       12,
                                       QETH_PROT_IPV4);
+       if (!iob)
+               return -ENOMEM;
        rc = qeth_l3_send_setassparms(card, iob,
                                   12, (unsigned long)buf,
                                   qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 
 static int qeth_l3_setup_netdev(struct qeth_card *card)
 {
+       int rc;
+
        if (card->info.type == QETH_CARD_TYPE_OSD ||
            card->info.type == QETH_CARD_TYPE_OSX) {
                if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
                card->dev->netdev_ops = &qeth_l3_netdev_ops;
-               qeth_l3_iqd_read_initial_mac(card);
+               rc = qeth_l3_iqd_read_initial_mac(card);
+               if (rc)
+                       return rc;
                if (card->options.hsuid[0])
                        memcpy(card->dev->perm_addr, card->options.hsuid, 9);
        } else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        recover_flag = card->state;
        rc = qeth_core_hardsetup_card(card);
        if (rc) {
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
                rc = -ENODEV;
                goto out_remove;
        }
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
 contin:
        rc = qeth_l3_setadapter_parms(card);
        if (rc)
-               QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+               QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
        if (!card->options.sniffer) {
                rc = qeth_l3_start_ipassists(card);
                if (rc) {
@@ -3410,10 +3438,10 @@ contin:
                }
                rc = qeth_l3_setrouting_v4(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
                rc = qeth_l3_setrouting_v6(card);
                if (rc)
-                       QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+                       QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
        }
        netif_tx_disable(card->dev);
 
index e028854..9b38299 100644 (file)
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
                return -ENXIO;
        if (!get_device(&sdev->sdev_gendev))
                return -ENXIO;
-       /* We can fail this if we're doing SCSI operations
+       /* We can fail try_module_get if we're doing SCSI operations
         * from module exit (like cache flush) */
-       try_module_get(sdev->host->hostt->module);
+       __module_get(sdev->host->hostt->module);
 
        return 0;
 }
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
  */
 void scsi_device_put(struct scsi_device *sdev)
 {
-#ifdef CONFIG_MODULE_UNLOAD
-       struct module *module = sdev->host->hostt->module;
-
-       /* The module refcount will be zero if scsi_device_get()
-        * was called from a module removal routine */
-       if (module && module_refcount(module) != 0)
-               module_put(module);
-#endif
+       module_put(sdev->host->hostt->module);
        put_device(&sdev->sdev_gendev);
 }
 EXPORT_SYMBOL(scsi_device_put);
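Taken together, the two hunks make the module reference unconditional: scsi_device_get() now always takes a reference via __module_get(), and scsi_device_put() always drops it, so the old refcount peeking is gone. A sketch of a caller under the new rules (the function itself is hypothetical):

static int example_use(struct scsi_device *sdev)
{
	int err = scsi_device_get(sdev);	/* __module_get() cannot fail */

	if (err)	/* only the device lookups can fail now */
		return err;
	/* ... issue SCSI commands ... */
	scsi_device_put(sdev);	/* plain module_put(), no refcount peeking */
	return 0;
}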
index 930f601..65d610a 100644 (file)
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
                return 0;
        }
 
-       if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+       if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
                CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
                return -EFAULT;
        }
index 2f0fbc3..e427cb7 100644 (file)
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        path->search_commit_root = 1;
        path->skip_locking = 1;
 
+       ppath->search_commit_root = 1;
+       ppath->skip_locking = 1;
        /*
         * trigger the readahead for extent tree csum tree and wait for
         * completion. During readahead, the scrub is officially paused
index c8b148b..3e193cb 100644 (file)
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
                             s64 change, struct gfs2_quota_data *qd,
-                            struct fs_disk_quota *fdq)
+                            struct qc_dqblk *fdq)
 {
        struct inode *inode = &ip->i_inode;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
        be64_add_cpu(&q.qu_value, change);
        qd->qd_qb.qb_value = q.qu_value;
        if (fdq) {
-               if (fdq->d_fieldmask & FS_DQ_BSOFT) {
-                       q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_SOFT) {
+                       q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_warn = q.qu_warn;
                }
-               if (fdq->d_fieldmask & FS_DQ_BHARD) {
-                       q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPC_HARD) {
+                       q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_limit = q.qu_limit;
                }
-               if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
-                       q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+               if (fdq->d_fieldmask & QC_SPACE) {
+                       q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
                        qd->qd_qb.qb_value = q.qu_value;
                }
        }
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
 }
 
 static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
        struct gfs2_holder q_gh;
        int error;
 
-       memset(fdq, 0, sizeof(struct fs_disk_quota));
+       memset(fdq, 0, sizeof(*fdq));
 
        if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
                goto out;
 
        qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
-       fdq->d_version = FS_DQUOT_VERSION;
-       fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
-       fdq->d_id = from_kqid_munged(current_user_ns(), qid);
-       fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
-       fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
-       fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+       fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+       fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
 
        gfs2_glock_dq_uninit(&q_gh);
 out:
@@ -1536,10 +1533,10 @@ out:
 }
 
 /* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
 
 static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
-                         struct fs_disk_quota *fdq)
+                         struct qc_dqblk *fdq)
 {
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
                goto out_i;
 
        /* If nothing has changed, this is a no-op */
-       if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
-           ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
-               fdq->d_fieldmask ^= FS_DQ_BSOFT;
+       if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+           ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+               fdq->d_fieldmask ^= QC_SPC_SOFT;
 
-       if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
-           ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
-               fdq->d_fieldmask ^= FS_DQ_BHARD;
+       if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+           ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+               fdq->d_fieldmask ^= QC_SPC_HARD;
 
-       if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
-           ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
-               fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+       if ((fdq->d_fieldmask & QC_SPACE) &&
+           ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+               fdq->d_fieldmask ^= QC_SPACE;
 
        if (fdq->d_fieldmask == 0)
                goto out_i;
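The ^= in the three checks above works because each flag is known to be set when its branch is taken, so XOR simply clears that one bit; a minimal illustration of the idiom (values hypothetical):

	unsigned int fieldmask = QC_SPC_SOFT | QC_SPC_HARD;

	if (fieldmask & QC_SPC_SOFT)		/* bit is set here ... */
		fieldmask ^= QC_SPC_SOFT;	/* ... so XOR clears it */
	/* fieldmask is now QC_SPC_HARD only */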
index 10bf072..294692f 100644 (file)
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
 {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+       /* only swap files are supported as callers of nfs_direct_IO */
+       if (!IS_SWAPFILE(inode))
+               return 0;
+
 #ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp, (long long) pos, iter->nr_segs);
index 4bffe63..2211f6b 100644 (file)
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
 
        nfs_attr_check_mountpoint(sb, fattr);
 
-       if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
-           !nfs_attr_use_mounted_on_fileid(fattr))
+       if (nfs_attr_use_mounted_on_fileid(fattr))
+               fattr->fileid = fattr->mounted_on_fileid;
+       else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
                goto out_no_inode;
        if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
                goto out_no_inode;
index efaa31c..b6f34bf 100644 (file)
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
            (((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
             ((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
                return 0;
-
-       fattr->fileid = fattr->mounted_on_fileid;
        return 1;
 }
 
index 953daa4..706ad10 100644 (file)
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (status == 0) {
+                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
                                nfs4_schedule_lease_recovery(pos);
                                status = nfs4_wait_clnt_recover(pos);
                        }
index 8f0acef..69df5b2 100644 (file)
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
 }
 
 /* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
 
        memset(di, 0, sizeof(*di));
-       di->d_version = FS_DQUOT_VERSION;
-       di->d_flags = dquot->dq_id.type == USRQUOTA ?
-                       FS_USER_QUOTA : FS_GROUP_QUOTA;
-       di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
        spin_lock(&dq_data_lock);
-       di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
-       di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+       di->d_spc_hardlimit = dm->dqb_bhardlimit;
+       di->d_spc_softlimit = dm->dqb_bsoftlimit;
        di->d_ino_hardlimit = dm->dqb_ihardlimit;
        di->d_ino_softlimit = dm->dqb_isoftlimit;
-       di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
-       di->d_icount = dm->dqb_curinodes;
-       di->d_btimer = dm->dqb_btime;
-       di->d_itimer = dm->dqb_itime;
+       di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+       di->d_ino_count = dm->dqb_curinodes;
+       di->d_spc_timer = dm->dqb_btime;
+       di->d_ino_timer = dm->dqb_itime;
        spin_unlock(&dq_data_lock);
 }
 
 int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
-                   struct fs_disk_quota *di)
+                   struct qc_dqblk *di)
 {
        struct dquot *dquot;
 
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
-#define VFS_FS_DQ_MASK \
-       (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
-        FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
-        FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+       (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+        QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+        QC_SPC_TIMER | QC_INO_TIMER)
 
 /* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
        struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
 
-       if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+       if (di->d_fieldmask & ~VFS_QC_MASK)
                return -EINVAL;
 
-       if (((di->d_fieldmask & FS_DQ_BSOFT) &&
-            (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_BHARD) &&
-            (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
-           ((di->d_fieldmask & FS_DQ_ISOFT) &&
+       if (((di->d_fieldmask & QC_SPC_SOFT) &&
+            stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_SPC_HARD) &&
+            stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+           ((di->d_fieldmask & QC_INO_SOFT) &&
             (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
-           ((di->d_fieldmask & FS_DQ_IHARD) &&
+           ((di->d_fieldmask & QC_INO_HARD) &&
             (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
                return -ERANGE;
 
        spin_lock(&dq_data_lock);
-       if (di->d_fieldmask & FS_DQ_BCOUNT) {
-               dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+       if (di->d_fieldmask & QC_SPACE) {
+               dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BSOFT)
-               dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
-       if (di->d_fieldmask & FS_DQ_BHARD)
-               dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
-       if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+       if (di->d_fieldmask & QC_SPC_SOFT)
+               dm->dqb_bsoftlimit = di->d_spc_softlimit;
+       if (di->d_fieldmask & QC_SPC_HARD)
+               dm->dqb_bhardlimit = di->d_spc_hardlimit;
+       if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ICOUNT) {
-               dm->dqb_curinodes = di->d_icount;
+       if (di->d_fieldmask & QC_INO_COUNT) {
+               dm->dqb_curinodes = di->d_ino_count;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ISOFT)
+       if (di->d_fieldmask & QC_INO_SOFT)
                dm->dqb_isoftlimit = di->d_ino_softlimit;
-       if (di->d_fieldmask & FS_DQ_IHARD)
+       if (di->d_fieldmask & QC_INO_HARD)
                dm->dqb_ihardlimit = di->d_ino_hardlimit;
-       if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+       if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_BTIMER) {
-               dm->dqb_btime = di->d_btimer;
+       if (di->d_fieldmask & QC_SPC_TIMER) {
+               dm->dqb_btime = di->d_spc_timer;
                check_blim = 1;
                set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        }
 
-       if (di->d_fieldmask & FS_DQ_ITIMER) {
-               dm->dqb_itime = di->d_itimer;
+       if (di->d_fieldmask & QC_INO_TIMER) {
+               dm->dqb_itime = di->d_ino_timer;
                check_ilim = 1;
                set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        }
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curspace < dm->dqb_bsoftlimit) {
                        dm->dqb_btime = 0;
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+               } else if (!(di->d_fieldmask & QC_SPC_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
        }
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
                    dm->dqb_curinodes < dm->dqb_isoftlimit) {
                        dm->dqb_itime = 0;
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
-               } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+               } else if (!(di->d_fieldmask & QC_INO_TIMER))
                        /* Set grace only if user hasn't provided his own... */
                        dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
        }
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
 }
 
 int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
-                 struct fs_disk_quota *di)
+                 struct qc_dqblk *di)
 {
        struct dquot *dquot;
        int rc;
index 2aa4151..6f38563 100644 (file)
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
        return sb->s_qcop->set_info(sb, type, &info);
 }
 
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+       return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+       return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
 {
        memset(dst, 0, sizeof(*dst));
-       dst->dqb_bhardlimit = src->d_blk_hardlimit;
-       dst->dqb_bsoftlimit = src->d_blk_softlimit;
-       dst->dqb_curspace = src->d_bcount;
+       dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+       dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+       dst->dqb_curspace = src->d_space;
        dst->dqb_ihardlimit = src->d_ino_hardlimit;
        dst->dqb_isoftlimit = src->d_ino_softlimit;
-       dst->dqb_curinodes = src->d_icount;
-       dst->dqb_btime = src->d_btimer;
-       dst->dqb_itime = src->d_itimer;
+       dst->dqb_curinodes = src->d_ino_count;
+       dst->dqb_btime = src->d_spc_timer;
+       dst->dqb_itime = src->d_ino_timer;
        dst->dqb_valid = QIF_ALL;
 }
 
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
        struct kqid qid;
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        int ret;
 
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
        return 0;
 }
 
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
-       dst->d_blk_hardlimit = src->dqb_bhardlimit;
-       dst->d_blk_softlimit  = src->dqb_bsoftlimit;
-       dst->d_bcount = src->dqb_curspace;
+       dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+       dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+       dst->d_space = src->dqb_curspace;
        dst->d_ino_hardlimit = src->dqb_ihardlimit;
        dst->d_ino_softlimit = src->dqb_isoftlimit;
-       dst->d_icount = src->dqb_curinodes;
-       dst->d_btimer = src->dqb_btime;
-       dst->d_itimer = src->dqb_itime;
+       dst->d_ino_count = src->dqb_curinodes;
+       dst->d_spc_timer = src->dqb_btime;
+       dst->d_ino_timer = src->dqb_itime;
 
        dst->d_fieldmask = 0;
        if (src->dqb_valid & QIF_BLIMITS)
-               dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+               dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
        if (src->dqb_valid & QIF_SPACE)
-               dst->d_fieldmask |= FS_DQ_BCOUNT;
+               dst->d_fieldmask |= QC_SPACE;
        if (src->dqb_valid & QIF_ILIMITS)
-               dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+               dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
        if (src->dqb_valid & QIF_INODES)
-               dst->d_fieldmask |= FS_DQ_ICOUNT;
+               dst->d_fieldmask |= QC_INO_COUNT;
        if (src->dqb_valid & QIF_BTIME)
-               dst->d_fieldmask |= FS_DQ_BTIMER;
+               dst->d_fieldmask |= QC_SPC_TIMER;
        if (src->dqb_valid & QIF_ITIME)
-               dst->d_fieldmask |= FS_DQ_ITIMER;
+               dst->d_fieldmask |= QC_INO_TIMER;
 }
 
 static int quota_setquota(struct super_block *sb, int type, qid_t id,
                          void __user *addr)
 {
-       struct fs_disk_quota fdq;
+       struct qc_dqblk fdq;
        struct if_dqblk idq;
        struct kqid qid;
 
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
        return ret;
 }
 
+/*
+ * XFS defines the BBTOB and BTOBB macros inside fs/xfs/ and we cannot move
+ * them out of there, as xfsprogs relies on the definitions staying in that
+ * header file. So just define the same conversions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+       return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+       return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
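As a quick sanity check of the two helpers (illustrative only, not part of the patch): with XFS_BB_SHIFT == 9 a basic block is 512 bytes, and quota_btobb() rounds partial blocks up:

static void quota_conv_examples(void)
{
	WARN_ON(quota_bbtob(3) != 1536);  /* 3 blocks -> 1536 bytes */
	WARN_ON(quota_btobb(1) != 1);     /* one byte occupies a whole block */
	WARN_ON(quota_btobb(1024) != 2);  /* exact multiple: exactly 2 blocks */
}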
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+       dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+       dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_space = quota_bbtob(src->d_bcount);
+       dst->d_ino_count = src->d_icount;
+       dst->d_ino_timer = src->d_itimer;
+       dst->d_spc_timer = src->d_btimer;
+       dst->d_ino_warns = src->d_iwarns;
+       dst->d_spc_warns = src->d_bwarns;
+       dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+       dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+       dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+       dst->d_rt_spc_timer = src->d_rtbtimer;
+       dst->d_rt_spc_warns = src->d_rtbwarns;
+       dst->d_fieldmask = 0;
+       if (src->d_fieldmask & FS_DQ_ISOFT)
+               dst->d_fieldmask |= QC_INO_SOFT;
+       if (src->d_fieldmask & FS_DQ_IHARD)
+               dst->d_fieldmask |= QC_INO_HARD;
+       if (src->d_fieldmask & FS_DQ_BSOFT)
+               dst->d_fieldmask |= QC_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_BHARD)
+               dst->d_fieldmask |= QC_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_RTBSOFT)
+               dst->d_fieldmask |= QC_RT_SPC_SOFT;
+       if (src->d_fieldmask & FS_DQ_RTBHARD)
+               dst->d_fieldmask |= QC_RT_SPC_HARD;
+       if (src->d_fieldmask & FS_DQ_BTIMER)
+               dst->d_fieldmask |= QC_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_ITIMER)
+               dst->d_fieldmask |= QC_INO_TIMER;
+       if (src->d_fieldmask & FS_DQ_RTBTIMER)
+               dst->d_fieldmask |= QC_RT_SPC_TIMER;
+       if (src->d_fieldmask & FS_DQ_BWARNS)
+               dst->d_fieldmask |= QC_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_IWARNS)
+               dst->d_fieldmask |= QC_INO_WARNS;
+       if (src->d_fieldmask & FS_DQ_RTBWARNS)
+               dst->d_fieldmask |= QC_RT_SPC_WARNS;
+       if (src->d_fieldmask & FS_DQ_BCOUNT)
+               dst->d_fieldmask |= QC_SPACE;
+       if (src->d_fieldmask & FS_DQ_ICOUNT)
+               dst->d_fieldmask |= QC_INO_COUNT;
+       if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+               dst->d_fieldmask |= QC_RT_SPACE;
+}
+
 static int quota_setxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
 
        if (copy_from_user(&fdq, addr, sizeof(fdq)))
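
The quota_bbtob()/quota_btobb() helpers added above convert between bytes
and 512-byte "basic blocks" (1 << XFS_BB_SHIFT). A minimal standalone
sketch of the same arithmetic, compilable in userspace, showing that the
byte-to-block direction rounds partial blocks up:

    #include <assert.h>
    #include <stdint.h>

    #define XFS_BB_SHIFT 9                    /* one basic block = 512 bytes */

    static uint64_t bbtob(uint64_t blocks)
    {
            return blocks << XFS_BB_SHIFT;
    }

    static uint64_t btobb(uint64_t bytes)
    {
            return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
    }

    int main(void)
    {
            assert(bbtob(8) == 4096);         /* 8 basic blocks = 4 KiB     */
            assert(btobb(4096) == 8);         /* exact multiples round-trip */
            assert(btobb(4097) == 9);         /* partial blocks round up    */
            return 0;
    }
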
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+       copy_from_xfs_dqblk(&qdq, &fdq);
+       return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+                             int type, qid_t id)
+{
+       memset(dst, 0, sizeof(*dst));
+       dst->d_version = FS_DQUOT_VERSION;
+       dst->d_id = id;
+       if (type == USRQUOTA)
+               dst->d_flags = FS_USER_QUOTA;
+       else if (type == PRJQUOTA)
+               dst->d_flags = FS_PROJ_QUOTA;
+       else
+               dst->d_flags = FS_GROUP_QUOTA;
+       dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+       dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+       dst->d_ino_hardlimit = src->d_ino_hardlimit;
+       dst->d_ino_softlimit = src->d_ino_softlimit;
+       dst->d_bcount = quota_btobb(src->d_space);
+       dst->d_icount = src->d_ino_count;
+       dst->d_itimer = src->d_ino_timer;
+       dst->d_btimer = src->d_spc_timer;
+       dst->d_iwarns = src->d_ino_warns;
+       dst->d_bwarns = src->d_spc_warns;
+       dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+       dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+       dst->d_rtbcount = quota_btobb(src->d_rt_space);
+       dst->d_rtbtimer = src->d_rt_spc_timer;
+       dst->d_rtbwarns = src->d_rt_spc_warns;
 }
 
 static int quota_getxquota(struct super_block *sb, int type, qid_t id,
                           void __user *addr)
 {
        struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
        struct kqid qid;
        int ret;
 
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
        qid = make_kqid(current_user_ns(), type, id);
        if (!qid_valid(qid))
                return -EINVAL;
-       ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
-       if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+       ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+       if (ret)
+               return ret;
+       copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+       if (copy_to_user(addr, &fdq, sizeof(fdq)))
                return -EFAULT;
        return ret;
 }
index bb15771..08f3555 100644
@@ -224,7 +224,7 @@ out:
 static int udf_release_file(struct inode *inode, struct file *filp)
 {
        if (filp->f_mode & FMODE_WRITE &&
-           atomic_read(&inode->i_writecount) > 1) {
+           atomic_read(&inode->i_writecount) == 1) {
                /*
                 * Grab i_mutex to avoid races with writes changing i_size
                 * while we are running.
index 3a07a93..41f6c0b 100644
@@ -166,9 +166,9 @@ extern void         xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 /* quota ops */
 extern int             xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-                                       uint, struct fs_disk_quota *);
+                                       uint, struct qc_dqblk *);
 extern int             xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
-                                       struct fs_disk_quota *);
+                                       struct qc_dqblk *);
 extern int             xfs_qm_scall_getqstat(struct xfs_mount *,
                                        struct fs_quota_stat *);
 extern int             xfs_qm_scall_getqstatv(struct xfs_mount *,
index 74fca68..cb6168e 100644
@@ -39,7 +39,6 @@ STATIC int    xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
 STATIC int     xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
                                        uint);
 STATIC uint    xfs_qm_export_flags(uint);
-STATIC uint    xfs_qm_export_qtype_flags(uint);
 
 /*
  * Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
        return 0;
 }
 
-#define XFS_DQ_MASK \
-       (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+       (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
 
 /*
  * Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       fs_disk_quota_t         *newlim)
+       struct qc_dqblk         *newlim)
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
        int                     error;
        xfs_qcnt_t              hard, soft;
 
-       if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+       if (newlim->d_fieldmask & ~XFS_QC_MASK)
                return -EINVAL;
-       if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+       if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
                return 0;
 
        /*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
        /*
         * Make sure that hardlimits are >= soft limits before changing.
         */
-       hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+       soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
        }
-       hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+       hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
-               (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+       soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+               (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
        }
 
-       hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+       hard = (newlim->d_fieldmask & QC_INO_HARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
-       soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+       soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
        /*
         * Update warnings counter(s) if requested
         */
-       if (newlim->d_fieldmask & FS_DQ_BWARNS)
-               ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
-       if (newlim->d_fieldmask & FS_DQ_IWARNS)
-               ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
-       if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+       if (newlim->d_fieldmask & QC_SPC_WARNS)
+               ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+       if (newlim->d_fieldmask & QC_INO_WARNS)
+               ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+       if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+               ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
 
        if (id == 0) {
                /*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
-               if (newlim->d_fieldmask & FS_DQ_BTIMER) {
-                       q->qi_btimelimit = newlim->d_btimer;
-                       ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+               if (newlim->d_fieldmask & QC_SPC_TIMER) {
+                       q->qi_btimelimit = newlim->d_spc_timer;
+                       ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_ITIMER) {
-                       q->qi_itimelimit = newlim->d_itimer;
-                       ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+               if (newlim->d_fieldmask & QC_INO_TIMER) {
+                       q->qi_itimelimit = newlim->d_ino_timer;
+                       ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
-                       q->qi_rtbtimelimit = newlim->d_rtbtimer;
-                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+               if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+                       q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+                       ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
                }
-               if (newlim->d_fieldmask & FS_DQ_BWARNS)
-                       q->qi_bwarnlimit = newlim->d_bwarns;
-               if (newlim->d_fieldmask & FS_DQ_IWARNS)
-                       q->qi_iwarnlimit = newlim->d_iwarns;
-               if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
-                       q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+               if (newlim->d_fieldmask & QC_SPC_WARNS)
+                       q->qi_bwarnlimit = newlim->d_spc_warns;
+               if (newlim->d_fieldmask & QC_INO_WARNS)
+                       q->qi_iwarnlimit = newlim->d_ino_warns;
+               if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+                       q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
        xfs_dqid_t              id,
        uint                    type,
-       struct fs_disk_quota    *dst)
+       struct qc_dqblk         *dst)
 {
        struct xfs_dquot        *dqp;
        int                     error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
        }
 
        memset(dst, 0, sizeof(*dst));
-       dst->d_version = FS_DQUOT_VERSION;
-       dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
-       dst->d_id = be32_to_cpu(dqp->q_core.d_id);
-       dst->d_blk_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
-       dst->d_blk_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+       dst->d_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+       dst->d_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
-       dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
-       dst->d_icount = dqp->q_res_icount;
-       dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
-       dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
-       dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
-       dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
-       dst->d_rtb_hardlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
-       dst->d_rtb_softlimit =
-               XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
-       dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
-       dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
-       dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+       dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+       dst->d_ino_count = dqp->q_res_icount;
+       dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+       dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+       dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+       dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+       dst->d_rt_spc_hardlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+       dst->d_rt_spc_softlimit =
+               XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+       dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+       dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+       dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
 
        /*
         * Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
             dqp->q_core.d_flags == XFS_DQ_GROUP) ||
            (!XFS_IS_PQUOTA_ENFORCED(mp) &&
             dqp->q_core.d_flags == XFS_DQ_PROJ)) {
-               dst->d_btimer = 0;
-               dst->d_itimer = 0;
-               dst->d_rtbtimer = 0;
+               dst->d_spc_timer = 0;
+               dst->d_ino_timer = 0;
+               dst->d_rt_spc_timer = 0;
        }
 
 #ifdef DEBUG
-       if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
-            (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
-            (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
-           dst->d_id != 0) {
-               if ((dst->d_bcount > dst->d_blk_softlimit) &&
-                   (dst->d_blk_softlimit > 0)) {
-                       ASSERT(dst->d_btimer != 0);
+       if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+            (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+            (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+           id != 0) {
+               if ((dst->d_space > dst->d_spc_softlimit) &&
+                   (dst->d_spc_softlimit > 0)) {
+                       ASSERT(dst->d_spc_timer != 0);
                }
-               if ((dst->d_icount > dst->d_ino_softlimit) &&
+               if ((dst->d_ino_count > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
-                       ASSERT(dst->d_itimer != 0);
+                       ASSERT(dst->d_ino_timer != 0);
                }
        }
 #endif
@@ -908,26 +904,6 @@ out_put:
 }
 
 STATIC uint
-xfs_qm_export_qtype_flags(
-       uint flags)
-{
-       /*
-        * Can't be more than one, or none.
-        */
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_USER_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
-               (FS_USER_QUOTA | FS_GROUP_QUOTA));
-       ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
-       return (flags & XFS_DQ_USER) ?
-               FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
-                       FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
-STATIC uint
 xfs_qm_export_flags(
        uint flags)
 {
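
->get_dqblk() now reports space in bytes (XFS_FSB_TO_B) where it used to
report 512-byte basic blocks (XFS_FSB_TO_BB); the generic layer converts
back with quota_btobb() when servicing the XFS ioctls. A worked example of
the two-step conversion, assuming a 4 KiB filesystem block size:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t blksize = 4096;               /* assumed fs block size */
            uint64_t fsb = 10;                     /* filesystem blocks     */
            uint64_t bytes = fsb * blksize;        /* XFS_FSB_TO_B step     */
            uint64_t bb = bytes >> 9;              /* quota_btobb step      */

            /* prints "10 FSB = 40960 bytes = 80 basic blocks": the same
             * 80 BB the old one-step XFS_FSB_TO_BB conversion produced */
            printf("%llu FSB = %llu bytes = %llu basic blocks\n",
                   (unsigned long long)fsb, (unsigned long long)bytes,
                   (unsigned long long)bb);
            return 0;
    }
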
index 7542bbe..801a84c 100644
@@ -131,7 +131,7 @@ STATIC int
 xfs_fs_get_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-                                     xfs_quota_type(qid.type), fdq);
+                                     xfs_quota_type(qid.type), qdq);
 }
 
 STATIC int
 xfs_fs_set_dqblk(
        struct super_block      *sb,
        struct kqid             qid,
-       struct fs_disk_quota    *fdq)
+       struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
 
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
                return -ESRCH;
 
        return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
-                                    xfs_quota_type(qid.type), fdq);
+                                    xfs_quota_type(qid.type), qdq);
 }
 
 const struct quotactl_ops xfs_quotactl_operations = {
index 80fc92a..dd5ea30 100644
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_WRITE 0x0008  /* Special case for get_user_pages */
 #define VM_FAULT_HWPOISON 0x0010       /* Hit poisoned small page */
 #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
 
 #define VM_FAULT_NOPAGE        0x0100  /* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED        0x0200  /* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-                        VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+                        VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+                        VM_FAULT_FALLBACK)
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
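
The new VM_FAULT_SIGSEGV bit lets handle_mm_fault() report "kill the task
with SIGSEGV" (for example on a stack guard page hit, see the mm/memory.c
hunk below) separately from SIGBUS, which is why every arch/*/mm/fault.c
in the file list is touched. A sketch of the dispatch each fault handler
needs, modeled on the x86 code; the goto labels stand in for the
per-architecture error paths:

    fault = handle_mm_fault(mm, vma, address, flags);
    if (unlikely(fault & VM_FAULT_ERROR)) {
            if (fault & VM_FAULT_OOM)
                    goto out_of_memory;
            else if (fault & VM_FAULT_SIGSEGV)
                    goto bad_area;            /* new: e.g. stack guard page */
            else if (fault & VM_FAULT_SIGBUS)
                    goto do_sigbus;
            BUG();
    }
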
index 50978b7..097d7eb 100644
@@ -321,6 +321,49 @@ struct dquot_operations {
 
 struct path;
 
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+       int d_fieldmask;        /* mask of fields to change in ->set_dqblk() */
+       u64 d_spc_hardlimit;    /* absolute limit on used space */
+       u64 d_spc_softlimit;    /* preferred limit on used space */
+       u64 d_ino_hardlimit;    /* maximum # allocated inodes */
+       u64 d_ino_softlimit;    /* preferred inode limit */
+       u64 d_space;            /* Space owned by the user */
+       u64 d_ino_count;        /* # inodes owned by the user */
+       s64 d_ino_timer;        /* zero if within inode limits */
+                               /* if not, we refuse service */
+       s64 d_spc_timer;        /* similar to above; for space */
+       int d_ino_warns;        /* # warnings issued wrt num inodes */
+       int d_spc_warns;        /* # warnings issued wrt used space */
+       u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+       u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+       u64 d_rt_space;         /* realtime space owned */
+       s64 d_rt_spc_timer;     /* similar to above; for RT space */
+       int d_rt_spc_warns;     /* # warnings issued wrt RT space */
+};
+
+/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+#define        QC_INO_SOFT     (1<<0)
+#define        QC_INO_HARD     (1<<1)
+#define        QC_SPC_SOFT     (1<<2)
+#define        QC_SPC_HARD     (1<<3)
+#define        QC_RT_SPC_SOFT  (1<<4)
+#define        QC_RT_SPC_HARD  (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+                      QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define        QC_SPC_TIMER    (1<<6)
+#define        QC_INO_TIMER    (1<<7)
+#define        QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define        QC_SPC_WARNS    (1<<9)
+#define        QC_INO_WARNS    (1<<10)
+#define        QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define        QC_SPACE        (1<<12)
+#define        QC_INO_COUNT    (1<<13)
+#define        QC_RT_SPACE     (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+
 /* Operations handling requests from userspace */
 struct quotactl_ops {
        int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -329,8 +372,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
-       int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
-       int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+       int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
        int (*set_xstate)(struct super_block *, unsigned int, int);
        int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
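
With the callbacks converted, a filesystem-independent caller fills
struct qc_dqblk in bytes and flags exactly the fields it wants changed.
A hedged sketch (sb and qid are assumed to be set up already) that raises
only the space limits for one user:

    struct qc_dqblk qdq = {
            .d_spc_softlimit = 1ULL << 30,    /* 1 GiB, in bytes, not blocks */
            .d_spc_hardlimit = 2ULL << 30,    /* 2 GiB                       */
            .d_fieldmask     = QC_SPC_SOFT | QC_SPC_HARD,
    };
    int err = sb->s_qcop->set_dqblk(sb, qid, &qdq);
    /* inode limits, timers and warning counts are left untouched because
     * their QC_* bits are not set in d_fieldmask */
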
index f23538a..29e3455 100644
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
 int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
-               struct fs_disk_quota *di);
+               struct qc_dqblk *di);
 
 int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
 int dquot_transfer(struct inode *inode, struct iattr *iattr);
index 0bb6207..f7cbd70 100644
@@ -39,11 +39,12 @@ struct inet_skb_parm {
        struct ip_options       opt;            /* Compiled IP options          */
        unsigned char           flags;
 
-#define IPSKB_FORWARDED                1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE    8
-#define IPSKB_REROUTED         16
+#define IPSKB_FORWARDED                BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE    BIT(3)
+#define IPSKB_REROUTED         BIT(4)
+#define IPSKB_DOREDIRECT       BIT(5)
 
        u16                     frag_max_size;
 };
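
BIT(n) from <linux/bitops.h> expands to (1UL << (n)), so the rewrite
leaves the five existing flag values unchanged; the functional change is
the new IPSKB_DOREDIRECT bit used by the ip_forward.c and route.c hunks
below. A standalone check of the equivalence:

    #include <assert.h>

    #define BIT(nr) (1UL << (nr))    /* as defined in <linux/bitops.h> */

    int main(void)
    {
            assert(BIT(0) == 1 && BIT(1) == 2 && BIT(2) == 4);
            assert(BIT(3) == 8 && BIT(4) == 16);
            assert(BIT(5) == 32);    /* the new IPSKB_DOREDIRECT value */
            return 0;
    }
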
index 088ac0b..536edc2 100644
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct fd f = fdget(ufd);
        struct bpf_map *map;
-       void *key, *value;
+       void *key, *value, *ptr;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
-       err = -ENOENT;
-       rcu_read_lock();
-       value = map->ops->map_lookup_elem(map, key);
+       err = -ENOMEM;
+       value = kmalloc(map->value_size, GFP_USER);
        if (!value)
-               goto err_unlock;
+               goto free_key;
+
+       rcu_read_lock();
+       ptr = map->ops->map_lookup_elem(map, key);
+       if (ptr)
+               memcpy(value, ptr, map->value_size);
+       rcu_read_unlock();
+
+       err = -ENOENT;
+       if (!ptr)
+               goto free_value;
 
        err = -EFAULT;
        if (copy_to_user(uvalue, value, map->value_size) != 0)
-               goto err_unlock;
+               goto free_value;
 
        err = 0;
 
-err_unlock:
-       rcu_read_unlock();
+free_value:
+       kfree(value);
 free_key:
        kfree(key);
 err_put:
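
The rewrite exists because copy_to_user() may fault and sleep, which is
forbidden inside an rcu_read_lock() section; the lookup result is
therefore snapshotted into a kmalloc() buffer while the RCU read lock is
held and copied to userspace only after unlocking. The general pattern,
as a hedged sketch (rcu_lookup() stands in for map->ops->map_lookup_elem):

    buf = kmalloc(size, GFP_USER);
    if (!buf)
            return -ENOMEM;

    rcu_read_lock();
    obj = rcu_lookup(key);            /* pointer valid only inside the  */
    if (obj)                          /* read-side critical section, so */
            memcpy(buf, obj, size);   /* snapshot it while it is valid  */
    rcu_read_unlock();

    if (!obj) {
            kfree(buf);
            return -ENOENT;
    }
    err = copy_to_user(ubuf, buf, size) ? -EFAULT : 0;  /* may sleep: ok */
    kfree(buf);
    return err;
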
index a900759..8dd50ce 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
                        return -ENOMEM;
                if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
                        return -EHWPOISON;
-               if (ret & VM_FAULT_SIGBUS)
+               if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
                        return -EFAULT;
                BUG();
        }
index d247efa..15647fb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
                else
                        ret = VM_FAULT_WRITE;
                put_page(page);
-       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+       } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
        /*
         * We must loop because handle_mm_fault() may back out if there's
         * any difficulty e.g. if pte accessed bit gets updated concurrently.
index 54f3a9b..2c3536c 100644
@@ -2632,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        /* Check if we need to add a guard page to the stack */
        if (check_stack_guard_page(vma, address) < 0)
-               return VM_FAULT_SIGBUS;
+               return VM_FAULT_SIGSEGV;
 
        /* Use the zero-page for reads */
        if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
index 515569f..589aafd 100644
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
        snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
                        ds->index, ds->pd->sw_addr);
        ds->slave_mii_bus->parent = ds->master_dev;
+       ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
 }
 
 
index 3a83ce5..787b3c2 100644
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
         *      We now generate an ICMP HOST REDIRECT giving the route
         *      we calculated.
         */
-       if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+           !skb_sec_path(skb))
                ip_rt_send_redirect(skb);
 
        skb->priority = rt_tos2priority(iph->tos);
index c0d82f7..2a3720f 100644
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
 
        sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
        if (sk != NULL) {
+               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
                pr_debug("rcv on socket %p\n", sk);
-               ping_queue_rcv_skb(sk, skb_get(skb));
+               if (skb2)
+                       ping_queue_rcv_skb(sk, skb2);
                sock_put(sk);
                return true;
        }
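
skb_get() only increments the reference count, so the socket queue and
the caller would keep operating on the very same buffer; skb_clone()
gives the queue its own sk_buff header (with private cb[] state) that
merely shares the payload. A hedged sketch of the distinction;
queue_to_socket() stands in for ping_queue_rcv_skb():

    struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
    if (skb2)                          /* fresh header, private cb[],      */
            queue_to_socket(sk, skb2); /* payload shared with the original */
    /* skb_get(skb) would instead return the same struct sk_buff with its
     * refcount bumped, leaving queue and caller to fight over one buffer;
     * if the atomic clone fails, the echo reply is simply dropped */
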
index 6a2155b..d58dd0e 100644
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,
 
        do_cache = res->fi && !itag;
        if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+           skb->protocol == htons(ETH_P_IP) &&
            (IN_DEV_SHARED_MEDIA(out_dev) ||
-            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
-               flags |= RTCF_DOREDIRECT;
-               do_cache = false;
-       }
+            inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+               IPCB(skb)->flags |= IPSKB_DOREDIRECT;
 
        if (skb->protocol != htons(ETH_P_IP)) {
                /* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src,
        r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
        if (rt->rt_flags & RTCF_NOTIFY)
                r->rtm_flags |= RTM_F_NOTIFY;
+       if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+               r->rtm_flags |= RTCF_DOREDIRECT;
 
        if (nla_put_be32(skb, RTA_DST, dst))
                goto nla_put_failure;
index 7927db0..4a000f1 100644
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
        s_slot = cb->args[0];
        num = s_num = cb->args[1];
 
-       for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+       for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                struct udp_hslot *hslot = &table->hash[slot];
 
+               num = 0;
+
                if (hlist_nulls_empty(&hslot->head))
                        continue;
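
The resume check inside the chain walk is `num < s_num`, so it can only
skip the sockets a previous dump already emitted if num restarts from
zero for every slot; the old code effectively started num at s_num, so
the skip never fired and resumed dumps repeated entries. A hedged sketch
of the corrected resumable walk; first()/next()/emit() are stand-ins:

    for (slot = s_slot; slot <= mask; s_num = 0, slot++) {
            num = 0;                   /* restart the in-chain index */
            for (e = first(slot); e; e = next(e), num++) {
                    if (num < s_num)
                            continue;  /* already emitted last time  */
                    if (!emit(e))
                            goto done; /* buffer full, resume later  */
            }
    }
    done:
            args[0] = slot;            /* saved resume point         */
            args[1] = num;
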
 
index b2d1838..f1c6d5e 100644
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
        return 0;
 }
 
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+                         struct net *net)
+{
+       if (atomic_read(&rt->rt6i_ref) != 1) {
+               /* This route is used as dummy address holder in some split
+                * nodes. It is not leaked, but it still holds other resources,
+                * which must be released in time. So, scan ascendant nodes
+                * and replace dummy references to this route with references
+                * to still alive ones.
+                */
+               while (fn) {
+                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+                               fn->leaf = fib6_find_prefix(net, fn);
+                               atomic_inc(&fn->leaf->rt6i_ref);
+                               rt6_release(rt);
+                       }
+                       fn = fn->parent;
+               }
+               /* No more references are possible at this point. */
+               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+       }
+}
+
 /*
  *     Insert routing information in a node.
  */
@@ -807,11 +830,12 @@ add:
                rt->dst.rt6_next = iter->dst.rt6_next;
                atomic_inc(&rt->rt6i_ref);
                inet6_rt_notify(RTM_NEWROUTE, rt, info);
-               rt6_release(iter);
                if (!(fn->fn_flags & RTN_RTINFO)) {
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               fib6_purge_rt(iter, fn, info->nl_net);
+               rt6_release(iter);
        }
 
        return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
                fn = fib6_repair_tree(net, fn);
        }
 
-       if (atomic_read(&rt->rt6i_ref) != 1) {
-               /* This route is used as dummy address holder in some split
-                * nodes. It is not leaked, but it still holds other resources,
-                * which must be released in time. So, scan ascendant nodes
-                * and replace dummy references to this route with references
-                * to still alive ones.
-                */
-               while (fn) {
-                       if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
-                               fn->leaf = fib6_find_prefix(net, fn);
-                               atomic_inc(&fn->leaf->rt6i_ref);
-                               rt6_release(rt);
-                       }
-                       fn = fn->parent;
-               }
-               /* No more references are possible at this point. */
-               BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
-       }
+       fib6_purge_rt(rt, fn, net);
 
        inet6_rt_notify(RTM_DELROUTE, rt, info);
        rt6_release(rt);
index 166e33b..4959653 100644
@@ -1242,12 +1242,16 @@ restart:
                rt = net->ipv6.ip6_null_entry;
        else if (rt->dst.error) {
                rt = net->ipv6.ip6_null_entry;
-       } else if (rt == net->ipv6.ip6_null_entry) {
+               goto out;
+       }
+
+       if (rt == net->ipv6.ip6_null_entry) {
                fn = fib6_backtrack(fn, &fl6->saddr);
                if (fn)
                        goto restart;
        }
 
+out:
        dst_hold(&rt->dst);
 
        read_unlock_bh(&table->tb6_lock);
index 5f98364..48bf5a0 100644
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 {
        struct flowi6 *fl6 = &fl->u.ip6;
        int onlyproto = 0;
-       u16 offset = skb_network_header_len(skb);
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
+       u16 offset = sizeof(*hdr);
        struct ipv6_opt_hdr *exthdr;
        const unsigned char *nh = skb_network_header(skb);
-       u8 nexthdr = nh[IP6CB(skb)->nhoff];
+       u16 nhoff = IP6CB(skb)->nhoff;
        int oif = 0;
+       u8 nexthdr;
+
+       if (!nhoff)
+               nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+       nexthdr = nh[nhoff];
 
        if (skb_dst(skb))
                oif = skb_dst(skb)->dev->ifindex;
index 612a5dd..799bafc 100644
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
        {
                .procname       = "ack",
                .data           = &sysctl_llc2_ack_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_ack_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "busy",
                .data           = &sysctl_llc2_busy_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_busy_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "p",
                .data           = &sysctl_llc2_p_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_p_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "rej",
                .data           = &sysctl_llc2_rej_timeout,
-               .maxlen         = sizeof(long),
+               .maxlen         = sizeof(sysctl_llc2_rej_timeout),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
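
proc_dointvec_jiffies() operates on ints, but the table declared the data
as sizeof(long), which is 8 bytes on 64-bit kernels: the handler would
treat the 4-byte variable as two ints and read or write past it.
sizeof(variable) cannot drift from the object it describes. A standalone
illustration of the mismatch on an LP64 target:

    #include <stdio.h>

    static int sysctl_llc2_ack_timeout;      /* the real object is an int */

    int main(void)
    {
            /* on LP64 this prints 8 vs 4: the table overstated the size;
             * on 32-bit both are 4, which is why the bug went unnoticed */
            printf("sizeof(long) = %zu, sizeof(var) = %zu\n",
                   sizeof(long), sizeof(sysctl_llc2_ack_timeout));
            return 0;
    }
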
index 4c5192e..4a95fe3 100644
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                }
        }
 
-       /* tear down aggregation sessions and remove STAs */
-       mutex_lock(&local->sta_mtx);
-       list_for_each_entry(sta, &local->sta_list, list) {
-               if (sta->uploaded) {
-                       enum ieee80211_sta_state state;
-
-                       state = sta->sta_state;
-                       for (; state > IEEE80211_STA_NOTEXIST; state--)
-                               WARN_ON(drv_sta_state(local, sta->sdata, sta,
-                                                     state, state - 1));
-               }
-       }
-       mutex_unlock(&local->sta_mtx);
-
        /* remove all interfaces that were created in the driver */
        list_for_each_entry(sdata, &local->interfaces, list) {
                if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                case NL80211_IFTYPE_STATION:
                        ieee80211_mgd_quiesce(sdata);
                        break;
+               case NL80211_IFTYPE_WDS:
+                       /* tear down aggregation sessions and remove STAs */
+                       mutex_lock(&local->sta_mtx);
+                       sta = sdata->u.wds.sta;
+                       if (sta && sta->uploaded) {
+                               enum ieee80211_sta_state state;
+
+                               state = sta->sta_state;
+                               for (; state > IEEE80211_STA_NOTEXIST; state--)
+                                       WARN_ON(drv_sta_state(local, sta->sdata,
+                                                             sta, state,
+                                                             state - 1));
+                       }
+                       mutex_unlock(&local->sta_mtx);
+                       break;
                default:
                        break;
                }
index 683b10f..d69ca51 100644
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
        else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
                channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
        else if (rate)
-               channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+               channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
        else
                channel_flags |= IEEE80211_CHAN_2GHZ;
        put_unaligned_le16(channel_flags, pos);
index 84c8219..f59adf8 100644
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        }
 
        bpf_size = bpf_len * sizeof(*bpf_ops);
+       if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+               ret = -EINVAL;
+               goto errout;
+       }
+
        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL) {
                ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
 {
        unsigned int i = 0x80000000;
+       u32 handle;
 
        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));
-       if (i == 0)
+
+       if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
+               handle = 0;
+       } else {
+               handle = head->hgen;
+       }
 
-       return i;
+       return handle;
 }
 
 static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
index f791edd..26d06db 100644
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->peer.peer_hmacs = new->peer.peer_hmacs;
        new->peer.peer_hmacs = NULL;
 
-       sctp_auth_key_put(asoc->asoc_shared_key);
        sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
 }
 
index a2c33a4..418795c 100644
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
 static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
                                         struct sock_iocb *siocb)
 {
-       if (!is_sync_kiocb(iocb))
-               BUG();
-
        siocb->kiocb = iocb;
        iocb->private = siocb;
        return siocb;
index 7ca4b51..8887c6e 100644
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->get_key)
                return -EOPNOTSUPP;
 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               return -ENOENT;
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
                goto nla_put_failure;
 
-       if (pairwise && mac_addr &&
-           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-               return -ENOENT;
-
        err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
                           get_key_callback);
 
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        wdev_lock(dev->ieee80211_ptr);
        err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-       if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+       if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
                err = -ENOENT;
 
index d0ac795..5488c36 100644
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
                goto out;
        }
 
+       if (ieee80211_is_mgmt(fc)) {
+               if (ieee80211_has_order(fc))
+                       hdrlen += IEEE80211_HT_CTL_LEN;
+               goto out;
+       }
+
        if (ieee80211_is_ctl(fc)) {
                /*
                 * ACK and CTS are 10 bytes, all others 16. To see how
index e286b42..6299ee9 100644
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
 
        /* iterate over two elements */
        assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
-              next_key == 2);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
-              next_key == 1);
+              (next_key == 1 || next_key == 2));
        assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
               errno == ENOENT);
 
index ec667f1..5d905d9 100644
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
 static int my_client = -1;
 
 /*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-       struct snd_seq_dummy_port *p;
-       int i;
-       struct snd_seq_event ev;
-
-       p = private_data;
-       memset(&ev, 0, sizeof(ev));
-       if (p->duplex)
-               ev.source.port = p->connect;
-       else
-               ev.source.port = p->port;
-       ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-       ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-       for (i = 0; i < 16; i++) {
-               ev.data.control.channel = i;
-               ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-               ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-       }
-       return 0;
-}
-
-/*
  * event input callback - just redirect events to subscribers
  */
 static int
@@ -175,7 +145,6 @@ create_port(int idx, int type)
                | SNDRV_SEQ_PORT_TYPE_PORT;
        memset(&pcb, 0, sizeof(pcb));
        pcb.owner = THIS_MODULE;
-       pcb.unuse = dummy_unuse;
        pcb.event_input = dummy_input;
        pcb.private_free = dummy_free;
        pcb.private_data = rec;
index 7752860..4c23381 100644
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
        if (ret)
                goto err_clk_disable;
 
+       return 0;
+
 err_clk_disable:
        clk_disable_unprepare(i2s->clk);
        return ret;
index e5f2fb8..30c673c 100644
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
 static const char * const pcm512x_dsp_program_texts[] = {
        "FIR interpolation with de-emphasis",
        "Low latency IIR with de-emphasis",
-       "Fixed process flow",
        "High attenuation with de-emphasis",
+       "Fixed process flow",
        "Ringing-less low latency FIR",
 };
 
index 2cd4fe4..1d1c7f8 100644
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
                RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
        dev_dbg(codec->dev, "format val = 0x%x\n", val);
 
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
-       else
-               snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+       snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
 
        return 0;
 }
index c0fbe18..918ada9 100644
@@ -2083,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2101,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
        struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
 
        switch (event) {
-       case SND_SOC_DAPM_POST_PMU:
+       case SND_SOC_DAPM_PRE_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+               break;
+
+       case SND_SOC_DAPM_POST_PMU:
                regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
                break;
+
        default:
                return 0;
        }
@@ -2212,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
 
 static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
-               0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
-               0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+               0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+               SND_SOC_DAPM_POST_PMU),
 
        /* Input Side */
        /* micbias */
index 1d12057..9f2dced 100644
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
        struct ts3a227e *ts3a227e;
        struct device *dev = &i2c->dev;
        int ret;
+       unsigned int acc_reg;
 
        ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
        if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
                           INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
                           ADC_COMPLETE_INT_DISABLE);
 
+       /* Read jack status because chip might not trigger interrupt at boot. */
+       regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+       ts3a227e_new_jack_state(ts3a227e, acc_reg);
+       ts3a227e_jack_report(ts3a227e);
+
        return 0;
 }
 
index 4d2d2b1..75b87c5 100644
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
        { "Right Capture PGA", NULL, "Right Capture Mux" },
        { "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
 
-       { "AIFOUTL", "Left",  "ADCL" },
-       { "AIFOUTL", "Right", "ADCR" },
-       { "AIFOUTR", "Left",  "ADCL" },
-       { "AIFOUTR", "Right", "ADCR" },
+       { "AIFOUTL Mux", "Left", "ADCL" },
+       { "AIFOUTL Mux", "Right", "ADCR" },
+       { "AIFOUTR Mux", "Left", "ADCL" },
+       { "AIFOUTR Mux", "Right", "ADCR" },
+
+       { "AIFOUTL", NULL, "AIFOUTL Mux" },
+       { "AIFOUTR", NULL, "AIFOUTR Mux" },
 
        { "ADCL", NULL, "CLK_DSP" },
        { "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
 };
 
 static const struct snd_soc_dapm_route dac_intercon[] = {
-       { "DACL", "Right", "AIFINR" },
-       { "DACL", "Left",  "AIFINL" },
+       { "DACL Mux", "Left", "AIFINL" },
+       { "DACL Mux", "Right", "AIFINR" },
+
+       { "DACR Mux", "Left", "AIFINL" },
+       { "DACR Mux", "Right", "AIFINR" },
+
+       { "DACL", NULL, "DACL Mux" },
        { "DACL", NULL, "CLK_DSP" },
 
-       { "DACR", "Right", "AIFINR" },
-       { "DACR", "Left",  "AIFINL" },
+       { "DACR", NULL, "DACR Mux" },
        { "DACR", NULL, "CLK_DSP" },
 
        { "Charge pump", NULL, "SYSCLK" },
index 031a1ae..a96eb49 100644
@@ -556,7 +556,7 @@ static struct {
        { 22050, 2 },
        { 24000, 2 },
        { 16000, 3 },
-       { 11250, 4 },
+       { 11025, 4 },
        { 12000, 4 },
        {  8000, 5 },
 };
index 91a550f..5e793bb 100644
 #define ESAI_xCCR_xFP_MASK     (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
 #define ESAI_xCCR_xFP(v)       ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
 #define ESAI_xCCR_xDC_SHIFT     9
-#define ESAI_xCCR_xDC_WIDTH    4
+#define ESAI_xCCR_xDC_WIDTH    5
 #define ESAI_xCCR_xDC_MASK     (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
 #define ESAI_xCCR_xDC(v)       ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
 #define ESAI_xCCR_xPSR_SHIFT   8
index a65f17d..059496e 100644
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        }
 
        ssi_private->irq = platform_get_irq(pdev, 0);
-       if (!ssi_private->irq) {
+       if (ssi_private->irq < 0) {
                dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
-               return -ENXIO;
+               return ssi_private->irq;
        }
 
        /* Are the RX and the TX clocks locked? */
index 4caacb0..cd146d4 100644
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
        if (ret)
                goto clk_fail;
        data->card.num_links = 1;
+       data->card.owner = THIS_MODULE;
        data->card.dai_link = &data->dai;
        data->card.dapm_widgets = imx_wm8962_dapm_widgets;
        data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
index fb9240f..7fe3009 100644
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
 }
 
 /* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
 {
-       struct snd_soc_card *card = platform_get_drvdata(pdev);
        struct snd_soc_dai_link *dai_link;
        int num_links;
 
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
                return ret;
 
 err:
-       asoc_simple_card_unref(pdev);
+       asoc_simple_card_unref(&priv->snd_card);
        return ret;
 }
 
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
                snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
                                        &simple_card_mic_jack_gpio);
 
-       return asoc_simple_card_unref(pdev);
+       return asoc_simple_card_unref(card);
 }
 
 static const struct of_device_id asoc_simple_of_match[] = {
index ef2e8b5..b3f9489 100644
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
        struct list_head *block_list)
 {
        struct sst_mem_block *block, *tmp;
+       struct sst_block_allocator ba_tmp = *ba;
        u32 end = ba->offset + ba->size, block_end;
        int err;
 
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                if (ba->offset >= block->offset && ba->offset < block_end) {
 
                        /* align ba to block boundary */
-                       ba->size -= block_end - ba->offset;
-                       ba->offset = block_end;
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
@@ -767,10 +768,10 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
                        list_move(&block->list, &dsp->used_block_list);
                        list_add(&block->module_list, block_list);
                        /* align ba to block boundary */
-                       ba->size -= block_end - ba->offset;
-                       ba->offset = block_end;
+                       ba_tmp.size -= block_end - ba->offset;
+                       ba_tmp.offset = block_end;
 
-                       err = block_alloc_contiguous(dsp, ba, block_list);
+                       err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
                        if (err < 0)
                                return -ENOMEM;
 
index 3f8c482..5bf1404 100644
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        struct sst_dsp *sst = hsw->dsp;
        unsigned long flags;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
+               return 0;
+       }
+
        /* dont free DSP streams that are not commited */
        if (!stream->commited)
                goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
        u32 header;
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+               return 0;
+       }
+
+       if (stream->commited) {
+               dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream alloc", stream->host_id);
 
        header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream pause", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
 {
        int ret;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+               return 0;
+       }
+
        trace_ipc_request("stream resume", stream->reply.stream_hw_id);
 
        ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
 {
        int ret, tries = 10;
 
+       if (!stream) {
+               dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+               return 0;
+       }
+
        /* dont reset streams that are not commited */
        if (!stream->commited)
                return 0;
index 8b79caf..c7eb9dd 100644
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
        case SND_SOC_DAIFMT_CBM_CFS:
                /* McBSP slave. FS clock as output */
                regs->srgr2     |= FSGM;
-               regs->pcr0      |= FSXM;
+               regs->pcr0      |= FSXM | FSRM;
                break;
        case SND_SOC_DAIFMT_CBM_CFM:
                /* McBSP slave */
index 13d8507..dcc26ed 100644
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
                            SNDRV_PCM_FMTBIT_S24_LE),
        },
        .ops = &rockchip_i2s_dai_ops,
+       .symmetric_rates = 1,
 };
 
 static const struct snd_soc_component_driver rockchip_i2s_component = {
index 590a82f..025c38f 100644
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
                        rtd->dai_link->stream_name);
 
                ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
-                               1, 0, &be_pcm);
+                               rtd->dai_link->dpcm_playback,
+                               rtd->dai_link->dpcm_capture, &be_pcm);
                if (ret < 0) {
                        dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
                                rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
 
                rtd->pcm = be_pcm;
                rtd->fe_compr = 1;
-               be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
-               be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+               if (rtd->dai_link->dpcm_playback)
+                       be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+               else if (rtd->dai_link->dpcm_capture)
+                       be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
                memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
        } else
                memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));