Merge branches 'x86/vt-d', 'arm/omap', 'core', 'x86/amd' and 'arm/smmu' into next
author Joerg Roedel <joro@8bytes.org>
Tue, 25 Jun 2013 21:34:29 +0000 (23:34 +0200)
committer Joerg Roedel <joro@8bytes.org>
Tue, 25 Jun 2013 21:34:29 +0000 (23:34 +0200)
172 files changed:
Documentation/DocBook/media/v4l/dev-codec.xml
Documentation/DocBook/media/v4l/v4l2.xml
Documentation/devicetree/bindings/iommu/arm,smmu.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
Documentation/sound/alsa/HD-Audio-Models.txt
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/exynos5250-pinctrl.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/include/asm/cacheflush.h
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/process.c
arch/arm/kernel/smp.c
arch/arm/mm/cache-v7.S
arch/arm/mm/flush.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7.S
arch/arm64/kernel/perf_event.c
arch/ia64/include/asm/irqflags.h
arch/metag/include/asm/hugetlb.h
arch/mn10300/include/asm/irqflags.h
arch/mn10300/include/asm/smp.h
arch/parisc/include/asm/mmzone.h
arch/parisc/include/asm/pci.h
arch/parisc/kernel/hardware.c
arch/parisc/kernel/pacache.S
arch/parisc/kernel/pci.c
arch/parisc/mm/init.c
arch/powerpc/kvm/booke.c
arch/powerpc/mm/hugetlbpage.c
arch/sparc/include/asm/Kbuild
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_amba.h
arch/sparc/include/asm/linkage.h [deleted file]
arch/sparc/kernel/ds.c
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pci_grpci1.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/mm/init_64.c
arch/sparc/mm/tlb.c
arch/sparc/prom/bootstr_32.c
arch/sparc/prom/tree_64.c
arch/tile/lib/exports.c
arch/um/drivers/mconsole_kern.c
arch/x86/Kconfig
arch/x86/crypto/aesni-intel_asm.S
arch/x86/ia32/ia32_aout.c
arch/x86/include/asm/irq.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/nmi.h
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/process.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/x86.c
arch/x86/platform/efi/efi.c
drivers/acpi/acpi_lpss.c
drivers/acpi/device_pm.c
drivers/acpi/dock.c
drivers/acpi/power.c
drivers/acpi/resource.c
drivers/base/firmware_class.c
drivers/block/rbd.c
drivers/clk/clk.c
drivers/clk/samsung/clk-exynos5250.c
drivers/clk/samsung/clk-pll.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/tegra/clk-tegra30.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd_iommu.c
drivers/iommu/arm-smmu.c [new file with mode: 0644]
drivers/iommu/iommu.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iopgtable.h
drivers/iommu/omap-iovmm.c
drivers/irqchip/irq-gic.c
drivers/media/Kconfig
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/pci/cx88/cx88-alsa.c
drivers/media/pci/cx88/cx88-video.c
drivers/media/platform/coda.c
drivers/media/platform/davinci/vpbe_display.c
drivers/media/platform/davinci/vpfe_capture.c
drivers/media/platform/exynos4-is/fimc-is-regs.c
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/exynos4-is/fimc-is.h
drivers/media/platform/exynos4-is/fimc-isp.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/s3c-camif/camif-core.h
drivers/media/platform/s5p-jpeg/Makefile
drivers/media/platform/s5p-mfc/Makefile
drivers/media/platform/s5p-mfc/s5p_mfc.c
drivers/media/platform/s5p-mfc/s5p_mfc_common.h
drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
drivers/media/platform/sh_veu.c
drivers/media/platform/soc_camera/soc_camera.c
drivers/media/radio/Kconfig
drivers/media/radio/radio-si476x.c
drivers/media/tuners/Kconfig
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/gspca/sonixb.c
drivers/media/usb/pwc/pwc.h
drivers/media/v4l2-core/v4l2-ctrls.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/media/v4l2-core/v4l2-mem2mem.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/parisc/iosapic.c
drivers/scsi/bfa/bfad_debugfs.c
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/staging/media/davinci_vpfe/Kconfig
drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
drivers/staging/media/solo6x10/Kconfig
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_gsc.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/phy/Kconfig
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/ti_usb_3410_5052.h
fs/internal.h
fs/read_write.c
fs/splice.c
include/acpi/acpi_bus.h
include/linux/context_tracking.h
include/linux/fs.h
include/linux/kvm_host.h
include/linux/perf_event.h
include/linux/preempt.h
include/linux/splice.h
include/linux/vtime.h
include/media/v4l2-mem2mem.h
kernel/context_tracking.c
kernel/cpu/idle.c
kernel/events/core.c
kernel/events/internal.h
kernel/kprobes.c
kernel/range.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/time/tick-broadcast.c
kernel/time/tick-sched.c
mm/slab_common.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_realtek.c
sound/usb/card.c
sound/usb/mixer.c

index dca0ecd..ff44c16 100644 (file)
@@ -1,18 +1,27 @@
   <title>Codec Interface</title>
 
-  <note>
-    <title>Suspended</title>
+  <para>A V4L2 codec can compress, decompress, transform, or otherwise
+convert video data from one format into another format, in memory. Typically
+such devices are memory-to-memory devices (i.e. devices with the
+<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
+capability set).
+</para>
 
-    <para>This interface has been be suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with codec
-device interfaces.</para>
-  </note>
+  <para>A memory-to-memory video node acts just like a normal video node, but it
+supports both output (sending frames from memory to the codec hardware) and
+capture (receiving the processed frames from the codec hardware into memory)
+stream I/O. An application will have to set up the stream
+I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
+to start the codec.</para>
 
-  <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory.
-Applications send data to be converted to the driver through a
-&func-write; call, and receive the converted data through a
-&func-read; call. For efficiency a driver may also support streaming
-I/O.</para>
+  <para>Video compression codecs use the MPEG controls to set up their codec parameters
+(note that the MPEG controls actually support many more codecs than just MPEG).
+See <xref linkend="mpeg-controls"></xref>.</para>
 
-  <para>[to do]</para>
+  <para>Memory-to-memory devices can often be used as a shared resource: you can
+open the video node multiple times, each application setting up its own codec properties
+that are local to the file handle, and each can use it independently from the others.
+The driver will arbitrate access to the codec and reprogram it whenever another file
+handle gets access. This is different from the usual video node behavior where the video properties
+are global to the device (i.e. changing something through one file handle is visible
+through another file handle).</para>
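
To make the memory-to-memory flow described above concrete, here is a minimal
user-space sketch (not part of the patch; the device path /dev/video0, the pixel
formats and the bitrate value are illustrative assumptions, and buffer handling
via VIDIOC_REQBUFS/VIDIOC_QBUF is elided):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    int main(void)
    {
            int fd = open("/dev/video0", O_RDWR);   /* illustrative path */
            struct v4l2_format out = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
            struct v4l2_format cap = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
            struct v4l2_ext_control ctrl = {
                    .id = V4L2_CID_MPEG_VIDEO_BITRATE, /* codec parameter set via MPEG controls */
                    .value = 1000000,                  /* assumed target bitrate */
            };
            struct v4l2_ext_controls ctrls = {
                    .ctrl_class = V4L2_CTRL_CLASS_MPEG,
                    .count = 1,
                    .controls = &ctrl,
            };
            int type;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* OUTPUT queue: raw frames the application feeds to the codec. */
            out.fmt.pix.width = 640;
            out.fmt.pix.height = 480;
            out.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
            ioctl(fd, VIDIOC_S_FMT, &out);

            /* CAPTURE queue: compressed frames the codec returns to memory. */
            cap.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
            ioctl(fd, VIDIOC_S_FMT, &cap);

            /* Controls are local to this file handle, per the text above. */
            ioctl(fd, VIDIOC_S_EXT_CTRLS, &ctrls);

            /* Buffers would be requested and queued here; then start both sides. */
            type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
            ioctl(fd, VIDIOC_STREAMON, &type);
            type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            ioctl(fd, VIDIOC_STREAMON, &type);
            return 0;
    }

Because codec properties are per file handle, a second process opening the same
node gets its own independent state, as the shared-resource paragraph describes.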
index bfc93cd..bfe823d 100644 (file)
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.9</subtitle>
+ <subtitle>Revision 3.10</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
new file mode 100644 (file)
index 0000000..e34c6cd
--- /dev/null
@@ -0,0 +1,70 @@
+* ARM System MMU Architecture Implementation
+
+ARM SoCs may contain an implementation of the ARM System Memory
+Management Unit Architecture, which can be used to provide 1 or 2 stages
+of address translation to bus masters external to the CPU.
+
+The SMMU may also raise interrupts in response to various fault
+conditions.
+
+** System MMU required properties:
+
+- compatible    : Should be one of:
+
+                        "arm,smmu-v1"
+                        "arm,smmu-v2"
+                        "arm,mmu-400"
+                        "arm,mmu-500"
+
+                  depending on the particular implementation and/or the
+                  version of the architecture implemented.
+
+- reg           : Base address and size of the SMMU.
+
+- #global-interrupts : The number of global interrupts exposed by the
+                       device.
+
+- interrupts    : Interrupt list, with the first #global-interrupts entries
+                  corresponding to the global interrupts and any
+                  following entries corresponding to context interrupts,
+                  specified in order of their indexing by the SMMU.
+
+                  For SMMUv2 implementations, there must be exactly one
+                  interrupt per context bank. In the case of a single,
+                  combined interrupt, it must be listed multiple times.
+
+- mmu-masters   : A list of phandles to device nodes representing bus
+                  masters for which the SMMU can provide a translation
+                  and their corresponding StreamIDs (see example below).
+                  Each device node linked from this list must have a
+                  "#stream-id-cells" property, indicating the number of
+                  StreamIDs associated with it.
+
+** System MMU optional properties:
+
+- smmu-parent   : When multiple SMMUs are chained together, this
+                  property can be used to provide a phandle to the
+                  parent SMMU (that is the next SMMU on the path going
+                  from the mmu-masters towards memory) node for this
+                  SMMU.
+
+Example:
+
+        smmu {
+                compatible = "arm,smmu-v1";
+                reg = <0xba5e0000 0x10000>;
+                #global-interrupts = <2>;
+                interrupts = <0 32 4>,
+                             <0 33 4>,
+                             <0 34 4>, /* This is the first context interrupt */
+                             <0 35 4>,
+                             <0 36 4>,
+                             <0 37 4>;
+
+                /*
+                 * Two DMA controllers, the first with two StreamIDs (0xd01d
+                 * and 0xd01e) and the second with only one (0xd11c).
+                 */
+                mmu-masters = <&dma0 0xd01d 0xd01e>,
+                              <&dma1 0xd11c>;
+        };
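
The optional smmu-parent property is not covered by the example above; a minimal
sketch of two chained SMMUs could look as follows (node labels, addresses and
StreamIDs are invented for illustration, not taken from the binding):

        smmu1: smmu@ba5e0000 {
                compatible = "arm,smmu-v1";
                reg = <0xba5e0000 0x10000>;
                #global-interrupts = <1>;
                interrupts = <0 38 4>, <0 39 4>;
                mmu-masters = <&dma0 0xd01d>;
                /* Next SMMU on the path from dma0 towards memory. */
                smmu-parent = <&smmu2>;
        };

        smmu2: smmu@ba5f0000 {
                compatible = "arm,smmu-v1";
                reg = <0xba5f0000 0x10000>;
                #global-interrupts = <1>;
                interrupts = <0 40 4>, <0 41 4>;
                mmu-masters = <&dma1 0xd11c>;
        };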
index 3f62adf..de9f6b7 100644 (file)
@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)
 
 Required properties:
 
-- compatible   : should be "samsung,exynos4212-fimc" for Exynos4212 and
+- compatible   : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
                  Exynos4412 SoCs;
 - reg          : physical base address and size of the device memory mapped
                  registers;
index bb8b0dc..77d68e2 100644 (file)
@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
   alc271-dmic  Enable ALC271X digital mic workaround
   inv-dmic     Inverted internal mic workaround
   lenovo-dock   Enables docking station I/O for some Lenovos
+  dell-headset-multi   Headset jack, which can also be used as mic-in
+  dell-headset-dock    Headset jack (without mic-in), and also dock I/O
 
 ALC662/663/272
 ==============
@@ -42,6 +44,7 @@ ALC662/663/272
   asus-mode7   ASUS
   asus-mode8   ASUS
   inv-dmic     Inverted internal mic workaround
+  dell-headset-multi   Headset jack, which can also be used as mic-in
 
 ALC680
 ======
index 5be702c..1eb2a72 100644 (file)
@@ -1310,6 +1310,12 @@ T:       git git://git.xilinx.com/linux-xlnx.git
 S:     Supported
 F:     arch/arm/mach-zynq/
 
+ARM SMMU DRIVER
+M:     Will Deacon <will.deacon@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/iommu/arm-smmu.c
+
 ARM64 PORT (AARCH64 ARCHITECTURE)
 M:     Catalin Marinas <catalin.marinas@arm.com>
 M:     Will Deacon <will.deacon@arm.com>
index c6863b5..0142c93 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
index 49d993c..2651b1d 100644 (file)
@@ -1189,6 +1189,16 @@ config PL310_ERRATA_588369
           is not correctly implemented in PL310 as clean lines are not
           invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+       bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+       depends on CPU_V7 && SMP
+       help
+         This option enables the workaround for the 643719 Cortex-A9 (prior to
+         r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+         register returns zero when it should return one. The workaround
+         corrects this value, ensuring cache maintenance operations which use
+         it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
        bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
        depends on CPU_V7
@@ -2006,7 +2016,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
        bool "Kexec system call (EXPERIMENTAL)"
-       depends on (!SMP || HOTPLUG_CPU)
+       depends on (!SMP || PM_SLEEP_SMP)
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
index 79e9bdb..120b83b 100644 (file)
@@ -116,7 +116,8 @@ targets       := vmlinux vmlinux.lds \
 
 # Make sure files are removed during clean
 extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-                lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+                lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+                hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
index d1650fb..ded558b 100644 (file)
                };
        };
 
-       pinctrl@03680000 {
+       pinctrl@03860000 {
                gpz: gpz {
                        gpio-controller;
                        #gpio-cells = <2>;
index 0673524..fc9fb3d 100644 (file)
                interrupts = <0 50 0>;
        };
 
-       pinctrl_3: pinctrl@03680000 {
+       pinctrl_3: pinctrl@03860000 {
                compatible = "samsung,exynos5250-pinctrl";
-               reg = <0x0368000 0x1000>;
+               reg = <0x03860000 0x1000>;
                interrupts = <0 47 0>;
        };
 
index bff7138..17d0ae8 100644 (file)
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
        spin_lock_irq(&(mapping)->tree_lock)
index 8ef8c93..4fb074c 100644 (file)
@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
        unsigned long reboot_code_buffer_phys;
        void *reboot_code_buffer;
 
+       if (num_online_cpus() > 1) {
+               pr_err("kexec: error: multiple CPUs still online\n");
+               return;
+       }
 
        page_list = image->head & PAGE_MASK;
 
index 282de48..6e8931c 100644 (file)
@@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
 
 __setup("reboot=", reboot_setup);
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-       smp_send_stop();
-#endif
+       disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-       machine_shutdown();
+       smp_send_stop();
+
        local_irq_disable();
        while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-       machine_shutdown();
+       smp_send_stop();
+
        if (pm_power_off)
                pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-       machine_shutdown();
+       smp_send_stop();
 
        arm_pm_restart(reboot_mode, cmd);
 
index 550d63c..5919eb4 100644 (file)
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-       unsigned int cpu;
-       for_each_cpu(cpu, mask)
-               platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
        unsigned long timeout;
@@ -679,8 +668,6 @@ void smp_send_stop(void)
 
        if (num_online_cpus() > 1)
                pr_warning("SMP: failed to stop secondary CPUs\n");
-
-       smp_kill_cpus(&mask);
 }
 
 /*
index 15451ee..515b000 100644 (file)
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
        mrc     p15, 1, r0, c0, c0, 1           @ read clidr, r0 = clidr
        ALT_SMP(ands    r3, r0, #(7 << 21))     @ extract LoUIS from clidr
        ALT_UP(ands     r3, r0, #(7 << 27))     @ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+       ALT_SMP(mrceq   p15, 0, r2, c0, c0, 0)  @ read main ID register
+       ALT_UP(moveq    pc, lr)                 @ LoUU is zero, so nothing to do
+       ldreq   r1, =0x410fc090                 @ ID of ARM Cortex A9 r0p?
+       biceq   r2, r2, #0x0000000f             @ clear minor revision number
+       teqeq   r2, r1                          @ test for errata affected core and if so...
+       orreqs  r3, #(1 << 21)                  @   fix LoUIS value (and set flags state to 'ne')
+#endif
        ALT_SMP(mov     r3, r3, lsr #20)        @ r3 = LoUIS * 2
        ALT_UP(mov      r3, r3, lsr #26)        @ r3 = LoUU * 2
        moveq   pc, lr                          @ return if level == 0
index 0d473cc..32aa586 100644 (file)
@@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+       if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+               struct address_space *mapping;
+
+               mapping = page_mapping(page);
+
+               if (!mapping || mapping_mapped(mapping)) {
+                       void *addr;
+
+                       addr = page_address(page);
+                       /*
+                        * kmap_atomic() doesn't set the page virtual
+                        * address for highmem pages, and
+                        * kunmap_atomic() takes care of cache
+                        * flushing already.
+                        */
+                       if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+                               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+               }
+       }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
  *
index e0d8565..4d409e6 100644 (file)
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
        } while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
                        unsigned long end, phys_addr_t phys,
                        const struct mem_type *type)
 {
+       pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
        /*
         * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
                phys += SECTION_SIZE;
        } while (pmd++, addr += SECTION_SIZE, addr != end);
 
-       flush_pmd_entry(pmd);
+       flush_pmd_entry(p);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
                 */
                if (type->prot_sect &&
                                ((addr | next | phys) & ~SECTION_MASK) == 0) {
-                       map_init_section(pmd, addr, next, phys, type);
+                       __map_init_section(pmd, addr, next, phys, type);
                } else {
                        alloc_init_pte(pmd, addr, next,
                                                __phys_to_pfn(phys), type);
index 2c73a73..4c8c9c1 100644 (file)
@@ -409,8 +409,8 @@ __v7_ca9mp_proc_info:
         */
        .type   __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
-       .long   0x562f5840
-       .long   0xfffffff0
+       .long   0x560f5800
+       .long   0xff0fff00
        __v7_proc __v7_pj4b_setup
        .size   __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
 
index 1e49e5e..9ba33c4 100644 (file)
@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
                return;
        }
 
+       perf_callchain_store(entry, regs->pc);
        tail = (struct frame_tail __user *)regs->regs[29];
 
        while (entry->nr < PERF_MAX_STACK_DEPTH &&
index 1bf2cf2..cec6c06 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_IA64_IRQFLAGS_H
 
 #include <asm/pal.h>
+#include <asm/kregs.h>
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;
index f545477..471f481 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_METAG_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
index 678f68d..8730c0a 100644 (file)
@@ -13,9 +13,8 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
-#ifndef __ASSEMBLY__
-#include <linux/smp.h>
-#endif
+/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
+#include <asm/smp.h>
 
 /*
  * interrupt control
index 6745dbe..56c4241 100644 (file)
@@ -24,6 +24,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 #endif
 
 #ifdef CONFIG_SMP
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
 extern void smp_init_cpus(void);
 extern void smp_cache_interrupt(void);
 extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
 #ifndef __ASSEMBLY__
 
 static inline void smp_init_cpus(void) {}
+#define raw_smp_processor_id() 0
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */
index cc50d33..b6b34a0 100644 (file)
@@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
 
 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
 #define PFNNID_MAP_MAX  512     /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+extern signed char pfnnid_map[PFNNID_MAP_MAX];
 
 #ifndef CONFIG_64BIT
 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
        i = pfn >> PFNNID_SHIFT;
        BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
 
-       return (int)pfnnid_map[i];
+       return pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)
index 3234f49..4651540 100644 (file)
@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
        return channel ? 15 : 14;
 }
 
+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+       enum pci_mmap_state mmap_state, int write_combine);
+
 #endif /* __ASM_PARISC_PCI_H */
index 9e2d2e4..8722756 100644 (file)
@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
        {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
        {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+       {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
        {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
        {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
        {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
index 36d7f40..b743a80 100644 (file)
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
 #endif
 
        ldil            L%dcache_stride, %r1
-       ldw             R%dcache_stride(%r1), %r1
+       ldw             R%dcache_stride(%r1), r31
 
 #ifdef CONFIG_64BIT
        depdi,z         1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
        depwi,z         1, 31-PAGE_SHIFT,1, %r25
 #endif
        add             %r28, %r25, %r25
-       sub             %r25, %r1, %r25
-
-
-1:      fdc,m          %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
-       fdc,m           %r1(%r28)
+       sub             %r25, r31, %r25
+
+
+1:      fdc,m          r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
+       fdc,m           r31(%r28)
        cmpb,COND(<<)           %r28, %r25,1b
-       fdc,m           %r1(%r28)
+       fdc,m           r31(%r28)
 
        sync
 
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
 #endif
 
        ldil            L%icache_stride, %r1
-       ldw             R%icache_stride(%r1), %r1
+       ldw             R%icache_stride(%r1), %r31
 
 #ifdef CONFIG_64BIT
        depdi,z         1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
        depwi,z         1, 31-PAGE_SHIFT,1, %r25
 #endif
        add             %r28, %r25, %r25
-       sub             %r25, %r1, %r25
+       sub             %r25, %r31, %r25
 
 
        /* fic only has the type 26 form on PA1.1, requiring an
         * explicit space specification, so use %sr4 */
-1:      fic,m          %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
-       fic,m           %r1(%sr4,%r28)
+1:      fic,m          %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
        cmpb,COND(<<)   %r28, %r25,1b
-       fic,m           %r1(%sr4,%r28)
+       fic,m           %r31(%sr4,%r28)
 
        sync
 
index 6030905..64f2764 100644 (file)
@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 }
 
 
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state, int write_combine)
+{
+       unsigned long prot;
+
+       /*
+        * I/O space can be accessed via normal processor loads and stores on
+        * this platform but for now we elect not to do this and portable
+        * drivers should not do this anyway.
+        */
+       if (mmap_state == pci_mmap_io)
+               return -EINVAL;
+
+       if (write_combine)
+               return -EINVAL;
+
+       /*
+        * Ignore write-combine; for now only return uncached mappings.
+        */
+       prot = pgprot_val(vma->vm_page_prot);
+       prot |= _PAGE_NO_CACHE;
+       vma->vm_page_prot = __pgprot(prot);
+
+       return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+               vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
index 1c96564..505b56c 100644 (file)
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 #endif
 
 static struct resource data_resource = {
index 5cd7ad0..1a1b511 100644 (file)
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                ret = s;
                goto out;
        }
-       kvmppc_lazy_ee_enable();
 
        kvm_guest_enter();
 
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        kvmppc_load_guest_fp(vcpu);
 #endif
 
+       kvmppc_lazy_ee_enable();
+
        ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
        /* No need for kvm_guest_exit. It's done in handle_exit.
index 237c8e5..77fdd2c 100644 (file)
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd))
+               if (!is_hugepd(pmd)) {
+                       /*
+                        * if it is not hugepd pointer, we should already find
+                        * it cleared.
+                        */
+                       WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
+               }
 #ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
index ff18e3c..7e4a97f 100644 (file)
@@ -6,6 +6,7 @@ generic-y += cputime.h
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += local64.h
 generic-y += mutex.h
 generic-y += irq_regs.h
index 15a7169..b836e92 100644 (file)
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
 
 #ifdef CONFIG_SMP
 # define LEON3_IRQ_IPI_DEFAULT         13
-# define LEON3_IRQ_TICKER              (leon3_ticker_irq)
+# define LEON3_IRQ_TICKER              (leon3_gptimer_irq)
 # define LEON3_IRQ_CROSS_CALL          15
 #endif
 
index f3034ed..24ec48c 100644 (file)
@@ -47,6 +47,7 @@ struct amba_prom_registers {
 #define LEON3_GPTIMER_LD 4
 #define LEON3_GPTIMER_IRQEN 8
 #define LEON3_GPTIMER_SEPIRQ 8
+#define LEON3_GPTIMER_TIMERS 0x7
 
 #define LEON23_REG_TIMER_CONTROL_EN    0x00000001 /* 1 = enable counting */
 /* 0 = hold scalar and counter */
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h
deleted file mode 100644 (file)
index 291c2d0..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
index 75bb608..5ef48da 100644 (file)
@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
                unsigned long len;
 
                strcpy(full_boot_str, "boot ");
-               strcpy(full_boot_str + strlen("boot "), boot_command);
+               strlcpy(full_boot_str + strlen("boot "), boot_command,
+                       sizeof(full_boot_str + strlen("boot ")));
                len = strlen(full_boot_str);
 
                if (reboot_data_supported) {
index 7c0231d..b7c6897 100644 (file)
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);
 
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
-int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
 
        leon_clear_profile_irq(cpu);
 
+       if (cpu == boot_cpu_id)
+               timer_interrupt(irq, NULL);
+
        ce = &per_cpu(sparc32_clockevent, cpu);
 
        irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
        int icsel;
        int ampopts;
        int err;
+       u32 config;
 
        sparc_config.get_cycles_offset = leon_cycles_offset;
        sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@ void __init leon_init_timers(void)
        LEON3_BYPASS_STORE_PA(
                        &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
 
-#ifdef CONFIG_SMP
-       leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
-
-       if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-             (1<<LEON3_GPTIMER_SEPIRQ))) {
-               printk(KERN_ERR "timer not configured with separate irqs\n");
-               BUG();
-       }
-
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
-                               0);
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-                               (((1000000/HZ) - 1)));
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-                               0);
-#endif
-
        /*
         * The IRQ controller may (if implemented) consist of multiple
         * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
        if (eirq != 0)
                leon_eirq_setup(eirq);
 
-       irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-       err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-       if (err) {
-               printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
-               prom_halt();
-       }
-
 #ifdef CONFIG_SMP
        {
                unsigned long flags;
@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
        }
 #endif
 
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
-                             LEON3_GPTIMER_EN |
-                             LEON3_GPTIMER_RL |
-                             LEON3_GPTIMER_LD |
-                             LEON3_GPTIMER_IRQEN);
+       config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
+       if (config & (1 << LEON3_GPTIMER_SEPIRQ))
+               leon3_gptimer_irq += leon3_gptimer_idx;
+       else if ((config & LEON3_GPTIMER_TIMERS) > 1)
+               pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
 
 #ifdef CONFIG_SMP
        /* Install per-cpu IRQ handler for broadcasted ticker */
-       irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
+       irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
                                    "per-cpu", 0);
        err = request_irq(irq, leon_percpu_timer_ce_interrupt,
-                         IRQF_PERCPU | IRQF_TIMER, "ticker",
-                         NULL);
+                         IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
+#else
+       irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
+       err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+#endif
        if (err) {
-               printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
+               pr_err("Unable to attach timer IRQ%d\n", irq);
                prom_halt();
        }
-
-       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
+       LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
                              LEON3_GPTIMER_EN |
                              LEON3_GPTIMER_RL |
                              LEON3_GPTIMER_LD |
                              LEON3_GPTIMER_IRQEN);
-#endif
        return;
 bad:
        printk(KERN_ERR "No Timer/irqctrl found\n");
index 7739a54..6df26e3 100644 (file)
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)
 
        /* find device register base address */
        res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-       regs = devm_request_and_ioremap(&ofdev->dev, res);
-       if (!regs) {
-               dev_err(&ofdev->dev, "io-regs mapping failed\n");
-               return -EADDRNOTAVAIL;
-       }
+       regs = devm_ioremap_resource(&ofdev->dev, res);
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
 
        /*
         * check that we're in Host Slot and that we can act as a Host Bridge
index bdf53d9..b0b3967 100644 (file)
@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
         * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
         */
        register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+
+       /* Interrupts need to be enabled to not hang the CPU */
+       local_irq_enable();
+
        __asm__ __volatile__ (
                "wr     %%g0, %%asr19\n"
                "lda    [%0] %1, %%g0\n"
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
  */
 void pmc_leon_idle(void)
 {
+       /* Interrupts need to be enabled to not hang the CPU */
+       local_irq_enable();
+
        /* For systems without power-down, this will be no-op */
        __asm__ __volatile__ ("wr       %g0, %asr19\n\t");
 }
index 38bf80a..1434526 100644 (file)
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
 
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
+       strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();
 
        boot_flags_init(*cmdline_p);
index 88a127b..1378554 100644 (file)
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
 {
        /* Initialize PROM console and command line. */
        *cmdline_p = prom_getbootargs();
-       strcpy(boot_command_line, *cmdline_p);
+       strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
        parse_early_param();
 
        boot_flags_init(*cmdline_p);
index a717199..04fd55a 100644 (file)
@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
                m->size = *val;
                val = mdesc_get_property(md, node,
                                         "address-congruence-offset", NULL);
-               m->offset = *val;
+
+               /* The address-congruence-offset property is optional.
+                * Explicitly zero it to identify this.
+                */
+               if (val)
+                       m->offset = *val;
+               else
+                       m->offset = 0UL;
 
                numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
                        count - 1, m->base, m->size, m->offset);
index 83d89bc..37e7bc4 100644 (file)
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
        }
 
        if (!tb->active) {
-               global_flush_tlb_page(mm, vaddr);
                flush_tsb_user_page(mm, vaddr);
+               global_flush_tlb_page(mm, vaddr);
                goto out;
        }
 
index f5ec32e..d2b49d2 100644 (file)
@@ -23,23 +23,25 @@ prom_getbootargs(void)
                return barg_buf;
        }
 
-       switch(prom_vers) {
+       switch (prom_vers) {
        case PROM_V0:
                cp = barg_buf;
                /* Start from 1 and go over fd(0,0,0)kernel */
-               for(iter = 1; iter < 8; iter++) {
+               for (iter = 1; iter < 8; iter++) {
                        arg = (*(romvec->pv_v0bootargs))->argv[iter];
                        if (arg == NULL)
                                break;
-                       while(*arg != 0) {
+                       while (*arg != 0) {
                                /* Leave place for space and null. */
-                               if(cp >= barg_buf + BARG_LEN-2){
+                               if (cp >= barg_buf + BARG_LEN - 2)
                                        /* We might issue a warning here. */
                                        break;
-                               }
                                *cp++ = *arg++;
                        }
                        *cp++ = ' ';
+                       if (cp >= barg_buf + BARG_LEN - 1)
+                               /* We might issue a warning here. */
+                               break;
                }
                *cp = 0;
                break;
index 92204c3..bd1b2a3 100644 (file)
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
        return prom_node_to_node("child", node);
 }
 
-inline phandle prom_getchild(phandle node)
+phandle prom_getchild(phandle node)
 {
        phandle cnode;
 
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
        return prom_node_to_node(prom_peer_name, node);
 }
 
-inline phandle prom_getsibling(phandle node)
+phandle prom_getsibling(phandle node)
 {
        phandle sibnode;
 
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
  */
-inline int prom_getproplen(phandle node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
 {
        unsigned long args[6];
 
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
  * was successful the length will be returned, else -1 is returned.
  */
-inline int prom_getproperty(phandle node, const char *prop,
-                           char *buffer, int bufsize)
+int prom_getproperty(phandle node, const char *prop,
+                    char *buffer, int bufsize)
 {
        unsigned long args[8];
        int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
  */
-inline int prom_getint(phandle node, const char *prop)
+int prom_getint(phandle node, const char *prop)
 {
        int intprop;
 
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
-inline char *prom_firstprop(phandle node, char *buffer)
+char *prom_firstprop(phandle node, char *buffer)
 {
        unsigned long args[7];
 
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
  * at node 'node' .  Returns NULL string if no more
  * property types for this node.
  */
-inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+char *prom_nextprop(phandle node, const char *oprop, char *buffer)
 {
        unsigned long args[7];
        char buf[32];
index 4385cb6..a93b02a 100644 (file)
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashrdi3);
 uint64_t __ashldi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
 #endif
index d7d2185..3df3bd5 100644 (file)
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
        }
 
        do {
-               loff_t pos;
+               loff_t pos = file->f_pos;
                mm_segment_t old_fs = get_fs();
                set_fs(KERNEL_DS);
                len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
index 685692c..fe120da 100644 (file)
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
        bool "IA32 Emulation"
        depends on X86_64
+       select BINFMT_ELF
        select COMPAT_BINFMT_ELF
        select HAVE_UID16
        ---help---
index 62fe22c..477e9d7 100644 (file)
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
        addq %rcx, KEYP
 
        movdqa IV, STATE1
-       pxor 0x00(INP), STATE1
+       movdqu 0x00(INP), INC
+       pxor INC, STATE1
        movdqu IV, 0x00(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE2
-       pxor 0x10(INP), STATE2
+       movdqu 0x10(INP), INC
+       pxor INC, STATE2
        movdqu IV, 0x10(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE3
-       pxor 0x20(INP), STATE3
+       movdqu 0x20(INP), INC
+       pxor INC, STATE3
        movdqu IV, 0x20(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE4
-       pxor 0x30(INP), STATE4
+       movdqu 0x30(INP), INC
+       pxor INC, STATE4
        movdqu IV, 0x30(OUTP)
 
        call *%r11
 
-       pxor 0x00(OUTP), STATE1
+       movdqu 0x00(OUTP), INC
+       pxor INC, STATE1
        movdqu STATE1, 0x00(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE1
-       pxor 0x40(INP), STATE1
+       movdqu 0x40(INP), INC
+       pxor INC, STATE1
        movdqu IV, 0x40(OUTP)
 
-       pxor 0x10(OUTP), STATE2
+       movdqu 0x10(OUTP), INC
+       pxor INC, STATE2
        movdqu STATE2, 0x10(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE2
-       pxor 0x50(INP), STATE2
+       movdqu 0x50(INP), INC
+       pxor INC, STATE2
        movdqu IV, 0x50(OUTP)
 
-       pxor 0x20(OUTP), STATE3
+       movdqu 0x20(OUTP), INC
+       pxor INC, STATE3
        movdqu STATE3, 0x20(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE3
-       pxor 0x60(INP), STATE3
+       movdqu 0x60(INP), INC
+       pxor INC, STATE3
        movdqu IV, 0x60(OUTP)
 
-       pxor 0x30(OUTP), STATE4
+       movdqu 0x30(OUTP), INC
+       pxor INC, STATE4
        movdqu STATE4, 0x30(OUTP)
 
        _aesni_gf128mul_x_ble()
        movdqa IV, STATE4
-       pxor 0x70(INP), STATE4
+       movdqu 0x70(INP), INC
+       pxor INC, STATE4
        movdqu IV, 0x70(OUTP)
 
        _aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)
 
        call *%r11
 
-       pxor 0x40(OUTP), STATE1
+       movdqu 0x40(OUTP), INC
+       pxor INC, STATE1
        movdqu STATE1, 0x40(OUTP)
 
-       pxor 0x50(OUTP), STATE2
+       movdqu 0x50(OUTP), INC
+       pxor INC, STATE2
        movdqu STATE2, 0x50(OUTP)
 
-       pxor 0x60(OUTP), STATE3
+       movdqu 0x60(OUTP), INC
+       pxor INC, STATE3
        movdqu STATE3, 0x60(OUTP)
 
-       pxor 0x70(OUTP), STATE4
+       movdqu 0x70(OUTP), INC
+       pxor INC, STATE4
        movdqu STATE4, 0x70(OUTP)
 
        ret
index 805078e..52ff81c 100644 (file)
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
        /* struct user */
        DUMP_WRITE(&dump, sizeof(dump));
        /* Now dump all of the user data.  Include malloced stuff as well */
-       DUMP_SEEK(PAGE_SIZE);
+       DUMP_SEEK(PAGE_SIZE - sizeof(dump));
        /* now we start writing out the user space info */
        set_fs(USER_DS);
        /* Dump the data area */
index ba870bb..57873be 100644 (file)
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
index 6825e2e..6bc3985 100644 (file)
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
        return 0;
index c0fa356..86f9301 100644 (file)
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
                        void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST 1
 
index 31cb9ae..a698d71 100644 (file)
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
index 35ffda5..5f90b85 100644 (file)
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
        if (mtrr_tom2)
                x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-       nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
        /*
         * [0, 1M) should always be covered by var mtrr with WB
         * and fixed mtrrs should take effect before var mtrr for it:
         */
-       nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+       nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
                                        1ULL<<(20 - PAGE_SHIFT));
-       /* Sort the ranges: */
-       sort_range(range, nr_range);
+       /* add from var mtrr at last */
+       nr_range = x86_get_mtrr_mem_range(range, nr_range,
+                                         x_remove_base, x_remove_size);
 
        range_sums = sum_ranges(range, nr_range);
        printk(KERN_INFO "total RAM covered: %ldM\n",
index f60d41f..a9e2207 100644 (file)
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
        INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
        INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
        INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+       INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
        EVENT_EXTRA_END
 };
 
index d2c3812..3dd37eb 100644 (file)
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
        if (!mem)
                return;
        hv_clock = __va(mem);
+       memset(hv_clock, 0, size);
 
        if (kvm_register_clock("boot clock")) {
                hv_clock = NULL;
index 4e7a37f..81a5f5e 100644 (file)
@@ -277,18 +277,6 @@ void exit_idle(void)
 }
 #endif
 
-void arch_cpu_idle_prepare(void)
-{
-       /*
-        * If we're the non-boot CPU, nothing set the stack canary up
-        * for us.  CPU0 already has it initialized but no harm in
-        * doing it again.  This is a good place for updating it, as
-        * we wont ever return from this function (so the invalid
-        * canaries already on the stack wont ever trigger).
-        */
-       boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
        local_touch_nmi();
index 9c73b51..bfd348e 100644 (file)
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-       bool has_mc = boot_cpu_data.x86_max_cores > 1;
        bool has_smt = smp_num_siblings > 1;
+       bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        struct cpuinfo_x86 *o;
        int i;
 
        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-       if (!has_smt && !has_mc) {
+       if (!has_mp) {
                cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
                cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
                cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                if ((i == cpu) || (has_smt && match_smt(c, o)))
                        link_mask(sibling, cpu, i);
 
-               if ((i == cpu) || (has_mc && match_llc(c, o)))
+               if ((i == cpu) || (has_mp && match_llc(c, o)))
                        link_mask(llc_shared, cpu, i);
 
        }
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);
 
-               if ((i == cpu) || (has_mc && match_mc(c, o))) {
+               if ((i == cpu) || (has_mp && match_mc(c, o))) {
                        link_mask(core, cpu, i);
 
                        /*
index 094b5d9..e8ba99c 100644 (file)
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        xcr0 = xcr;
-       if (kvm_x86_ops->get_cpl(vcpu) != 0)
-               return 1;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-       if (__kvm_set_xcr(vcpu, index, xcr)) {
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+           __kvm_set_xcr(vcpu, index, xcr)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
index 5ae2eb0..d2fbced 100644 (file)
@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
                 * that by attempting to use more space than is available.
                 */
                unsigned long dummy_size = remaining_size + 1024;
-               void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+               void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+               if (!dummy)
+                       return EFI_OUT_OF_RESOURCES;
 
                status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
                                          EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
                                         0, dummy);
                }
 
+               kfree(dummy);
+
                /*
                 * The runtime code may now have triggered a garbage collection
                 * run, so check the variable info again
index 652fd5c..cab13f2 100644 (file)
@@ -164,15 +164,24 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
        if (dev_desc->clk_required) {
                ret = register_device_clock(adev, pdata);
                if (ret) {
-                       /*
-                        * Skip the device, but don't terminate the namespace
-                        * scan.
-                        */
-                       kfree(pdata);
-                       return 0;
+                       /* Skip the device, but continue the namespace scan. */
+                       ret = 0;
+                       goto err_out;
                }
        }
 
+       /*
+        * This works around a known issue in ACPI tables where LPSS devices
+        * have _PS0 and _PS3 without _PSC (and no power resources), so
+        * acpi_bus_init_power() will assume that the BIOS has put them into D0.
+        */
+       ret = acpi_device_fix_up_power(adev);
+       if (ret) {
+               /* Skip the device, but continue the namespace scan. */
+               ret = 0;
+               goto err_out;
+       }
+
        adev->driver_data = pdata;
        ret = acpi_create_platform_device(adev, id);
        if (ret > 0)
index 318fa32..31c217a 100644 (file)
@@ -290,6 +290,26 @@ int acpi_bus_init_power(struct acpi_device *device)
        return 0;
 }
 
+/**
+ * acpi_device_fix_up_power - Force device with missing _PSC into D0.
+ * @device: Device object whose power state is to be fixed up.
+ *
+ * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+ * are assumed to be put into D0 by the BIOS.  However, in some cases that
+ * assumption does not hold, and this function can then be used to force the
+ * device into D0.
+ */
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+       int ret = 0;
+
+       if (!device->power.flags.power_resources
+           && !device->power.flags.explicit_get
+           && device->power.state == ACPI_STATE_D0)
+               ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+       return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
        struct acpi_device *device;
index 4fdea38..ec117c6 100644 (file)
@@ -868,8 +868,10 @@ static ssize_t write_undock(struct device *dev, struct device_attribute *attr,
        if (!count)
                return -EINVAL;
 
+       acpi_scan_lock_acquire();
        begin_undock(dock_station);
        ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+       acpi_scan_lock_release();
        return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
index f962047..288bb27 100644 (file)
@@ -885,6 +885,7 @@ int acpi_add_power_resource(acpi_handle handle)
                                ACPI_STA_DEFAULT);
        mutex_init(&resource->resource_lock);
        INIT_LIST_HEAD(&resource->dependent);
+       INIT_LIST_HEAD(&resource->list_node);
        resource->name = device->pnp.bus_id;
        strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
        strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
index a3868f6..3322b47 100644 (file)
@@ -304,7 +304,8 @@ static void acpi_dev_irqresource_disabled(struct resource *res, u32 gsi)
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-                                    u8 triggering, u8 polarity, u8 shareable)
+                                    u8 triggering, u8 polarity, u8 shareable,
+                                    bool legacy)
 {
        int irq, p, t;
 
@@ -317,14 +318,19 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
         * In IO-APIC mode, use the overridden attribute. Two reasons:
         * 1. BIOS bug in DSDT
         * 2. BIOS uses IO-APIC mode Interrupt Source Override
+        *
+        * We do this only if we are dealing with IRQ() or IRQNoFlags()
+        * resources (the legacy ISA resources). With modern ACPI 5 devices
+        * using extended IRQ descriptors we take the IRQ configuration
+        * from _CRS directly.
         */
-       if (!acpi_get_override_irq(gsi, &t, &p)) {
+       if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
                u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
                u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
                if (triggering != trig || polarity != pol) {
                        pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-                                  t ? "edge" : "level", p ? "low" : "high");
+                                  t ? "level" : "edge", p ? "low" : "high");
                        triggering = trig;
                        polarity = pol;
                }
@@ -373,7 +379,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, irq->interrupts[index],
                                         irq->triggering, irq->polarity,
-                                        irq->sharable);
+                                        irq->sharable, true);
                break;
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
                ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
                }
                acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
                                         ext_irq->triggering, ext_irq->polarity,
-                                        ext_irq->sharable);
+                                        ext_irq->sharable, false);
                break;
        default:
                return false;
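
The two call sites above encode the rule stated in the comment: only the legacy descriptors opt in to the boot-interrupt override. Summarised (a note, not code from the driver):

	/*
	 * ACPI_RESOURCE_TYPE_IRQ          -> legacy = true  (override honoured)
	 * ACPI_RESOURCE_TYPE_EXTENDED_IRQ -> legacy = false (trust _CRS as-is)
	 */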
index 4b1f926..01e2103 100644 (file)
@@ -450,8 +450,18 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 {
        struct firmware_buf *buf = fw_priv->buf;
 
+       /*
+        * There is a small window in which the user can still write to
+        * 'loading' after loading has completed but before the 'loading'
+        * attribute disappears.
+        */
+       if (test_bit(FW_STATUS_DONE, &buf->status))
+               return;
+
        set_bit(FW_STATUS_ABORT, &buf->status);
        complete_all(&buf->completion);
+
+       /* prevent further user action once the load has been aborted */
+       fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf)        \
@@ -528,7 +538,12 @@ static ssize_t firmware_loading_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
-       int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+       int loading = 0;
+
+       mutex_lock(&fw_lock);
+       if (fw_priv->buf)
+               loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+       mutex_unlock(&fw_lock);
 
        return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@ static ssize_t firmware_loading_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct firmware_priv *fw_priv = to_firmware_priv(dev);
-       struct firmware_buf *fw_buf = fw_priv->buf;
+       struct firmware_buf *fw_buf;
        int loading = simple_strtol(buf, NULL, 10);
        int i;
 
        mutex_lock(&fw_lock);
-
+       fw_buf = fw_priv->buf;
        if (!fw_buf)
                goto out;
 
@@ -777,10 +792,6 @@ static void firmware_class_timeout_work(struct work_struct *work)
                        struct firmware_priv, timeout_work.work);
 
        mutex_lock(&fw_lock);
-       if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-               mutex_unlock(&fw_lock);
-               return;
-       }
        fw_load_abort(fw_priv);
        mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
        cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-       fw_priv->buf = NULL;
-
        device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
        device_remove_bin_file(f_dev, &firmware_attr_data);
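
Taken together, these hunks close one race: user space can touch the 'loading' attribute after a load has finished but before the attribute is removed. A sketch of the interleaving the new FW_STATUS_DONE check eliminates (an illustrative timeline, not driver code):

	/*
	 *   loader path                      abort path (user write / timeout)
	 *   -----------                      ---------------------------------
	 *   set_bit(FW_STATUS_DONE)
	 *                                    fw_load_abort()
	 *                                      -> old code: aborted a load
	 *                                         that had already completed
	 *   device_remove_file("loading")
	 *
	 * With the check in place, fw_load_abort() returns early once
	 * FW_STATUS_DONE is set, and clearing fw_priv->buf turns any late
	 * sysfs access into a NULL-guarded no-op.
	 */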
index 3063452..49394e3 100644 (file)
@@ -1036,12 +1036,16 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
        char *name;
        u64 segment;
        int ret;
+       char *name_format;
 
        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
-       ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+       name_format = "%s.%012llx";
+       if (rbd_dev->image_format == 2)
+               name_format = "%s.%016llx";
+       ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
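                        rbd_dev->header.object_prefix, segment);

The two formats differ only in the width of the segment suffix. A worked example, assuming a hypothetical object prefix "rbd_data.1234" and segment number 0x2a:

	/*
	 * format 1: "rbd_data.1234.00000000002a"     ("%s.%012llx", 12 digits)
	 * format 2: "rbd_data.1234.000000000000002a" ("%s.%016llx", 16 digits)
	 */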
index 934cfd1..1144e8c 100644 (file)
@@ -1955,6 +1955,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
+                       list_del(&cn->node);
                        kfree(cn);
                }
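
The added list_del() matters because kfree() on a node that is still linked leaves its neighbours pointing at freed memory. A minimal sketch of the unlink-then-free rule, using a hypothetical structure:

	struct example_node {
		struct list_head node;	/* linked into some list */
	};

	static void release_node(struct example_node *n)
	{
		list_del(&n->node);	/* unlink first ... */
		kfree(n);		/* ... then free; the order is the fix */
	}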
 
index 5c97e75..22d7699 100644 (file)
@@ -155,7 +155,7 @@ static __initdata unsigned long exynos5250_clk_regs[] = {
 
 /* list of all parent clock lists */
 PNAME(mout_apll_p)     = { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p)      = { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p)      = { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p)        = { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p)     = { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p)        = { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@ struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __initdata = {
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-       MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-       MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+       MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+       MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
        MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-       MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+       MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
        MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
        MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
        MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@ struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
        GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
        GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-       GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+       GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
        GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
        GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
        GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
index 89135f6..362f12d 100644 (file)
@@ -111,7 +111,8 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
 {
        struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-       u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+       u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+       s16 kdiv;
        u64 fvco = parent_rate;
 
        pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@ static unsigned long samsung_pll36xx_recalc_rate(struct clk_hw *hw,
        mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
        pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
        sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-       kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+       kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
        fvco *= (mdiv << 16) + kdiv;
        do_div(fvco, (pdiv << sdiv));
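
The cast matters because KDIV is a signed 16-bit fractional term: masking alone produces an unsigned value and loses the sign. A fragment with a hypothetical register reading:

	u32 pll_con1 = 0xffff;                              /* raw KDIV of -1    */
	u32 k_wrong  = pll_con1 & PLL36XX_KDIV_MASK;        /* 65535: inflated   */
	s16 k_right  = (s16)(pll_con1 & PLL36XX_KDIV_MASK); /* -1: sign-extended */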
index f9ec43f..080c3c5 100644 (file)
@@ -369,7 +369,7 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
        clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
index c6921f5..ba99e38 100644 (file)
@@ -1598,6 +1598,12 @@ static void __init tegra30_periph_clk_init(void)
        clk_register_clkdev(clk, "afi", "tegra-pcie");
        clks[afi] = clk;
 
+       /* pciex */
+       clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+                                   74, &periph_u_regs, periph_clk_enb_refcnt);
+       clk_register_clkdev(clk, "pciex", "tegra-pcie");
+       clks[pciex] = clk;
+
        /* kfuse */
        clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
                                    TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@ static void __init tegra30_fixed_clk_init(void)
                                1, 0, &cml_lock);
        clk_register_clkdev(clk, "cml1", NULL);
        clks[cml1] = clk;
-
-       /* pciex */
-       clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-       clk_register_clkdev(clk, "pciex", NULL);
-       clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)
index dcde352..5b7b911 100644 (file)
@@ -190,8 +190,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
                if (ret)
                        return ERR_PTR(ret);
        }
-       return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-                             0600);
+       return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
index 0e53416..6948eb8 100644 (file)
@@ -2687,6 +2687,9 @@ void r600_uvd_rbc_stop(struct radeon_device *rdev)
 int r600_uvd_init(struct radeon_device *rdev)
 {
        int i, j, r;
+       /* disable byte swapping */
+       u32 lmi_swap_cntl = 0;
+       u32 mp_swap_cntl = 0;
 
        /* raise clocks while booting up the VCPU */
        radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@ int r600_uvd_init(struct radeon_device *rdev)
        WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                             (1 << 21) | (1 << 9) | (1 << 20));
 
-       /* disable byte swapping */
-       WREG32(UVD_LMI_SWAP_CNTL, 0);
-       WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+       /* swap (8 in 32) RB and IB */
+       lmi_swap_cntl = 0xa;
+       mp_swap_cntl = 0;
+#endif
+       WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+       WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
        WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(UVD_MPC_SET_MUXA1, 0x0);
index 1899738..b0dc0b6 100644 (file)
@@ -244,16 +244,6 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-       int r;
-
-       if (rdev->wb.wb_obj) {
-               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-               if (unlikely(r != 0))
-                       return;
-               radeon_bo_kunmap(rdev->wb.wb_obj);
-               radeon_bo_unpin(rdev->wb.wb_obj);
-               radeon_bo_unreserve(rdev->wb.wb_obj);
-       }
        rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@ void radeon_wb_fini(struct radeon_device *rdev)
 {
        radeon_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
+               if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+                       radeon_bo_kunmap(rdev->wb.wb_obj);
+                       radeon_bo_unpin(rdev->wb.wb_obj);
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+               }
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@ int radeon_wb_init(struct radeon_device *rdev)
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
-       }
-       r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-       if (unlikely(r != 0)) {
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-                         &rdev->wb.gpu_addr);
-       if (r) {
+               r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+               if (unlikely(r != 0)) {
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+                               &rdev->wb.gpu_addr);
+               if (r) {
+                       radeon_bo_unreserve(rdev->wb.wb_obj);
+                       dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
+               r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
                radeon_bo_unreserve(rdev->wb.wb_obj);
-               dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
-       }
-       r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-       radeon_bo_unreserve(rdev->wb.wb_obj);
-       if (r) {
-               dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-               radeon_wb_fini(rdev);
-               return r;
+               if (r) {
+                       dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+                       radeon_wb_fini(rdev);
+                       return r;
+               }
        }
 
        /* clear wb memory */
index 5b937df..ddb8f8e 100644 (file)
@@ -63,7 +63,9 @@ static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
        struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               *drv->cpu_addr = cpu_to_le32(seq);
+               if (drv->cpu_addr) {
+                       *drv->cpu_addr = cpu_to_le32(seq);
+               }
        } else {
                WREG32(drv->scratch_reg, seq);
        }
@@ -84,7 +86,11 @@ static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
        u32 seq = 0;
 
        if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-               seq = le32_to_cpu(*drv->cpu_addr);
+               if (drv->cpu_addr) {
+                       seq = le32_to_cpu(*drv->cpu_addr);
+               } else {
+                       seq = lower_32_bits(atomic64_read(&drv->last_seq));
+               }
        } else {
                seq = RREG32(drv->scratch_reg);
        }
index 2c1341f..43ec4a4 100644 (file)
@@ -1197,11 +1197,13 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_bo_va *bo_va)
 {
-       int r;
+       int r = 0;
 
        mutex_lock(&rdev->vm_manager.lock);
        mutex_lock(&bo_va->vm->mutex);
-       r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+       if (bo_va->soffset) {
+               r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+       }
        mutex_unlock(&rdev->vm_manager.lock);
        list_del(&bo_va->vm_list);
        mutex_unlock(&bo_va->vm->mutex);
index e17faa7..8243401 100644 (file)
@@ -402,6 +402,13 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
                return -ENOMEM;
        /* Align requested size with padding so unlock_commit can
         * pad safely */
+       radeon_ring_free_size(rdev, ring);
+       if (ring->ring_free_dw == (ring->ring_size / 4)) {
+               /* This is an empty ring update lockup info to avoid
+                * false positive.
+                */
+               radeon_ring_lockup_update(ring);
+       }
        ndw = (ndw + ring->align_mask) & ~ring->align_mask;
        while (ndw > (ring->ring_free_dw - 1)) {
                radeon_ring_free_size(rdev, ring);
index 906e5c0..cad735d 100644 (file)
@@ -159,7 +159,17 @@ int radeon_uvd_suspend(struct radeon_device *rdev)
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
+               rdev->uvd.cpu_addr = NULL;
+               if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+                       radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+               }
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+               if (rdev->uvd.cpu_addr) {
+                       radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+               } else {
+                       rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+               }
        }
        return r;
 }
@@ -178,6 +188,10 @@ int radeon_uvd_resume(struct radeon_device *rdev)
                return r;
        }
 
+       /* The BO was pinned into the CPU domain on suspend; unmap and unpin it. */
+       radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+       radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
@@ -613,19 +627,19 @@ int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
        }
 
        /* stitch together a UVD create msg */
-       msg[0] = 0x00000de4;
-       msg[1] = 0x00000000;
-       msg[2] = handle;
-       msg[3] = 0x00000000;
-       msg[4] = 0x00000000;
-       msg[5] = 0x00000000;
-       msg[6] = 0x00000000;
-       msg[7] = 0x00000780;
-       msg[8] = 0x00000440;
-       msg[9] = 0x00000000;
-       msg[10] = 0x01b37000;
+       msg[0] = cpu_to_le32(0x00000de4);
+       msg[1] = cpu_to_le32(0x00000000);
+       msg[2] = cpu_to_le32(handle);
+       msg[3] = cpu_to_le32(0x00000000);
+       msg[4] = cpu_to_le32(0x00000000);
+       msg[5] = cpu_to_le32(0x00000000);
+       msg[6] = cpu_to_le32(0x00000000);
+       msg[7] = cpu_to_le32(0x00000780);
+       msg[8] = cpu_to_le32(0x00000440);
+       msg[9] = cpu_to_le32(0x00000000);
+       msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
-               msg[i] = 0x0;
+               msg[i] = cpu_to_le32(0x0);
 
        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
        }
 
        /* stitch together a UVD destroy msg */
-       msg[0] = 0x00000de4;
-       msg[1] = 0x00000002;
-       msg[2] = handle;
-       msg[3] = 0x00000000;
+       msg[0] = cpu_to_le32(0x00000de4);
+       msg[1] = cpu_to_le32(0x00000002);
+       msg[2] = cpu_to_le32(handle);
+       msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
-               msg[i] = 0x0;
+               msg[i] = cpu_to_le32(0x0);
 
        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);
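
The msg[] buffer is consumed by the UVD block as little-endian words, hence the cpu_to_le32() wrappers. By example:

	/*
	 * cpu_to_le32(0x00000de4) is a no-op on a little-endian host; on a
	 * big-endian host it stores the bytes e4 0d 00 00, which is the
	 * layout the little-endian UVD firmware expects.
	 */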
index c332fb9..957cfd4 100644 (file)
@@ -261,4 +261,17 @@ config SHMOBILE_IOMMU_L1SIZE
        default 256 if SHMOBILE_IOMMU_ADDRSIZE_64MB
        default 128 if SHMOBILE_IOMMU_ADDRSIZE_32MB
 
+config ARM_SMMU
+       bool "ARM Ltd. System MMU (SMMU) Support"
+       depends on ARM64 || (ARM_LPAE && OF)
+       select IOMMU_API
+       select ARM_DMA_USE_IOMMU if ARM
+       help
+         Support for implementations of the ARM System MMU architecture
+         versions 1 and 2. The driver supports both v7l and v8l table
+         formats with 4k and 64k page sizes.
+
+         Say Y here if your SoC includes an IOMMU device implementing
+         the ARM SMMU architecture.
+
 endif # IOMMU_SUPPORT
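
Consumers do not call into this driver directly; they reach it through the generic IOMMU API pulled in by the IOMMU_API select above. A minimal sketch of that path, assuming only a valid struct device on a bus the SMMU has claimed:

	#include <linux/iommu.h>

	static int attach_to_new_domain(struct device *dev)
	{
		struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
		int ret;

		if (!domain)
			return -ENOMEM;

		ret = iommu_attach_device(domain, dev);	/* dispatches to the SMMU ops */
		if (ret)
			iommu_domain_free(domain);
		return ret;
	}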
index ef0e520..bbe7041 100644 (file)
@@ -3,6 +3,7 @@ obj-$(CONFIG_OF_IOMMU)  += of_iommu.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
 obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
index 5a02d07..6dc6594 100644 (file)
@@ -1497,6 +1497,10 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
 
                        /* Large PTE found which maps this address */
                        unmap_size = PTE_PAGE_SIZE(*pte);
+
+                       /* Only unmap from the first pte in the page */
+                       if ((unmap_size - 1) & bus_addr)
+                               break;
                        count      = PAGE_SIZE_PTE_COUNT(unmap_size);
                        for (i = 0; i < count; i++)
                                pte[i] = 0ULL;
@@ -1506,7 +1510,7 @@ static unsigned long iommu_unmap_page(struct protection_domain *dom,
                unmapped += unmap_size;
        }
 
-       BUG_ON(!is_power_of_2(unmapped));
+       BUG_ON(unmapped && !is_power_of_2(unmapped));
 
        return unmapped;
 }
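
The new check relies on large-PTE sizes being powers of two, so unmap_size - 1 is a mask of the offset bits within the large page. By example (hypothetical addresses):

	/*
	 * For a 2 MiB large PTE, unmap_size = 0x200000 and
	 * unmap_size - 1 = 0x1fffff. A bus_addr of 0x40000000 (aligned)
	 * yields 0, so the PTEs are cleared; 0x40001000 (mid-page) yields
	 * a non-zero result and the loop breaks rather than clearing
	 * entries that belong to a mapping starting elsewhere.
	 */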
@@ -1906,34 +1910,59 @@ static void domain_id_free(int id)
        write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
+#define DEFINE_FREE_PT_FN(LVL, FN)                             \
+static void free_pt_##LVL (unsigned long __pt)                 \
+{                                                              \
+       unsigned long p;                                        \
+       u64 *pt;                                                \
+       int i;                                                  \
+                                                               \
+       pt = (u64 *)__pt;                                       \
+                                                               \
+       for (i = 0; i < 512; ++i) {                             \
+               if (!IOMMU_PTE_PRESENT(pt[i]))                  \
+                       continue;                               \
+                                                               \
+               p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);       \
+               FN(p);                                          \
+       }                                                       \
+       free_page((unsigned long)pt);                           \
+}
+
+DEFINE_FREE_PT_FN(l2, free_page)
+DEFINE_FREE_PT_FN(l3, free_pt_l2)
+DEFINE_FREE_PT_FN(l4, free_pt_l3)
+DEFINE_FREE_PT_FN(l5, free_pt_l4)
+DEFINE_FREE_PT_FN(l6, free_pt_l5)
+
 static void free_pagetable(struct protection_domain *domain)
 {
-       int i, j;
-       u64 *p1, *p2, *p3;
-
-       p1 = domain->pt_root;
-
-       if (!p1)
-               return;
-
-       for (i = 0; i < 512; ++i) {
-               if (!IOMMU_PTE_PRESENT(p1[i]))
-                       continue;
-
-               p2 = IOMMU_PTE_PAGE(p1[i]);
-               for (j = 0; j < 512; ++j) {
-                       if (!IOMMU_PTE_PRESENT(p2[j]))
-                               continue;
-                       p3 = IOMMU_PTE_PAGE(p2[j]);
-                       free_page((unsigned long)p3);
-               }
+       unsigned long root = (unsigned long)domain->pt_root;
 
-               free_page((unsigned long)p2);
+       switch (domain->mode) {
+       case PAGE_MODE_NONE:
+               break;
+       case PAGE_MODE_1_LEVEL:
+               free_page(root);
+               break;
+       case PAGE_MODE_2_LEVEL:
+               free_pt_l2(root);
+               break;
+       case PAGE_MODE_3_LEVEL:
+               free_pt_l3(root);
+               break;
+       case PAGE_MODE_4_LEVEL:
+               free_pt_l4(root);
+               break;
+       case PAGE_MODE_5_LEVEL:
+               free_pt_l5(root);
+               break;
+       case PAGE_MODE_6_LEVEL:
+               free_pt_l6(root);
+               break;
+       default:
+               BUG();
        }
-
-       free_page((unsigned long)p1);
-
-       domain->pt_root = NULL;
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
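
Each DEFINE_FREE_PT_FN() instantiation frees one page-table level by walking its 512 entries and handing every present entry to the next function down the chain, bottoming out at free_page(). Expanding the first instantiation mechanically gives (a sketch of the preprocessor output):

	static void free_pt_l2(unsigned long __pt)
	{
		unsigned long p;
		u64 *pt;
		int i;

		pt = (u64 *)__pt;

		for (i = 0; i < 512; ++i) {
			if (!IOMMU_PTE_PRESENT(pt[i]))
				continue;

			p = (unsigned long)IOMMU_PTE_PAGE(pt[i]);
			free_page(p);		/* for l2, FN is free_page */
		}
		free_page((unsigned long)pt);	/* then free the table itself */
	}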
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
new file mode 100644 (file)
index 0000000..ebd0a4c
--- /dev/null
@@ -0,0 +1,1969 @@
+/*
+ * IOMMU API for ARM architected SMMU implementations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This driver currently supports:
+ *     - SMMUv1 and v2 implementations
+ *     - Stream-matching and stream-indexing
+ *     - v7/v8 long-descriptor format
+ *     - Non-secure access to the SMMU
+ *     - 4k and 64k pages, with contiguous pte hints.
+ *     - Up to 39-bit addressing
+ *     - Context fault reporting
+ */
+
+#define pr_fmt(fmt) "arm-smmu: " fmt
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <linux/amba/bus.h>
+
+#include <asm/pgalloc.h>
+
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS           8
+
+/* Maximum number of context banks per SMMU */
+#define ARM_SMMU_MAX_CBS               128
+
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS              128
+
+/* Number of VMIDs per SMMU */
+#define ARM_SMMU_NUM_VMIDS             256
+
+/* SMMU global address space */
+#define ARM_SMMU_GR0(smmu)             ((smmu)->base)
+#define ARM_SMMU_GR1(smmu)             ((smmu)->base + (smmu)->pagesize)
+
+/* Page table bits */
+#define ARM_SMMU_PTE_PAGE              (((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_CONT              (((pteval_t)1) << 52)
+#define ARM_SMMU_PTE_AF                        (((pteval_t)1) << 10)
+#define ARM_SMMU_PTE_SH_NS             (((pteval_t)0) << 8)
+#define ARM_SMMU_PTE_SH_OS             (((pteval_t)2) << 8)
+#define ARM_SMMU_PTE_SH_IS             (((pteval_t)3) << 8)
+
+#if PAGE_SIZE == SZ_4K
+#define ARM_SMMU_PTE_CONT_ENTRIES      16
+#elif PAGE_SIZE == SZ_64K
+#define ARM_SMMU_PTE_CONT_ENTRIES      32
+#else
+#define ARM_SMMU_PTE_CONT_ENTRIES      1
+#endif
+
+#define ARM_SMMU_PTE_CONT_SIZE         (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
+#define ARM_SMMU_PTE_CONT_MASK         (~(ARM_SMMU_PTE_CONT_SIZE - 1))
+#define ARM_SMMU_PTE_HWTABLE_SIZE      (PTRS_PER_PTE * sizeof(pte_t))
+
+/* Stage-1 PTE */
+#define ARM_SMMU_PTE_AP_UNPRIV         (((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_AP_RDONLY         (((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_ATTRINDX_SHIFT    2
+
+/* Stage-2 PTE */
+#define ARM_SMMU_PTE_HAP_FAULT         (((pteval_t)0) << 6)
+#define ARM_SMMU_PTE_HAP_READ          (((pteval_t)1) << 6)
+#define ARM_SMMU_PTE_HAP_WRITE         (((pteval_t)2) << 6)
+#define ARM_SMMU_PTE_MEMATTR_OIWB      (((pteval_t)0xf) << 2)
+#define ARM_SMMU_PTE_MEMATTR_NC                (((pteval_t)0x5) << 2)
+#define ARM_SMMU_PTE_MEMATTR_DEV       (((pteval_t)0x1) << 2)
+
+/* Configuration registers */
+#define ARM_SMMU_GR0_sCR0              0x0
+#define sCR0_CLIENTPD                  (1 << 0)
+#define sCR0_GFRE                      (1 << 1)
+#define sCR0_GFIE                      (1 << 2)
+#define sCR0_GCFGFRE                   (1 << 4)
+#define sCR0_GCFGFIE                   (1 << 5)
+#define sCR0_USFCFG                    (1 << 10)
+#define sCR0_VMIDPNE                   (1 << 11)
+#define sCR0_PTM                       (1 << 12)
+#define sCR0_FB                                (1 << 13)
+#define sCR0_BSU_SHIFT                 14
+#define sCR0_BSU_MASK                  0x3
+
+/* Identification registers */
+#define ARM_SMMU_GR0_ID0               0x20
+#define ARM_SMMU_GR0_ID1               0x24
+#define ARM_SMMU_GR0_ID2               0x28
+#define ARM_SMMU_GR0_ID3               0x2c
+#define ARM_SMMU_GR0_ID4               0x30
+#define ARM_SMMU_GR0_ID5               0x34
+#define ARM_SMMU_GR0_ID6               0x38
+#define ARM_SMMU_GR0_ID7               0x3c
+#define ARM_SMMU_GR0_sGFSR             0x48
+#define ARM_SMMU_GR0_sGFSYNR0          0x50
+#define ARM_SMMU_GR0_sGFSYNR1          0x54
+#define ARM_SMMU_GR0_sGFSYNR2          0x58
+#define ARM_SMMU_GR0_PIDR0             0xfe0
+#define ARM_SMMU_GR0_PIDR1             0xfe4
+#define ARM_SMMU_GR0_PIDR2             0xfe8
+
+#define ID0_S1TS                       (1 << 30)
+#define ID0_S2TS                       (1 << 29)
+#define ID0_NTS                                (1 << 28)
+#define ID0_SMS                                (1 << 27)
+#define ID0_PTFS_SHIFT                 24
+#define ID0_PTFS_MASK                  0x2
+#define ID0_PTFS_V8_ONLY               0x2
+#define ID0_CTTW                       (1 << 14)
+#define ID0_NUMIRPT_SHIFT              16
+#define ID0_NUMIRPT_MASK               0xff
+#define ID0_NUMSMRG_SHIFT              0
+#define ID0_NUMSMRG_MASK               0xff
+
+#define ID1_PAGESIZE                   (1 << 31)
+#define ID1_NUMPAGENDXB_SHIFT          28
+#define ID1_NUMPAGENDXB_MASK           7
+#define ID1_NUMS2CB_SHIFT              16
+#define ID1_NUMS2CB_MASK               0xff
+#define ID1_NUMCB_SHIFT                        0
+#define ID1_NUMCB_MASK                 0xff
+
+#define ID2_OAS_SHIFT                  4
+#define ID2_OAS_MASK                   0xf
+#define ID2_IAS_SHIFT                  0
+#define ID2_IAS_MASK                   0xf
+#define ID2_UBS_SHIFT                  8
+#define ID2_UBS_MASK                   0xf
+#define ID2_PTFS_4K                    (1 << 12)
+#define ID2_PTFS_16K                   (1 << 13)
+#define ID2_PTFS_64K                   (1 << 14)
+
+#define PIDR2_ARCH_SHIFT               4
+#define PIDR2_ARCH_MASK                        0xf
+
+/* Global TLB invalidation */
+#define ARM_SMMU_GR0_STLBIALL          0x60
+#define ARM_SMMU_GR0_TLBIVMID          0x64
+#define ARM_SMMU_GR0_TLBIALLNSNH       0x68
+#define ARM_SMMU_GR0_TLBIALLH          0x6c
+#define ARM_SMMU_GR0_sTLBGSYNC         0x70
+#define ARM_SMMU_GR0_sTLBGSTATUS       0x74
+#define sTLBGSTATUS_GSACTIVE           (1 << 0)
+#define TLB_LOOP_TIMEOUT               1000000 /* 1s! */
+
+/* Stream mapping registers */
+#define ARM_SMMU_GR0_SMR(n)            (0x800 + ((n) << 2))
+#define SMR_VALID                      (1 << 31)
+#define SMR_MASK_SHIFT                 16
+#define SMR_MASK_MASK                  0x7fff
+#define SMR_ID_SHIFT                   0
+#define SMR_ID_MASK                    0x7fff
+
+#define ARM_SMMU_GR0_S2CR(n)           (0xc00 + ((n) << 2))
+#define S2CR_CBNDX_SHIFT               0
+#define S2CR_CBNDX_MASK                        0xff
+#define S2CR_TYPE_SHIFT                        16
+#define S2CR_TYPE_MASK                 0x3
+#define S2CR_TYPE_TRANS                        (0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS               (1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT                        (2 << S2CR_TYPE_SHIFT)
+
+/* Context bank attribute registers */
+#define ARM_SMMU_GR1_CBAR(n)           (0x0 + ((n) << 2))
+#define CBAR_VMID_SHIFT                        0
+#define CBAR_VMID_MASK                 0xff
+#define CBAR_S1_MEMATTR_SHIFT          12
+#define CBAR_S1_MEMATTR_MASK           0xf
+#define CBAR_S1_MEMATTR_WB             0xf
+#define CBAR_TYPE_SHIFT                        16
+#define CBAR_TYPE_MASK                 0x3
+#define CBAR_TYPE_S2_TRANS             (0 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_BYPASS   (1 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_FAULT    (2 << CBAR_TYPE_SHIFT)
+#define CBAR_TYPE_S1_TRANS_S2_TRANS    (3 << CBAR_TYPE_SHIFT)
+#define CBAR_IRPTNDX_SHIFT             24
+#define CBAR_IRPTNDX_MASK              0xff
+
+#define ARM_SMMU_GR1_CBA2R(n)          (0x800 + ((n) << 2))
+#define CBA2R_RW64_32BIT               (0 << 0)
+#define CBA2R_RW64_64BIT               (1 << 0)
+
+/* Translation context bank */
+#define ARM_SMMU_CB_BASE(smmu)         ((smmu)->base + ((smmu)->size >> 1))
+#define ARM_SMMU_CB(smmu, n)           ((n) * (smmu)->pagesize)
+
+#define ARM_SMMU_CB_SCTLR              0x0
+#define ARM_SMMU_CB_RESUME             0x8
+#define ARM_SMMU_CB_TTBCR2             0x10
+#define ARM_SMMU_CB_TTBR0_LO           0x20
+#define ARM_SMMU_CB_TTBR0_HI           0x24
+#define ARM_SMMU_CB_TTBCR              0x30
+#define ARM_SMMU_CB_S1_MAIR0           0x38
+#define ARM_SMMU_CB_FSR                        0x58
+#define ARM_SMMU_CB_FAR_LO             0x60
+#define ARM_SMMU_CB_FAR_HI             0x64
+#define ARM_SMMU_CB_FSYNR0             0x68
+
+#define SCTLR_S1_ASIDPNE               (1 << 12)
+#define SCTLR_CFCFG                    (1 << 7)
+#define SCTLR_CFIE                     (1 << 6)
+#define SCTLR_CFRE                     (1 << 5)
+#define SCTLR_E                                (1 << 4)
+#define SCTLR_AFE                      (1 << 2)
+#define SCTLR_TRE                      (1 << 1)
+#define SCTLR_M                                (1 << 0)
+#define SCTLR_EAE_SBOP                 (SCTLR_AFE | SCTLR_TRE)
+
+#define RESUME_RETRY                   (0 << 0)
+#define RESUME_TERMINATE               (1 << 0)
+
+#define TTBCR_EAE                      (1 << 31)
+
+#define TTBCR_PASIZE_SHIFT             16
+#define TTBCR_PASIZE_MASK              0x7
+
+#define TTBCR_TG0_4K                   (0 << 14)
+#define TTBCR_TG0_64K                  (1 << 14)
+
+#define TTBCR_SH0_SHIFT                        12
+#define TTBCR_SH0_MASK                 0x3
+#define TTBCR_SH_NS                    0
+#define TTBCR_SH_OS                    2
+#define TTBCR_SH_IS                    3
+
+#define TTBCR_ORGN0_SHIFT              10
+#define TTBCR_IRGN0_SHIFT              8
+#define TTBCR_RGN_MASK                 0x3
+#define TTBCR_RGN_NC                   0
+#define TTBCR_RGN_WBWA                 1
+#define TTBCR_RGN_WT                   2
+#define TTBCR_RGN_WB                   3
+
+#define TTBCR_SL0_SHIFT                        6
+#define TTBCR_SL0_MASK                 0x3
+#define TTBCR_SL0_LVL_2                        0
+#define TTBCR_SL0_LVL_1                        1
+
+#define TTBCR_T1SZ_SHIFT               16
+#define TTBCR_T0SZ_SHIFT               0
+#define TTBCR_SZ_MASK                  0xf
+
+#define TTBCR2_SEP_SHIFT               15
+#define TTBCR2_SEP_MASK                        0x7
+
+#define TTBCR2_PASIZE_SHIFT            0
+#define TTBCR2_PASIZE_MASK             0x7
+
+/* Common definitions for PASize and SEP fields */
+#define TTBCR2_ADDR_32                 0
+#define TTBCR2_ADDR_36                 1
+#define TTBCR2_ADDR_40                 2
+#define TTBCR2_ADDR_42                 3
+#define TTBCR2_ADDR_44                 4
+#define TTBCR2_ADDR_48                 5
+
+#define MAIR_ATTR_SHIFT(n)             ((n) << 3)
+#define MAIR_ATTR_MASK                 0xff
+#define MAIR_ATTR_DEVICE               0x04
+#define MAIR_ATTR_NC                   0x44
+#define MAIR_ATTR_WBRWA                        0xff
+#define MAIR_ATTR_IDX_NC               0
+#define MAIR_ATTR_IDX_CACHE            1
+#define MAIR_ATTR_IDX_DEV              2
+
+#define FSR_MULTI                      (1 << 31)
+#define FSR_SS                         (1 << 30)
+#define FSR_UUT                                (1 << 8)
+#define FSR_ASF                                (1 << 7)
+#define FSR_TLBLKF                     (1 << 6)
+#define FSR_TLBMCF                     (1 << 5)
+#define FSR_EF                         (1 << 4)
+#define FSR_PF                         (1 << 3)
+#define FSR_AFF                                (1 << 2)
+#define FSR_TF                         (1 << 1)
+
+#define FSR_IGN                                (FSR_AFF | FSR_ASF | FSR_TLBMCF |       \
+                                        FSR_TLBLKF)
+#define FSR_FAULT                      (FSR_MULTI | FSR_SS | FSR_UUT |         \
+                                        FSR_EF | FSR_PF | FSR_TF)
+
+#define FSYNR0_WNR                     (1 << 4)
+
+struct arm_smmu_smr {
+       u8                              idx;
+       u16                             mask;
+       u16                             id;
+};
+
+struct arm_smmu_master {
+       struct device_node              *of_node;
+
+       /*
+        * The following is specific to the master's position in the
+        * SMMU chain.
+        */
+       struct rb_node                  node;
+       int                             num_streamids;
+       u16                             streamids[MAX_MASTER_STREAMIDS];
+
+       /*
+        * We only need to allocate these on the root SMMU, as we
+        * configure unmatched streams to bypass translation.
+        */
+       struct arm_smmu_smr             *smrs;
+};
+
+struct arm_smmu_device {
+       struct device                   *dev;
+       struct device_node              *parent_of_node;
+
+       void __iomem                    *base;
+       unsigned long                   size;
+       unsigned long                   pagesize;
+
+#define ARM_SMMU_FEAT_COHERENT_WALK    (1 << 0)
+#define ARM_SMMU_FEAT_STREAM_MATCH     (1 << 1)
+#define ARM_SMMU_FEAT_TRANS_S1         (1 << 2)
+#define ARM_SMMU_FEAT_TRANS_S2         (1 << 3)
+#define ARM_SMMU_FEAT_TRANS_NESTED     (1 << 4)
+       u32                             features;
+       int                             version;
+
+       u32                             num_context_banks;
+       u32                             num_s2_context_banks;
+       DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
+       atomic_t                        irptndx;
+
+       u32                             num_mapping_groups;
+       DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+
+       unsigned long                   input_size;
+       unsigned long                   s1_output_size;
+       unsigned long                   s2_output_size;
+
+       u32                             num_global_irqs;
+       u32                             num_context_irqs;
+       unsigned int                    *irqs;
+
+       DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);
+
+       struct list_head                list;
+       struct rb_root                  masters;
+};
+
+struct arm_smmu_cfg {
+       struct arm_smmu_device          *smmu;
+       u8                              vmid;
+       u8                              cbndx;
+       u8                              irptndx;
+       u32                             cbar;
+       pgd_t                           *pgd;
+};
+
+struct arm_smmu_domain {
+       /*
+        * A domain can span multiple, chained SMMUs and requires
+        * all devices within the domain to follow the same translation
+        * path.
+        */
+       struct arm_smmu_device          *leaf_smmu;
+       struct arm_smmu_cfg             root_cfg;
+       phys_addr_t                     output_mask;
+
+       spinlock_t                      lock;
+};
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+                                               struct device_node *dev_node)
+{
+       struct rb_node *node = smmu->masters.rb_node;
+
+       while (node) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+
+               if (dev_node < master->of_node)
+                       node = node->rb_left;
+               else if (dev_node > master->of_node)
+                       node = node->rb_right;
+               else
+                       return master;
+       }
+
+       return NULL;
+}
+
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+                             struct arm_smmu_master *master)
+{
+       struct rb_node **new, *parent;
+
+       new = &smmu->masters.rb_node;
+       parent = NULL;
+       while (*new) {
+               struct arm_smmu_master *this;
+               this = container_of(*new, struct arm_smmu_master, node);
+
+               parent = *new;
+               if (master->of_node < this->of_node)
+                       new = &((*new)->rb_left);
+               else if (master->of_node > this->of_node)
+                       new = &((*new)->rb_right);
+               else
+                       return -EEXIST;
+       }
+
+       rb_link_node(&master->node, parent, new);
+       rb_insert_color(&master->node, &smmu->masters);
+       return 0;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+                               struct device *dev,
+                               struct of_phandle_args *masterspec)
+{
+       int i;
+       struct arm_smmu_master *master;
+
+       master = find_smmu_master(smmu, masterspec->np);
+       if (master) {
+               dev_err(dev,
+                       "rejecting multiple registrations for master device %s\n",
+                       masterspec->np->name);
+               return -EBUSY;
+       }
+
+       if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+               dev_err(dev,
+                       "reached maximum number (%d) of stream IDs for master device %s\n",
+                       MAX_MASTER_STREAMIDS, masterspec->np->name);
+               return -ENOSPC;
+       }
+
+       master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+       if (!master)
+               return -ENOMEM;
+
+       master->of_node         = masterspec->np;
+       master->num_streamids   = masterspec->args_count;
+
+       for (i = 0; i < master->num_streamids; ++i)
+               master->streamids[i] = masterspec->args[i];
+
+       return insert_smmu_master(smmu, master);
+}
+
+static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
+{
+       struct arm_smmu_device *parent;
+
+       if (!smmu->parent_of_node)
+               return NULL;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(parent, &arm_smmu_devices, list)
+               if (parent->dev->of_node == smmu->parent_of_node)
+                       goto out_unlock;
+
+       parent = NULL;
+       dev_warn(smmu->dev,
+                "Failed to find SMMU parent despite parent in DT\n");
+out_unlock:
+       spin_unlock(&arm_smmu_devices_lock);
+       return parent;
+}
+
+static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
+{
+       int idx;
+
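+       /*
+        * find_next_zero_bit() is not atomic with respect to other CPUs:
+        * the bit it finds may be claimed before test_and_set_bit() runs,
+        * in which case the loop simply searches again.
+        */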
+       do {
+               idx = find_next_zero_bit(map, end, start);
+               if (idx == end)
+                       return -ENOSPC;
+       } while (test_and_set_bit(idx, map));
+
+       return idx;
+}
+
+static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
+{
+       clear_bit(idx, map);
+}
+
+/* Wait for any pending TLB invalidations to complete */
+static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
+{
+       int count = 0;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
+       while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
+              & sTLBGSTATUS_GSACTIVE) {
+               cpu_relax();
+               if (++count == TLB_LOOP_TIMEOUT) {
+                       dev_err_ratelimited(smmu->dev,
+                       "TLB sync timed out -- SMMU may be deadlocked\n");
+                       return;
+               }
+               udelay(1);
+       }
+}
+
+static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
+{
+       int flags, ret;
+       u32 fsr, far, fsynr, resume;
+       unsigned long iova;
+       struct iommu_domain *domain = dev;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *cb_base;
+
+       cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+       fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+
+       if (!(fsr & FSR_FAULT))
+               return IRQ_NONE;
+
+       if (fsr & FSR_IGN)
+               dev_err_ratelimited(smmu->dev,
+                                   "Unexpected context fault (fsr 0x%x)\n",
+                                   fsr);
+
+       fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+       flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+       far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
+       iova = far;
+#ifdef CONFIG_64BIT
+       far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
+       iova |= ((unsigned long)far << 32);
+#endif
+
+       if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
+               ret = IRQ_HANDLED;
+               resume = RESUME_RETRY;
+       } else {
+               ret = IRQ_NONE;
+               resume = RESUME_TERMINATE;
+       }
+
+       /* Clear the faulting FSR */
+       writel(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+       /* Retry or terminate any stalled transactions */
+       if (fsr & FSR_SS)
+               writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+
+       return ret;
+}
+
+static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
+{
+       u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
+       struct arm_smmu_device *smmu = dev;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
+       gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
+       gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
+       gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
+
+       dev_err_ratelimited(smmu->dev,
+               "Unexpected global fault, this could be serious\n");
+       dev_err_ratelimited(smmu->dev,
+               "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
+               gfsr, gfsynr0, gfsynr1, gfsynr2);
+
+       writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+       return IRQ_NONE;
+}
+
+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
+{
+       u32 reg;
+       bool stage1;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *cb_base, *gr0_base, *gr1_base;
+
+       gr0_base = ARM_SMMU_GR0(smmu);
+       gr1_base = ARM_SMMU_GR1(smmu);
+       stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
+       cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
+
+       /* CBAR */
+       reg = root_cfg->cbar |
+             (root_cfg->vmid << CBAR_VMID_SHIFT);
+       if (smmu->version == 1)
+             reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
+
+       /* Use the weakest memory type, so it is overridden by the pte */
+       if (stage1)
+               reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+       writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
+
+       if (smmu->version > 1) {
+               /* CBA2R */
+#ifdef CONFIG_64BIT
+               reg = CBA2R_RW64_64BIT;
+#else
+               reg = CBA2R_RW64_32BIT;
+#endif
+               writel_relaxed(reg,
+                              gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));
+
+               /* TTBCR2 */
+               switch (smmu->input_size) {
+               case 32:
+                       reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 36:
+                       reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 39:
+                       reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 42:
+                       reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 44:
+                       reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
+                       break;
+               case 48:
+                       reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
+                       break;
+               }
+
+               switch (smmu->s1_output_size) {
+               case 32:
+                       reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 36:
+                       reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 39:
+                       reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 42:
+                       reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 44:
+                       reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               case 48:
+                       reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
+                       break;
+               }
+
+               if (stage1)
+                       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
+       }
+
+       /* TTBR0 */
+       reg = __pa(root_cfg->pgd);
+#ifndef __BIG_ENDIAN
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+       reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+#else
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);
+       reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
+#endif
+
+       /*
+        * TTBCR
+        * We use long descriptor, with inner-shareable WBWA tables in TTBR0.
+        */
+       if (smmu->version > 1) {
+               if (PAGE_SIZE == SZ_4K)
+                       reg = TTBCR_TG0_4K;
+               else
+                       reg = TTBCR_TG0_64K;
+
+               if (!stage1) {
+                       switch (smmu->s2_output_size) {
+                       case 32:
+                               reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 36:
+                               reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 40:
+                               reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 42:
+                               reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 44:
+                               reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       case 48:
+                               reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
+                               break;
+                       }
+               } else {
+                       reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
+               }
+       } else {
+               reg = 0;
+       }
+
+       reg |= TTBCR_EAE |
+             (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
+             (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
+             (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
+             (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
+       writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+
+       /* MAIR0 (stage-1 only) */
+       if (stage1) {
+               reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
+                     (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
+                     (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
+               writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
+       }
+
+       /* Nuke the TLB */
+       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_tlb_sync(smmu);
+
+       /* SCTLR */
+       reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
+       if (stage1)
+               reg |= SCTLR_S1_ASIDPNE;
+#ifdef __BIG_ENDIAN
+       reg |= SCTLR_E;
+#endif
+       writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
+}
+
+static int arm_smmu_init_domain_context(struct iommu_domain *domain,
+                                       struct device *dev)
+{
+       int irq, ret, start;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu, *parent;
+
+       /*
+        * Walk the SMMU chain to find the root device for this chain.
+        * We assume that no masters have translations which terminate
+        * early, and therefore check that the root SMMU does indeed have
+        * a StreamID for the master in question.
+        */
+       parent = dev->archdata.iommu;
+       smmu_domain->output_mask = -1;
+       do {
+               smmu = parent;
+               smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
+       } while ((parent = find_parent_smmu(smmu)));
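+       /*
+        * output_mask now holds the tightest output mask anywhere along
+        * the chain, so silent truncation on the way to memory can be
+        * caught at map time.
+        */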
+
+       if (!find_smmu_master(smmu, dev->of_node)) {
+               dev_err(dev, "unable to find root SMMU for device\n");
+               return -ENODEV;
+       }
+
+       ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
+       if (IS_ERR_VALUE(ret))
+               return ret;
+
+       root_cfg->vmid = ret;
+       if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+               /*
+                * We will likely want to change this if/when KVM gets
+                * involved.
+                */
+               root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+               start = smmu->num_s2_context_banks;
+       } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
+               root_cfg->cbar = CBAR_TYPE_S2_TRANS;
+               start = 0;
+       } else {
+               root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+               start = smmu->num_s2_context_banks;
+       }
+
+       ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+                                     smmu->num_context_banks);
+       if (IS_ERR_VALUE(ret))
+               goto out_free_vmid;
+
+       root_cfg->cbndx = ret;
+
+       if (smmu->version == 1) {
+               root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
+               root_cfg->irptndx %= smmu->num_context_irqs;
+       } else {
+               root_cfg->irptndx = root_cfg->cbndx;
+       }
+
+       irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+       ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
+                         "arm-smmu-context-fault", domain);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+                       root_cfg->irptndx, irq);
+               root_cfg->irptndx = -1;
+               goto out_free_context;
+       }
+
+       root_cfg->smmu = smmu;
+       arm_smmu_init_context_bank(smmu_domain);
+       return ret;
+
+out_free_context:
+       __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+out_free_vmid:
+       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+       return ret;
+}
+
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       int irq;
+
+       if (!smmu)
+               return;
+
+       if (root_cfg->irptndx != -1) {
+               irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
+               free_irq(irq, domain);
+       }
+
+       __arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
+       __arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
+}
+
+static int arm_smmu_domain_init(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain;
+       pgd_t *pgd;
+
+       /*
+        * Allocate the domain and initialise some of its data structures.
+        * We can't really do anything meaningful until we've added a
+        * master.
+        */
+       smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+       if (!smmu_domain)
+               return -ENOMEM;
+
+       pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       if (!pgd)
+               goto out_free_domain;
+       smmu_domain->root_cfg.pgd = pgd;
+
+       spin_lock_init(&smmu_domain->lock);
+       domain->priv = smmu_domain;
+       return 0;
+
+out_free_domain:
+       kfree(smmu_domain);
+       return -ENOMEM;
+}
+
+static void arm_smmu_free_ptes(pmd_t *pmd)
+{
+       pgtable_t table = pmd_pgtable(*pmd);
+       pgtable_page_dtor(table);
+       __free_page(table);
+}
+
+static void arm_smmu_free_pmds(pud_t *pud)
+{
+       int i;
+       pmd_t *pmd, *pmd_base = pmd_offset(pud, 0);
+
+       pmd = pmd_base;
+       for (i = 0; i < PTRS_PER_PMD; ++i, ++pmd) {
+               if (pmd_none(*pmd))
+                       continue;
+
+               arm_smmu_free_ptes(pmd);
+       }
+
+       pmd_free(NULL, pmd_base);
+}
+
+static void arm_smmu_free_puds(pgd_t *pgd)
+{
+       int i;
+       pud_t *pud, *pud_base = pud_offset(pgd, 0);
+
+       pud = pud_base;
+       for (i = 0; i < PTRS_PER_PUD; ++i, ++pud) {
+               if (pud_none(*pud))
+                       continue;
+
+               arm_smmu_free_pmds(pud);
+       }
+
+       pud_free(NULL, pud_base);
+}
+
+static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
+{
+       int i;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       pgd_t *pgd, *pgd_base = root_cfg->pgd;
+
+       /*
+        * Recursively free the page tables for this domain. We don't
+        * care about speculative TLB filling, because the TLB will be
+        * nuked next time this context bank is re-allocated and no devices
+        * currently map to these tables.
+        */
+       pgd = pgd_base;
+       for (i = 0; i < PTRS_PER_PGD; ++i, ++pgd) {
+               if (pgd_none(*pgd))
+                       continue;
+               arm_smmu_free_puds(pgd);
+       }
+
+       kfree(pgd_base);
+}
+
+static void arm_smmu_domain_destroy(struct iommu_domain *domain)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       arm_smmu_destroy_domain_context(domain);
+       arm_smmu_free_pgtables(smmu_domain);
+       kfree(smmu_domain);
+}
+
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+                                         struct arm_smmu_master *master)
+{
+       int i;
+       struct arm_smmu_smr *smrs;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
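+       /* With stream indexing, the StreamID is used directly; no SMRs. */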
+       if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+               return 0;
+
+       if (master->smrs)
+               return -EEXIST;
+
+       smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
+       if (!smrs) {
+               dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
+                       master->num_streamids, master->of_node->name);
+               return -ENOMEM;
+       }
+
+       /* Allocate the SMRs on the root SMMU */
+       for (i = 0; i < master->num_streamids; ++i) {
+               int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+                                                 smmu->num_mapping_groups);
+               if (IS_ERR_VALUE(idx)) {
+                       dev_err(smmu->dev, "failed to allocate free SMR\n");
+                       goto err_free_smrs;
+               }
+
+               smrs[i] = (struct arm_smmu_smr) {
+                       .idx    = idx,
+                       .mask   = 0, /* We don't currently share SMRs */
+                       .id     = master->streamids[i],
+               };
+       }
+
+       /* It worked! Now, poke the actual hardware */
+       for (i = 0; i < master->num_streamids; ++i) {
+               u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+                         smrs[i].mask << SMR_MASK_SHIFT;
+               writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+       }
+
+       master->smrs = smrs;
+       return 0;
+
+err_free_smrs:
+       while (--i >= 0)
+               __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+       kfree(smrs);
+       return -ENOSPC;
+}
+
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+                                     struct arm_smmu_master *master)
+{
+       int i;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       struct arm_smmu_smr *smrs = master->smrs;
+
+       /* Invalidate the SMRs before freeing back to the allocator */
+       for (i = 0; i < master->num_streamids; ++i) {
+               u8 idx = smrs[i].idx;
+               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+               __arm_smmu_free_bitmap(smmu->smr_map, idx);
+       }
+
+       master->smrs = NULL;
+       kfree(smrs);
+}
+
+static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
+                                          struct arm_smmu_master *master)
+{
+       int i;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       for (i = 0; i < master->num_streamids; ++i) {
+               u16 sid = master->streamids[i];
+               writel_relaxed(S2CR_TYPE_BYPASS,
+                              gr0_base + ARM_SMMU_GR0_S2CR(sid));
+       }
+}
+
+static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+                                     struct arm_smmu_master *master)
+{
+       int i, ret;
+       struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       ret = arm_smmu_master_configure_smrs(smmu, master);
+       if (ret)
+               return ret;
+
+       /* Bypass the leaves */
+       smmu = smmu_domain->leaf_smmu;
+       while ((parent = find_parent_smmu(smmu))) {
+               /*
+                * We won't have a StreamID match for anything but the root
+                * smmu, so we only need to worry about StreamID indexing,
+                * where we must install bypass entries in the S2CRs.
+                */
+               if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+                       arm_smmu_bypass_stream_mapping(smmu, master);
+
+               smmu = parent;
+       }
+
+       /* Now we're at the root, time to point at our context bank */
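+       /*
+        * With stream matching, the S2CR index follows the SMR we
+        * allocated; with stream indexing, it is the StreamID itself.
+        */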
+       for (i = 0; i < master->num_streamids; ++i) {
+               u32 idx, s2cr;
+               idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
+               s2cr = (S2CR_TYPE_TRANS << S2CR_TYPE_SHIFT) |
+                      (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
+               writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+       }
+
+       return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+                                         struct arm_smmu_master *master)
+{
+       struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;
+
+       /*
+        * We *must* clear the S2CR first, because freeing the SMR means
+        * that it can be re-allocated immediately.
+        */
+       arm_smmu_bypass_stream_mapping(smmu, master);
+       arm_smmu_master_free_smrs(smmu, master);
+}
+
+static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+       int ret = -EINVAL;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_device *device_smmu = dev->archdata.iommu;
+       struct arm_smmu_master *master;
+
+       if (!device_smmu) {
+               dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
+               return -ENXIO;
+       }
+
+       /*
+        * Sanity check the domain. We don't currently support domains
+        * that cross between different SMMU chains.
+        */
+       spin_lock(&smmu_domain->lock);
+       if (!smmu_domain->leaf_smmu) {
+               /* Now that we have a master, we can finalise the domain */
+               ret = arm_smmu_init_domain_context(domain, dev);
+               if (IS_ERR_VALUE(ret))
+                       goto err_unlock;
+
+               smmu_domain->leaf_smmu = device_smmu;
+       } else if (smmu_domain->leaf_smmu != device_smmu) {
+               dev_err(dev,
+                       "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
+                       dev_name(smmu_domain->leaf_smmu->dev),
+                       dev_name(device_smmu->dev));
+               goto err_unlock;
+       }
+       spin_unlock(&smmu_domain->lock);
+
+       /* Looks ok, so add the device to the domain */
+       master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+       if (!master)
+               return -ENODEV;
+
+       return arm_smmu_domain_add_master(smmu_domain, master);
+
+err_unlock:
+       spin_unlock(&smmu_domain->lock);
+       return ret;
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_master *master;
+
+       master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
+       if (master)
+               arm_smmu_domain_remove_master(smmu_domain, master);
+}
+
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+                                  size_t size)
+{
+       unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+       /*
+        * If the SMMU can't walk tables in the CPU caches, treat them
+        * like non-coherent DMA since we need to flush the new entries
+        * all the way out to memory. There's no possibility of recursion
+        * here as the SMMU table walker will not be wired through another
+        * SMMU.
+        */
+       if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
+               dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+                            DMA_TO_DEVICE);
+}
+
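+/*
+ * True when [addr, end) covers at least one whole, naturally aligned
+ * contiguous-hint region (ARM_SMMU_PTE_CONT_ENTRIES worth of pages).
+ */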
+static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
+                                            unsigned long end)
+{
+       return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
+               (addr + ARM_SMMU_PTE_CONT_SIZE <= end);
+}
+
+static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
+                                  unsigned long addr, unsigned long end,
+                                  unsigned long pfn, int flags, int stage)
+{
+       pte_t *pte, *start;
+       pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+
+       if (pmd_none(*pmd)) {
+               /* Allocate a new set of tables */
+               pgtable_t table = alloc_page(PGALLOC_GFP);
+               if (!table)
+                       return -ENOMEM;
+
+               arm_smmu_flush_pgtable(smmu, page_address(table),
+                                      ARM_SMMU_PTE_HWTABLE_SIZE);
+               pgtable_page_ctor(table);
+               pmd_populate(NULL, pmd, table);
+               arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
+       }
+
+       if (stage == 1) {
+               pteval |= ARM_SMMU_PTE_AP_UNPRIV;
+               if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
+                       pteval |= ARM_SMMU_PTE_AP_RDONLY;
+
+               if (flags & IOMMU_CACHE)
+                       pteval |= (MAIR_ATTR_IDX_CACHE <<
+                                  ARM_SMMU_PTE_ATTRINDX_SHIFT);
+       } else {
+               pteval |= ARM_SMMU_PTE_HAP_FAULT;
+               if (flags & IOMMU_READ)
+                       pteval |= ARM_SMMU_PTE_HAP_READ;
+               if (flags & IOMMU_WRITE)
+                       pteval |= ARM_SMMU_PTE_HAP_WRITE;
+               if (flags & IOMMU_CACHE)
+                       pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
+               else
+                       pteval |= ARM_SMMU_PTE_MEMATTR_NC;
+       }
+
+       /* If no access, create a faulting entry to avoid TLB fills */
+       if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+               pteval &= ~ARM_SMMU_PTE_PAGE;
+
+       pteval |= ARM_SMMU_PTE_SH_IS;
+       start = pmd_page_vaddr(*pmd) + pte_index(addr);
+       pte = start;
+
+       /*
+        * Install the page table entries. This is fairly complicated
+        * since we attempt to make use of the contiguous hint in the
+        * ptes where possible. The contiguous hint indicates a series
+        * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
+        * contiguous region with the following constraints:
+        *
+        *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
+        *   - Each pte in the region has the contiguous hint bit set
+        *
+        * This complicates unmapping (also handled by this code, when
+        * neither IOMMU_READ nor IOMMU_WRITE is set) because it is
+        * possible, yet highly unlikely, that a client may unmap only
+        * part of a contiguous range. This requires clearing of the
+        * contiguous hint bits in the range before installing the new
+        * faulting entries.
+        *
+        * Note that re-mapping an address range without first unmapping
+        * it is not supported, so TLB invalidation is not required here
+        * and is instead performed at unmap and domain-init time.
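+        *
+        * As an example, with 4K pages and a 16-entry contiguous hint,
+        * only runs that fill a naturally aligned 64K window receive
+        * the hint bit.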
+        */
+       do {
+               int i = 1;
+               pteval &= ~ARM_SMMU_PTE_CONT;
+
+               if (arm_smmu_pte_is_contiguous_range(addr, end)) {
+                       i = ARM_SMMU_PTE_CONT_ENTRIES;
+                       pteval |= ARM_SMMU_PTE_CONT;
+               } else if (pte_val(*pte) &
+                          (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
+                       int j;
+                       pte_t *cont_start;
+                       unsigned long idx = pte_index(addr);
+
+                       idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
+                       cont_start = pmd_page_vaddr(*pmd) + idx;
+                       for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
+                               pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;
+
+                       arm_smmu_flush_pgtable(smmu, cont_start,
+                                              sizeof(*pte) *
+                                              ARM_SMMU_PTE_CONT_ENTRIES);
+               }
+
+               do {
+                       *pte = pfn_pte(pfn, __pgprot(pteval));
+               } while (pte++, pfn++, addr += PAGE_SIZE, --i);
+       } while (addr != end);
+
+       arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
+       return 0;
+}
+
+static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
+                                  unsigned long addr, unsigned long end,
+                                  phys_addr_t phys, int flags, int stage)
+{
+       int ret;
+       pmd_t *pmd;
+       unsigned long next, pfn = __phys_to_pfn(phys);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+       if (pud_none(*pud)) {
+               pmd = pmd_alloc_one(NULL, addr);
+               if (!pmd)
+                       return -ENOMEM;
+       } else
+#endif
+               pmd = pmd_offset(pud, addr);
+
+       do {
+               next = pmd_addr_end(addr, end);
+               ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
+                                             flags, stage);
+               pud_populate(NULL, pud, pmd);
+               arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+               phys += next - addr;
+               pfn = __phys_to_pfn(phys);
+       } while (pmd++, addr = next, addr < end);
+
+       return ret;
+}
+
+static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
+                                  unsigned long addr, unsigned long end,
+                                  phys_addr_t phys, int flags, int stage)
+{
+       int ret = 0;
+       pud_t *pud;
+       unsigned long next;
+
+#ifndef __PAGETABLE_PUD_FOLDED
+       if (pgd_none(*pgd)) {
+               pud = pud_alloc_one(NULL, addr);
+               if (!pud)
+                       return -ENOMEM;
+       } else
+#endif
+               pud = pud_offset(pgd, addr);
+
+       do {
+               next = pud_addr_end(addr, end);
+               ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
+                                             flags, stage);
+               pgd_populate(NULL, pgd, pud);
+               arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+               phys += next - addr;
+       } while (pud++, addr = next, addr < end);
+
+       return ret;
+}
+
+static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
+                                  unsigned long iova, phys_addr_t paddr,
+                                  size_t size, int flags)
+{
+       int ret, stage;
+       unsigned long end;
+       phys_addr_t input_mask, output_mask;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       pgd_t *pgd = root_cfg->pgd;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+
+       if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
+               stage = 2;
+               output_mask = (1ULL << smmu->s2_output_size) - 1;
+       } else {
+               stage = 1;
+               output_mask = (1ULL << smmu->s1_output_size) - 1;
+       }
+
+       if (!pgd)
+               return -EINVAL;
+
+       if (size & ~PAGE_MASK)
+               return -EINVAL;
+
+       input_mask = (1ULL << smmu->input_size) - 1;
+       if ((phys_addr_t)iova & ~input_mask)
+               return -ERANGE;
+
+       if (paddr & ~output_mask)
+               return -ERANGE;
+
+       spin_lock(&smmu_domain->lock);
+       pgd += pgd_index(iova);
+       end = iova + size;
+       do {
+               unsigned long next = pgd_addr_end(iova, end);
+
+               ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
+                                             flags, stage);
+               if (ret)
+                       goto out_unlock;
+
+               paddr += next - iova;
+               iova = next;
+       } while (pgd++, iova != end);
+
+out_unlock:
+       spin_unlock(&smmu_domain->lock);
+
+       /* Ensure new page tables are visible to the hardware walker */
+       if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               dsb();
+
+       return ret;
+}
+
+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
+                       phys_addr_t paddr, size_t size, int flags)
+{
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
+
+       if (!smmu_domain || !smmu)
+               return -ENODEV;
+
+       /* Check for silent address truncation up the SMMU chain. */
+       if ((phys_addr_t)iova & ~smmu_domain->output_mask)
+               return -ERANGE;
+
+       return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
+}
+
+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
+                            size_t size)
+{
+       int ret;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+       ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
+       writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
+       arm_smmu_tlb_sync(smmu);
+       return ret ? ret : size;
+}
+
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+                                        dma_addr_t iova)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+       struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
+       struct arm_smmu_device *smmu = root_cfg->smmu;
+
+       spin_lock(&smmu_domain->lock);
+       pgd = root_cfg->pgd;
+       if (!pgd)
+               goto err_unlock;
+
+       pgd += pgd_index(iova);
+       if (pgd_none_or_clear_bad(pgd))
+               goto err_unlock;
+
+       pud = pud_offset(pgd, iova);
+       if (pud_none_or_clear_bad(pud))
+               goto err_unlock;
+
+       pmd = pmd_offset(pud, iova);
+       if (pmd_none_or_clear_bad(pmd))
+               goto err_unlock;
+
+       pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+       if (pte_none(*pte))
+               goto err_unlock;
+
+       spin_unlock(&smmu_domain->lock);
+       return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+
+err_unlock:
+       spin_unlock(&smmu_domain->lock);
+       dev_warn(smmu->dev,
+                "invalid (corrupt?) page tables detected for iova 0x%llx\n",
+                (unsigned long long)iova);
+       return 0;
+}
+
+static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
+                                  unsigned long cap)
+{
+       unsigned long caps = 0;
+       struct arm_smmu_domain *smmu_domain = domain->priv;
+
+       if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+               caps |= IOMMU_CAP_CACHE_COHERENCY;
+
+       return !!(cap & caps);
+}
+
+static int arm_smmu_add_device(struct device *dev)
+{
+       struct arm_smmu_device *child, *parent, *smmu;
+       struct arm_smmu_master *master = NULL;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(parent, &arm_smmu_devices, list) {
+               smmu = parent;
+
+               /* Try to find a child of the current SMMU. */
+               list_for_each_entry(child, &arm_smmu_devices, list) {
+                       if (child->parent_of_node == parent->dev->of_node) {
+                               /* Does the child sit above our master? */
+                               master = find_smmu_master(child, dev->of_node);
+                               if (master) {
+                                       smmu = NULL;
+                                       break;
+                               }
+                       }
+               }
+
+               /* We found some children, so keep searching. */
+               if (!smmu) {
+                       master = NULL;
+                       continue;
+               }
+
+               master = find_smmu_master(smmu, dev->of_node);
+               if (master)
+                       break;
+       }
+       spin_unlock(&arm_smmu_devices_lock);
+
+       if (!master)
+               return -ENODEV;
+
+       dev->archdata.iommu = smmu;
+       return 0;
+}
+
+static void arm_smmu_remove_device(struct device *dev)
+{
+       dev->archdata.iommu = NULL;
+}
+
+static struct iommu_ops arm_smmu_ops = {
+       .domain_init    = arm_smmu_domain_init,
+       .domain_destroy = arm_smmu_domain_destroy,
+       .attach_dev     = arm_smmu_attach_dev,
+       .detach_dev     = arm_smmu_detach_dev,
+       .map            = arm_smmu_map,
+       .unmap          = arm_smmu_unmap,
+       .iova_to_phys   = arm_smmu_iova_to_phys,
+       .domain_has_cap = arm_smmu_domain_has_cap,
+       .add_device     = arm_smmu_add_device,
+       .remove_device  = arm_smmu_remove_device,
+       .pgsize_bitmap  = (SECTION_SIZE |
+                          ARM_SMMU_PTE_CONT_SIZE |
+                          PAGE_SIZE),
+};
+
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       int i = 0;
+       u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);
+
+       /* Mark all SMRn as invalid and all S2CRn as bypass */
+       for (i = 0; i < smmu->num_mapping_groups; ++i) {
+               writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
+               writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
+       }
+
+       /* Invalidate the TLB, just in case */
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+       writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+
+       /* Enable fault reporting */
+       scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
+
+       /* Disable TLB broadcasting. */
+       scr0 |= (sCR0_VMIDPNE | sCR0_PTM);
+
+       /* Enable client access, but bypass when no mapping is found */
+       scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);
+
+       /* Disable forced broadcasting */
+       scr0 &= ~sCR0_FB;
+
+       /* Don't upgrade barriers */
+       scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
+
+       /* Push the button */
+       arm_smmu_tlb_sync(smmu);
+       writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
+}
+
+static int arm_smmu_id_size_to_bits(int size)
+{
+       switch (size) {
+       case 0:
+               return 32;
+       case 1:
+               return 36;
+       case 2:
+               return 40;
+       case 3:
+               return 42;
+       case 4:
+               return 44;
+       case 5:
+       default:
+               return 48;
+       }
+}
+
+static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
+{
+       unsigned long size;
+       void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+       u32 id;
+
+       dev_notice(smmu->dev, "probing hardware configuration...\n");
+
+       /* Primecell ID */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
+       smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
+       dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
+
+       /* ID0 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
+#ifndef CONFIG_64BIT
+       if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
+               dev_err(smmu->dev, "\tno v7 descriptor support!\n");
+               return -ENODEV;
+       }
+#endif
+       if (id & ID0_S1TS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
+               dev_notice(smmu->dev, "\tstage 1 translation\n");
+       }
+
+       if (id & ID0_S2TS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
+               dev_notice(smmu->dev, "\tstage 2 translation\n");
+       }
+
+       if (id & ID0_NTS) {
+               smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
+               dev_notice(smmu->dev, "\tnested translation\n");
+       }
+
+       if (!(smmu->features &
+               (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
+                ARM_SMMU_FEAT_TRANS_NESTED))) {
+               dev_err(smmu->dev, "\tno translation support!\n");
+               return -ENODEV;
+       }
+
+       if (id & ID0_CTTW) {
+               smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
+               dev_notice(smmu->dev, "\tcoherent table walk\n");
+       }
+
+       if (id & ID0_SMS) {
+               u32 smr, sid, mask;
+
+               smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
+               smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
+                                          ID0_NUMSMRG_MASK;
+               if (smmu->num_mapping_groups == 0) {
+                       dev_err(smmu->dev,
+                               "stream-matching supported, but no SMRs present!\n");
+                       return -ENODEV;
+               }
+
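+               /*
+                * Discover which SMR mask and ID bits are actually wired
+                * up by writing all ones and reading back what sticks.
+                */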
+               smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+               smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+               writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+               smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+
+               mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+               sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+               if ((mask & sid) != sid) {
+                       dev_err(smmu->dev,
+                               "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+                               mask, sid);
+                       return -ENODEV;
+               }
+
+               dev_notice(smmu->dev,
+                          "\tstream matching with %u register groups, mask 0x%x\n",
+                          smmu->num_mapping_groups, mask);
+       }
+
+       /* ID1 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
+       smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;
+
+       /* Check that we ioremapped enough */
+       size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
+       size *= (smmu->pagesize << 1);
+       if (smmu->size < size)
+               dev_warn(smmu->dev,
+                        "device is 0x%lx bytes but only mapped 0x%lx!\n",
+                        size, smmu->size);
+
+       smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
+                                     ID1_NUMS2CB_MASK;
+       smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
+       if (smmu->num_s2_context_banks > smmu->num_context_banks) {
+               dev_err(smmu->dev, "impossible number of S2 context banks!\n");
+               return -ENODEV;
+       }
+       dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+                  smmu->num_context_banks, smmu->num_s2_context_banks);
+
+       /* ID2 */
+       id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
+       size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
+
+       /*
+        * Stage-1 output limited by stage-2 input size due to pgd
+        * allocation (PTRS_PER_PGD).
+        */
+#ifdef CONFIG_64BIT
+       /* Current maximum output size of 39 bits */
+       smmu->s1_output_size = min(39UL, size);
+#else
+       smmu->s1_output_size = min(32UL, size);
+#endif
+
+       /* The stage-2 output mask is also applied for bypass */
+       size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
+       smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);
+
+       if (smmu->version == 1) {
+               smmu->input_size = 32;
+       } else {
+#ifdef CONFIG_64BIT
+               size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
+               size = min(39, arm_smmu_id_size_to_bits(size));
+#else
+               size = 32;
+#endif
+               smmu->input_size = size;
+
+               if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
+                   (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
+                   (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
+                       dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
+                               PAGE_SIZE);
+                       return -ENODEV;
+               }
+       }
+
+       dev_notice(smmu->dev,
+                  "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
+                  smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
+       return 0;
+}
+
+static int arm_smmu_device_dt_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct arm_smmu_device *smmu;
+       struct device_node *dev_node;
+       struct device *dev = &pdev->dev;
+       struct rb_node *node;
+       struct of_phandle_args masterspec;
+       int num_irqs, i, err;
+
+       smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
+       if (!smmu) {
+               dev_err(dev, "failed to allocate arm_smmu_device\n");
+               return -ENOMEM;
+       }
+       smmu->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(dev, "missing base address/size\n");
+               return -ENODEV;
+       }
+
+       smmu->size = resource_size(res);
+       smmu->base = devm_request_and_ioremap(dev, res);
+       if (!smmu->base)
+               return -EADDRNOTAVAIL;
+
+       if (of_property_read_u32(dev->of_node, "#global-interrupts",
+                                &smmu->num_global_irqs)) {
+               dev_err(dev, "missing #global-interrupts property\n");
+               return -ENODEV;
+       }
+
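+       /*
+        * Per the devicetree binding, global interrupts are listed ahead
+        * of any context interrupts.
+        */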
+       num_irqs = 0;
+       while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
+               num_irqs++;
+               if (num_irqs > smmu->num_global_irqs)
+                       smmu->num_context_irqs++;
+       }
+
+       if (num_irqs < smmu->num_global_irqs) {
+               dev_warn(dev, "found %d interrupts but expected at least %d\n",
+                        num_irqs, smmu->num_global_irqs);
+               smmu->num_global_irqs = num_irqs;
+       }
+       smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;
+
+       smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
+                                 GFP_KERNEL);
+       if (!smmu->irqs) {
+               dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < num_irqs; ++i) {
+               int irq = platform_get_irq(pdev, i);
+               if (irq < 0) {
+                       dev_err(dev, "failed to get irq index %d\n", i);
+                       return -ENODEV;
+               }
+               smmu->irqs[i] = irq;
+       }
+
+       i = 0;
+       smmu->masters = RB_ROOT;
+       while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
+                                          "#stream-id-cells", i,
+                                          &masterspec)) {
+               err = register_smmu_master(smmu, dev, &masterspec);
+               if (err) {
+                       dev_err(dev, "failed to add master %s\n",
+                               masterspec.np->name);
+                       goto out_put_masters;
+               }
+
+               i++;
+       }
+       dev_notice(dev, "registered %d master devices\n", i);
+
+       if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
+               smmu->parent_of_node = dev_node;
+
+       err = arm_smmu_device_cfg_probe(smmu);
+       if (err)
+               goto out_put_parent;
+
+       if (smmu->version > 1 &&
+           smmu->num_context_banks != smmu->num_context_irqs) {
+               dev_err(dev,
+                       "found only %d context interrupt(s) but %d required\n",
+                       smmu->num_context_irqs, smmu->num_context_banks);
+               err = -ENODEV;
+               goto out_put_parent;
+       }
+
+       arm_smmu_device_reset(smmu);
+
+       for (i = 0; i < smmu->num_global_irqs; ++i) {
+               err = request_irq(smmu->irqs[i],
+                                 arm_smmu_global_fault,
+                                 IRQF_SHARED,
+                                 "arm-smmu global fault",
+                                 smmu);
+               if (err) {
+                       dev_err(dev, "failed to request global IRQ %d (%u)\n",
+                               i, smmu->irqs[i]);
+                       goto out_free_irqs;
+               }
+       }
+
+       INIT_LIST_HEAD(&smmu->list);
+       spin_lock(&arm_smmu_devices_lock);
+       list_add(&smmu->list, &arm_smmu_devices);
+       spin_unlock(&arm_smmu_devices_lock);
+       return 0;
+
+out_free_irqs:
+       while (i--)
+               free_irq(smmu->irqs[i], smmu);
+
+out_put_parent:
+       if (smmu->parent_of_node)
+               of_node_put(smmu->parent_of_node);
+
+out_put_masters:
+       for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+               of_node_put(master->of_node);
+       }
+
+       return err;
+}
+
+static int arm_smmu_device_remove(struct platform_device *pdev)
+{
+       int i;
+       struct device *dev = &pdev->dev;
+       struct arm_smmu_device *curr, *smmu = NULL;
+       struct rb_node *node;
+
+       spin_lock(&arm_smmu_devices_lock);
+       list_for_each_entry(curr, &arm_smmu_devices, list) {
+               if (curr->dev == dev) {
+                       smmu = curr;
+                       list_del(&smmu->list);
+                       break;
+               }
+       }
+       spin_unlock(&arm_smmu_devices_lock);
+
+       if (!smmu)
+               return -ENODEV;
+
+       if (smmu->parent_of_node)
+               of_node_put(smmu->parent_of_node);
+
+       for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+               struct arm_smmu_master *master;
+               master = container_of(node, struct arm_smmu_master, node);
+               of_node_put(master->of_node);
+       }
+
+       if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
+               dev_err(dev, "removing device with active domains!\n");
+
+       for (i = 0; i < smmu->num_global_irqs; ++i)
+               free_irq(smmu->irqs[i], smmu);
+
+       /* Turn the thing off */
+       writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id arm_smmu_of_match[] = {
+       { .compatible = "arm,smmu-v1", },
+       { .compatible = "arm,smmu-v2", },
+       { .compatible = "arm,mmu-400", },
+       { .compatible = "arm,mmu-500", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+#endif
+
+static struct platform_driver arm_smmu_driver = {
+       .driver = {
+               .owner          = THIS_MODULE,
+               .name           = "arm-smmu",
+               .of_match_table = of_match_ptr(arm_smmu_of_match),
+       },
+       .probe  = arm_smmu_device_dt_probe,
+       .remove = arm_smmu_device_remove,
+};
+
+static int __init arm_smmu_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&arm_smmu_driver);
+       if (ret)
+               return ret;
+
+       /* Oh, for a proper bus abstraction */
+       if (!iommu_present(&platform_bus_type))
+               bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+       if (!iommu_present(&amba_bustype))
+               bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+
+       return 0;
+}
+
+static void __exit arm_smmu_exit(void)
+{
+       platform_driver_unregister(&arm_smmu_driver);
+}
+
+module_init(arm_smmu_init);
+module_exit(arm_smmu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
+MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
+MODULE_LICENSE("GPL v2");
index d8f98b1..fbe9ca7 100644 (file)
@@ -754,6 +754,38 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
 
+static size_t iommu_pgsize(struct iommu_domain *domain,
+                          unsigned long addr_merge, size_t size)
+{
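+       /*
+        * Pick the largest page size in the domain's pgsize_bitmap that
+        * fits in 'size' and matches the alignment of 'addr_merge'.
+        * E.g. a bitmap of 4K|2M|1G with addr_merge == 0x200000 and
+        * size == 0x210000 selects a 2M page.
+        */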
+       unsigned int pgsize_idx;
+       size_t pgsize;
+
+       /* Max page size that still fits into 'size' */
+       pgsize_idx = __fls(size);
+
+       /* need to consider alignment requirements ? */
+       if (likely(addr_merge)) {
+               /* Max page size allowed by address */
+               unsigned int align_pgsize_idx = __ffs(addr_merge);
+               pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+       }
+
+       /* build a mask of acceptable page sizes */
+       pgsize = (1UL << (pgsize_idx + 1)) - 1;
+
+       /* throw away page sizes not supported by the hardware */
+       pgsize &= domain->ops->pgsize_bitmap;
+
+       /* make sure we're still sane */
+       BUG_ON(!pgsize);
+
+       /* pick the biggest page */
+       pgsize_idx = __fls(pgsize);
+       pgsize = 1UL << pgsize_idx;
+
+       return pgsize;
+}
+
 int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
 {
@@ -775,45 +807,18 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
-                       "0x%x\n", iova, (unsigned long)paddr,
-                       (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+                      iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
-                               (unsigned long)paddr, (unsigned long)size);
+       pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               unsigned long pgsize, addr_merge = iova | paddr;
-               unsigned int pgsize_idx;
-
-               /* Max page size that still fits into 'size' */
-               pgsize_idx = __fls(size);
-
-               /* need to consider alignment requirements ? */
-               if (likely(addr_merge)) {
-                       /* Max page size allowed by both iova and paddr */
-                       unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-                       pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-               }
-
-               /* build a mask of acceptable page sizes */
-               pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-               /* throw away page sizes not supported by the hardware */
-               pgsize &= domain->ops->pgsize_bitmap;
-
-               /* make sure we're still sane */
-               BUG_ON(!pgsize);
-
-               /* pick the biggest page */
-               pgsize_idx = __fls(pgsize);
-               pgsize = 1UL << pgsize_idx;
+               size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
 
-               pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
-                                       (unsigned long)paddr, pgsize);
+               pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
+                        iova, &paddr, pgsize);
 
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
@@ -850,27 +855,26 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
-               pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
-                                       iova, (unsigned long)size, min_pagesz);
+               pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+                      iova, size, min_pagesz);
                return -EINVAL;
        }
 
-       pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
-                                                       (unsigned long)size);
+       pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
 
        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
-               size_t left = size - unmapped;
+               size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
 
-               unmapped_page = domain->ops->unmap(domain, iova, left);
+               unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;
 
-               pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
-                                       (unsigned long)unmapped_page);
+               pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
+                        iova, unmapped_page);
 
                iova += unmapped_page;
                unmapped += unmapped_page;
index e02e5d7..0ba3766 100644 (file)
@@ -833,16 +833,15 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
        iopgd = iopgd_offset(obj, da);
 
        if (!iopgd_is_table(*iopgd)) {
-               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
-                       "*pgd:px%08x\n", obj->name, errs, da, iopgd, *iopgd);
+               dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
+                               obj->name, errs, da, iopgd, *iopgd);
                return IRQ_NONE;
        }
 
        iopte = iopte_offset(iopgd, da);
 
-       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
-               "pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
-               iopte, *iopte);
+       dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
+                       obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
 
        return IRQ_NONE;
 }
@@ -1235,14 +1234,16 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
                else if (iopte_is_large(*pte))
                        ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
                else
-                       dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
+                       dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
+                                                       (unsigned long long)da);
        } else {
                if (iopgd_is_section(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
                else if (iopgd_is_super(*pgd))
                        ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
                else
-                       dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
+                       dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
+                                                       (unsigned long long)da);
        }
 
        return ret;
index cd4ae9e..f4003d5 100644 (file)
@@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
 #define iopte_offset(iopgd, da)        (iopgd_page_vaddr(iopgd) + iopte_index(da))
 
 #define to_iommu(dev)                                                  \
-       (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
+       ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
index 46d8756..d147259 100644 (file)
@@ -102,8 +102,8 @@ static size_t sgtable_len(const struct sg_table *sgt)
                }
 
                if (i && sg->offset) {
-                       pr_err("%s: sg[%d] offset not allowed in internal "
-                                       "entries\n", __func__, i);
+                       pr_err("%s: sg[%d] offset not allowed in internal entries\n",
+                               __func__, i);
                        return 0;
                }
 
index 1760ceb..19ceaa6 100644 (file)
@@ -705,7 +705,7 @@ static int gic_irq_domain_xlate(struct irq_domain *d,
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
 {
-       if (action == CPU_STARTING)
+       if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
                gic_cpu_init(&gic_data[0]);
        return NOTIFY_OK;
 }
index 7f5a7ca..8270388 100644 (file)
@@ -136,9 +136,9 @@ config DVB_NET
 
 # This Kconfig option is used by both PCI and USB drivers
 config TTPCI_EEPROM
-        tristate
-        depends on I2C
-        default n
+       tristate
+       depends on I2C
+       default n
 
 source "drivers/media/dvb-core/Kconfig"
 
@@ -189,6 +189,12 @@ config MEDIA_SUBDRV_AUTOSELECT
 
          If unsure say Y.
 
+config MEDIA_ATTACH
+       bool
+       depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+       depends on MODULES
+       default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
index cb52438..9eac531 100644 (file)
@@ -956,7 +956,7 @@ static int s5c73m3_oif_enum_frame_interval(struct v4l2_subdev *sd,
 
        if (fie->pad != OIF_SOURCE_PAD)
                return -EINVAL;
-       if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+       if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
                return -EINVAL;
 
        mutex_lock(&state->lock);
index 27d6262..aba5b1c 100644 (file)
@@ -615,7 +615,7 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
        int changed = 0;
        u32 old;
 
-       if (core->board.audio_chip == V4L2_IDENT_WM8775)
+       if (core->sd_wm8775)
                snd_cx88_wm8775_volume_put(kcontrol, value);
 
        left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
                vol ^= bit;
                cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
                /* Pass mute onto any WM8775 */
-               if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
-                   ((1<<6) == bit))
+               if (core->sd_wm8775 && ((1<<6) == bit))
                        wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
                ret = 1;
        }
@@ -903,7 +902,7 @@ static int cx88_audio_initdev(struct pci_dev *pci,
                goto error;
 
        /* If there's a wm8775 then add a Line-In ALC switch */
-       if (core->board.audio_chip == V4L2_IDENT_WM8775)
+       if (core->sd_wm8775)
                snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
        strcpy (card->driver, "CX88x");
index 1b00615..c7a9be1 100644 (file)
@@ -385,8 +385,7 @@ int cx88_video_mux(struct cx88_core *core, unsigned int input)
                /* The wm8775 module has the "2" route hardwired into
                   the initialization. Some boards may use different
                   routes for different inputs. HVR-1300 surely does */
-               if (core->board.audio_chip &&
-                   core->board.audio_chip == V4L2_IDENT_WM8775) {
+               if (core->sd_wm8775) {
                        call_all(core, audio, s_routing,
                                 INPUT(input).audioroute, 0, 0);
                }
@@ -771,8 +770,7 @@ static int video_open(struct file *file)
                cx_write(MO_GP1_IO, core->board.radio.gpio1);
                cx_write(MO_GP2_IO, core->board.radio.gpio2);
                if (core->board.radio.audioroute) {
-                       if(core->board.audio_chip &&
-                               core->board.audio_chip == V4L2_IDENT_WM8775) {
+                       if (core->sd_wm8775) {
                                call_all(core, audio, s_routing,
                                        core->board.radio.audioroute, 0, 0);
                        }
@@ -959,7 +957,7 @@ static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl)
        u32 value,mask;
 
        /* Pass changes onto any WM8775 */
-       if (core->board.audio_chip == V4L2_IDENT_WM8775) {
+       if (core->sd_wm8775) {
                switch (ctrl->id) {
                case V4L2_CID_AUDIO_MUTE:
                        wm8775_s_ctrl(core, ctrl->id, ctrl->val);
index 48b8d7a..9d1481a 100644 (file)
@@ -576,6 +576,14 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
        return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+                             struct v4l2_create_buffers *create)
+{
+       struct coda_ctx *ctx = fh_to_ctx(priv);
+
+       return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
                           enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
 
        .vidioc_qbuf            = vidioc_qbuf,
        .vidioc_dqbuf           = vidioc_dqbuf,
+       .vidioc_create_bufs     = vidioc_create_bufs,
 
        .vidioc_streamon        = vidioc_streamon,
        .vidioc_streamoff       = vidioc_streamoff,
index 1802f11..d0b375c 100644 (file)
@@ -916,6 +916,21 @@ static int vpbe_display_s_fmt(struct file *file, void *priv,
        other video window */
 
        layer->pix_fmt = *pixfmt;
+       if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+               struct vpbe_layer *otherlayer;
+
+               otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+               /* if other layer is available, only
+                * claim it, do not configure it
+                */
+               ret = osd_device->ops.request_layer(osd_device,
+                                                   otherlayer->layer_info.id);
+               if (ret < 0) {
+                       v4l2_err(&vpbe_dev->v4l2_dev,
+                                "Display Manager failed to allocate layer\n");
+                       return -EBUSY;
+               }
+       }
 
        /* Get osd layer config */
        osd_device->ops.get_layer_config(osd_device,
index 8c50d30..9360909 100644 (file)
@@ -1837,7 +1837,7 @@ static int vpfe_probe(struct platform_device *pdev)
        if (NULL == ccdc_cfg) {
                v4l2_err(pdev->dev.driver,
                         "Memory allocation failed for ccdc_cfg\n");
-               goto probe_free_lock;
+               goto probe_free_dev_mem;
        }
 
        mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@ probe_out_release_irq:
        free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
        kfree(ccdc_cfg);
-probe_free_lock:
        mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
        kfree(vpfe_dev);
index b0ff67b..d05eaa2 100644 (file)
@@ -174,7 +174,7 @@ int fimc_is_hw_change_mode(struct fimc_is *is)
                HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
        };
 
-       if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+       if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
                return -EINVAL;
 
        mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
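
The hunk above fixes a classic off-by-one: for an array of N entries the
last valid index is N - 1, so the bounds check must reject idx == N as
well. A standalone illustration:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const int cmd[4] = { 10, 11, 12, 13 };

    static int lookup(unsigned int idx)
    {
            if (idx >= ARRAY_SIZE(cmd))     /* '>' would let idx == 4 through */
                    return -1;
            return cmd[idx];
    }

    int main(void)
    {
            printf("%d %d\n", lookup(3), lookup(4));    /* prints: 13 -1 */
            return 0;
    }
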
index 47c6363..0741945 100644 (file)
@@ -48,7 +48,6 @@ static char *fimc_is_clocks[ISS_CLKS_MAX] = {
        [ISS_CLK_LITE0]                 = "lite0",
        [ISS_CLK_LITE1]                 = "lite1",
        [ISS_CLK_MPLL]                  = "mpll",
-       [ISS_CLK_SYSREG]                = "sysreg",
        [ISS_CLK_ISP]                   = "isp",
        [ISS_CLK_DRC]                   = "drc",
        [ISS_CLK_FD]                    = "fd",
@@ -71,7 +70,6 @@ static void fimc_is_put_clocks(struct fimc_is *is)
        for (i = 0; i < ISS_CLKS_MAX; i++) {
                if (IS_ERR(is->clocks[i]))
                        continue;
-               clk_unprepare(is->clocks[i]);
                clk_put(is->clocks[i]);
                is->clocks[i] = ERR_PTR(-EINVAL);
        }
@@ -90,12 +88,6 @@ static int fimc_is_get_clocks(struct fimc_is *is)
                        ret = PTR_ERR(is->clocks[i]);
                        goto err;
                }
-               ret = clk_prepare(is->clocks[i]);
-               if (ret < 0) {
-                       clk_put(is->clocks[i]);
-                       is->clocks[i] = ERR_PTR(-EINVAL);
-                       goto err;
-               }
        }
 
        return 0;
@@ -103,7 +95,7 @@ err:
        fimc_is_put_clocks(is);
        dev_err(&is->pdev->dev, "failed to get clock: %s\n",
                fimc_is_clocks[i]);
-       return -ENXIO;
+       return ret;
 }
 
 static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@ int fimc_is_enable_clocks(struct fimc_is *is)
        for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
                if (IS_ERR(is->clocks[i]))
                        continue;
-               ret = clk_enable(is->clocks[i]);
+               ret = clk_prepare_enable(is->clocks[i]);
                if (ret < 0) {
                        dev_err(&is->pdev->dev, "clock %s enable failed\n",
                                fimc_is_clocks[i]);
@@ -163,7 +155,7 @@ void fimc_is_disable_clocks(struct fimc_is *is)
 
        for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
                if (!IS_ERR(is->clocks[i])) {
-                       clk_disable(is->clocks[i]);
+                       clk_disable_unprepare(is->clocks[i]);
                        pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
                }
        }
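
These two hunks pair with the removal of the separate clk_prepare()/
clk_unprepare() calls from fimc_is_get_clocks()/fimc_is_put_clocks()
above: both steps now happen at enable/disable time. A hedged sketch of
the combined helpers from the common clk API:

    #include <linux/clk.h>

    static int example_clock_cycle(struct clk *clk)
    {
            /* clk_prepare_enable() == clk_prepare() + clk_enable();
             * the prepare step may sleep, so call from process context. */
            int ret = clk_prepare_enable(clk);

            if (ret)
                    return ret;

            /* ... touch the hardware ... */

            clk_disable_unprepare(clk);     /* undo both steps in reverse */
            return 0;
    }
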
@@ -326,6 +318,11 @@ int fimc_is_start_firmware(struct fimc_is *is)
        struct device *dev = &is->pdev->dev;
        int ret;
 
+       if (is->fw.f_w == NULL) {
+               dev_err(dev, "firmware is not loaded\n");
+               return -EINVAL;
+       }
+
        memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
        wmb();
 
@@ -837,23 +834,11 @@ static int fimc_is_probe(struct platform_device *pdev)
                goto err_clk;
        }
        pm_runtime_enable(dev);
-       /*
-        * Enable only the ISP power domain, keep FIMC-IS clocks off until
-        * the whole clock tree is configured. The ISP power domain needs
-        * be active in order to acces any CMU_ISP clock registers.
-        */
-       ret = pm_runtime_get_sync(dev);
-       if (ret < 0)
-               goto err_irq;
-
-       ret = fimc_is_setup_clocks(is);
-       pm_runtime_put_sync(dev);
 
+       ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                goto err_irq;
 
-       is->clk_init = true;
-
        is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
        if (IS_ERR(is->alloc_ctx)) {
                ret = PTR_ERR(is->alloc_ctx);
@@ -875,6 +860,8 @@ static int fimc_is_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_dfs;
 
+       pm_runtime_put_sync(dev);
+
        dev_dbg(dev, "FIMC-IS registered successfully\n");
        return 0;
 
@@ -894,9 +881,11 @@ err_clk:
 static int fimc_is_runtime_resume(struct device *dev)
 {
        struct fimc_is *is = dev_get_drvdata(dev);
+       int ret;
 
-       if (!is->clk_init)
-               return 0;
+       ret = fimc_is_setup_clocks(is);
+       if (ret)
+               return ret;
 
        return fimc_is_enable_clocks(is);
 }
@@ -905,9 +894,7 @@ static int fimc_is_runtime_suspend(struct device *dev)
 {
        struct fimc_is *is = dev_get_drvdata(dev);
 
-       if (is->clk_init)
-               fimc_is_disable_clocks(is);
-
+       fimc_is_disable_clocks(is);
        return 0;
 }
 
@@ -941,7 +928,8 @@ static int fimc_is_remove(struct platform_device *pdev)
        vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
        fimc_is_put_clocks(is);
        fimc_is_debugfs_remove(is);
-       release_firmware(is->fw.f_w);
+       if (is->fw.f_w)
+               release_firmware(is->fw.f_w);
        fimc_is_free_cpu_memory(is);
 
        return 0;
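
With clock setup moved into the runtime-resume path, the clk_init
bookkeeping flag disappears (see the fimc-is.h hunk below). A hedged
sketch of how such callbacks are typically attached; whether this driver
uses exactly this macro is an assumption:

    #include <linux/pm.h>

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(fimc_is_runtime_suspend,
                               fimc_is_runtime_resume, NULL)
    };
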
index f5275a5..d7db133 100644 (file)
@@ -73,7 +73,6 @@ enum {
        ISS_CLK_LITE0,
        ISS_CLK_LITE1,
        ISS_CLK_MPLL,
-       ISS_CLK_SYSREG,
        ISS_CLK_ISP,
        ISS_CLK_DRC,
        ISS_CLK_FD,
@@ -265,7 +264,6 @@ struct fimc_is {
        spinlock_t                      slock;
 
        struct clk                      *clocks[ISS_CLKS_MAX];
-       bool                            clk_init;
        void __iomem                    *regs;
        void __iomem                    *pmu_regs;
        int                             irq;
index d63947f..7ede30b 100644 (file)
@@ -138,7 +138,7 @@ static int fimc_isp_subdev_get_fmt(struct v4l2_subdev *sd,
                return 0;
        }
 
-       mf->colorspace = V4L2_COLORSPACE_JPEG;
+       mf->colorspace = V4L2_COLORSPACE_SRGB;
 
        mutex_lock(&isp->subdev_lock);
        __is_get_frame_size(is, &cur_fmt);
@@ -194,7 +194,7 @@ static int fimc_isp_subdev_set_fmt(struct v4l2_subdev *sd,
        v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
                 __func__, fmt->pad, mf->code, mf->width, mf->height);
 
-       mf->colorspace = V4L2_COLORSPACE_JPEG;
+       mf->colorspace = V4L2_COLORSPACE_SRGB;
 
        mutex_lock(&isp->subdev_lock);
        __isp_subdev_try_format(isp, fmt);
index a2eda9d..254d70f 100644 (file)
@@ -746,7 +746,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
        node = v4l2_of_get_next_endpoint(node, NULL);
        if (!node) {
                dev_err(&pdev->dev, "No port node at %s\n",
-                                       node->full_name);
+                               pdev->dev.of_node->full_name);
                return -EINVAL;
        }
        /* Get port node and validate MIPI-CSI channel id. */
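
The fix above removes a dereference of a pointer that was just found to
be NULL: the error message must format something still valid. The same
rule with generic OF calls, sketched (names are illustrative):

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/of.h>

    static int report_missing_child(struct device *dev,
                                    struct device_node *parent)
    {
            struct device_node *child = of_get_next_child(parent, NULL);

            if (!child) {
                    /* use the still-valid parent, never the NULL child */
                    dev_err(dev, "no child node at %s\n", parent->full_name);
                    return -EINVAL;
            }
            of_node_put(child);
            return 0;
    }
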
index 261134b..35d2fcd 100644 (file)
@@ -229,7 +229,7 @@ struct camif_vp {
        unsigned int            state;
        u16                     fmt_flags;
        u8                      id;
-       u8                      rotation;
+       u16                     rotation;
        u8                      hflip;
        u8                      vflip;
        unsigned int            offset;
index ddc2900..d18cb5e 100644 (file)
@@ -1,2 +1,2 @@
 s5p-jpeg-objs := jpeg-core.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
index 379008c..15f59b3 100644 (file)
@@ -1,4 +1,4 @@
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
 s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
 s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
 s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
index 01f9ae0..d12faa6 100644 (file)
@@ -397,7 +397,7 @@ static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
 leave_handle_frame:
        spin_unlock_irqrestore(&dev->irqlock, flags);
        if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
-                                   || ctx->dst_queue_cnt < ctx->dpb_count)
+                                   || ctx->dst_queue_cnt < ctx->pb_count)
                clear_work_bit(ctx);
        s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
        wake_up_ctx(ctx, reason, err);
@@ -473,7 +473,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
 
                s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
 
-               ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
+               ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
                                dev);
                ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
                                dev);
@@ -562,7 +562,7 @@ static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
        struct s5p_mfc_dev *dev = ctx->dev;
        struct s5p_mfc_buf *mb_entry;
 
-       mfc_debug(2, "Stream completed");
+       mfc_debug(2, "Stream completed\n");
 
        s5p_mfc_clear_int_flags(dev);
        ctx->int_type = reason;
@@ -1362,7 +1362,6 @@ static struct s5p_mfc_variant mfc_drvdata_v5 = {
        .port_num       = MFC_NUM_PORTS,
        .buf_size       = &buf_size_v5,
        .buf_align      = &mfc_buf_align_v5,
-       .mclk_name      = "sclk_mfc",
        .fw_name        = "s5p-mfc.fw",
 };
 
@@ -1389,7 +1388,6 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
        .port_num       = MFC_NUM_PORTS_V6,
        .buf_size       = &buf_size_v6,
        .buf_align      = &mfc_buf_align_v6,
-       .mclk_name      = "aclk_333",
        .fw_name        = "s5p-mfc-v6.fw",
 };
 
index 202d1d7..ef4074c 100644 (file)
@@ -138,6 +138,7 @@ enum s5p_mfc_inst_state {
        MFCINST_INIT = 100,
        MFCINST_GOT_INST,
        MFCINST_HEAD_PARSED,
+       MFCINST_HEAD_PRODUCED,
        MFCINST_BUFS_SET,
        MFCINST_RUNNING,
        MFCINST_FINISHING,
@@ -231,7 +232,6 @@ struct s5p_mfc_variant {
        unsigned int port_num;
        struct s5p_mfc_buf_size *buf_size;
        struct s5p_mfc_buf_align *buf_align;
-       char    *mclk_name;
        char    *fw_name;
 };
 
@@ -438,7 +438,7 @@ struct s5p_mfc_enc_params {
        u32 rc_framerate_num;
        u32 rc_framerate_denom;
 
-       union {
+       struct {
                struct s5p_mfc_h264_enc_params h264;
                struct s5p_mfc_mpeg4_enc_params mpeg4;
        } codec;
@@ -602,7 +602,7 @@ struct s5p_mfc_ctx {
        int after_packed_pb;
        int sei_fp_parse;
 
-       int dpb_count;
+       int pb_count;
        int total_dpb_count;
        int mv_count;
        /* Buffers */
index 2e5f30b..dc1fc94 100644 (file)
@@ -38,7 +38,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
        dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
                                        &dev->bank1, GFP_KERNEL);
 
-       if (IS_ERR(dev->fw_virt_addr)) {
+       if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
                dev->fw_virt_addr = NULL;
                mfc_err("Allocating bitprocessor buffer failed\n");
                return -ENOMEM;
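
dma_alloc_coherent() reports failure by returning NULL, never an
ERR_PTR, so the plain IS_ERR() test above could never fire;
IS_ERR_OR_NULL() at least catches the NULL case. A hedged sketch of the
conventional check:

    #include <linux/dma-mapping.h>

    static void *alloc_fw_buffer(struct device *dev, size_t size,
                                 dma_addr_t *dma)
    {
            void *virt = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

            if (!virt)              /* NULL, not an ERR_PTR, on failure */
                    return NULL;
            return virt;
    }
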
index bd5cd4a..8e608f5 100644 (file)
@@ -30,8 +30,8 @@ extern int debug;
 #define mfc_debug(level, fmt, args...)
 #endif
 
-#define mfc_debug_enter() mfc_debug(5, "enter")
-#define mfc_debug_leave() mfc_debug(5, "leave")
+#define mfc_debug_enter() mfc_debug(5, "enter\n")
+#define mfc_debug_leave() mfc_debug(5, "leave\n")
 
 #define mfc_err(fmt, args...)                          \
        do {                                            \
index 4af53bd..00b0703 100644 (file)
@@ -210,11 +210,11 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
        /* Context is to decode a frame */
        if (ctx->src_queue_cnt >= 1 &&
            ctx->state == MFCINST_RUNNING &&
-           ctx->dst_queue_cnt >= ctx->dpb_count)
+           ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        /* Context is to return last frame */
        if (ctx->state == MFCINST_FINISHING &&
-           ctx->dst_queue_cnt >= ctx->dpb_count)
+           ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        /* Context is to set buffers */
        if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
        /* Resolution change */
        if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
                ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
-               ctx->dst_queue_cnt >= ctx->dpb_count)
+               ctx->dst_queue_cnt >= ctx->pb_count)
                return 1;
        if (ctx->state == MFCINST_RES_CHANGE_END &&
                ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                        mfc_err("vb2_reqbufs on capture failed\n");
                        return ret;
                }
-               if (reqbufs->count < ctx->dpb_count) {
+               if (reqbufs->count < ctx->pb_count) {
                        mfc_err("Not enough buffers allocated\n");
                        reqbufs->count = 0;
                        s5p_mfc_clock_on();
@@ -751,7 +751,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
                if (ctx->state >= MFCINST_HEAD_PARSED &&
                    ctx->state < MFCINST_ABORT) {
-                       ctrl->val = ctx->dpb_count;
+                       ctrl->val = ctx->pb_count;
                        break;
                } else if (ctx->state != MFCINST_INIT) {
                        v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@ static int s5p_mfc_dec_g_v_ctrl(struct v4l2_ctrl *ctrl)
                                S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
                if (ctx->state >= MFCINST_HEAD_PARSED &&
                    ctx->state < MFCINST_ABORT) {
-                       ctrl->val = ctx->dpb_count;
+                       ctrl->val = ctx->pb_count;
                } else {
                        v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
                        return -EINVAL;
@@ -924,10 +924,10 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
                /* Output plane count is 2 - one for Y and one for CbCr */
                *plane_count = 2;
                /* Setup buffer count */
-               if (*buf_count < ctx->dpb_count)
-                       *buf_count = ctx->dpb_count;
-               if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
-                       *buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+               if (*buf_count < ctx->pb_count)
+                       *buf_count = ctx->pb_count;
+               if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
+                       *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
                if (*buf_count > MFC_MAX_BUFFERS)
                        *buf_count = MFC_MAX_BUFFERS;
        } else {
index 4f6b553..2549967 100644 (file)
@@ -592,7 +592,7 @@ static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx)
                return 1;
        /* context is ready to encode a frame */
        if ((ctx->state == MFCINST_RUNNING ||
-               ctx->state == MFCINST_HEAD_PARSED) &&
+               ctx->state == MFCINST_HEAD_PRODUCED) &&
                ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
                return 1;
        /* context is ready to encode remaining frames */
@@ -649,6 +649,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
        struct s5p_mfc_enc_params *p = &ctx->enc_params;
        struct s5p_mfc_buf *dst_mb;
        unsigned long flags;
+       unsigned int enc_pb_count;
 
        if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
                spin_lock_irqsave(&dev->irqlock, flags);
@@ -661,18 +662,19 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
                vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
                spin_unlock_irqrestore(&dev->irqlock, flags);
        }
-       if (IS_MFCV6(dev)) {
-               ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */
-       } else {
+
+       if (!IS_MFCV6(dev)) {
                ctx->state = MFCINST_RUNNING;
                if (s5p_mfc_ctx_ready(ctx))
                        set_work_bit_irqsave(ctx);
                s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
-       }
-
-       if (IS_MFCV6(dev))
-               ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
+       } else {
+               enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
                                get_enc_dpb_count, dev);
+               if (ctx->pb_count < enc_pb_count)
+                       ctx->pb_count = enc_pb_count;
+               ctx->state = MFCINST_HEAD_PRODUCED;
+       }
 
        return 0;
 }
@@ -717,9 +719,9 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
 
        slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
        strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
-       mfc_debug(2, "Encoded slice type: %d", slice_type);
-       mfc_debug(2, "Encoded stream size: %d", strm_size);
-       mfc_debug(2, "Display order: %d",
+       mfc_debug(2, "Encoded slice type: %d\n", slice_type);
+       mfc_debug(2, "Encoded stream size: %d\n", strm_size);
+       mfc_debug(2, "Display order: %d\n",
                  mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
        spin_lock_irqsave(&dev->irqlock, flags);
        if (slice_type >= 0) {
@@ -1055,15 +1057,13 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                }
                ctx->capture_state = QUEUE_BUFS_REQUESTED;
 
-               if (!IS_MFCV6(dev)) {
-                       ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
-                                       alloc_codec_buffers, ctx);
-                       if (ret) {
-                               mfc_err("Failed to allocate encoding buffers\n");
-                               reqbufs->count = 0;
-                               ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
-                               return -ENOMEM;
-                       }
+               ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
+                               alloc_codec_buffers, ctx);
+               if (ret) {
+                       mfc_err("Failed to allocate encoding buffers\n");
+                       reqbufs->count = 0;
+                       ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+                       return -ENOMEM;
                }
        } else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                if (ctx->output_state != QUEUE_FREE) {
@@ -1071,6 +1071,19 @@ static int vidioc_reqbufs(struct file *file, void *priv,
                                                        ctx->output_state);
                        return -EINVAL;
                }
+
+               if (IS_MFCV6(dev)) {
+                       /* Check for min encoder buffers */
+                       if (ctx->pb_count &&
+                               (reqbufs->count < ctx->pb_count)) {
+                               reqbufs->count = ctx->pb_count;
+                               mfc_debug(2, "Minimum %d output buffers needed\n",
+                                               ctx->pb_count);
+                       } else {
+                               ctx->pb_count = reqbufs->count;
+                       }
+               }
+
                ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
                if (ret != 0) {
                        mfc_err("error in vb2_reqbufs() for E(S)\n");
@@ -1533,14 +1546,14 @@ int vidioc_encoder_cmd(struct file *file, void *priv,
 
                spin_lock_irqsave(&dev->irqlock, flags);
                if (list_empty(&ctx->src_queue)) {
-                       mfc_debug(2, "EOS: empty src queue, entering finishing state");
+                       mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
                        ctx->state = MFCINST_FINISHING;
                        if (s5p_mfc_ctx_ready(ctx))
                                set_work_bit_irqsave(ctx);
                        spin_unlock_irqrestore(&dev->irqlock, flags);
                        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
                } else {
-                       mfc_debug(2, "EOS: marking last buffer of stream");
+                       mfc_debug(2, "EOS: marking last buffer of stream\n");
                        buf = list_entry(ctx->src_queue.prev,
                                                struct s5p_mfc_buf, list);
                        if (buf->flags & MFC_BUF_FLAG_USED)
@@ -1609,9 +1622,9 @@ static int check_vb_with_fmt(struct s5p_mfc_fmt *fmt, struct vb2_buffer *vb)
                        mfc_err("failed to get plane cookie\n");
                        return -EINVAL;
                }
-               mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
-                               vb->v4l2_buf.index, i,
-                               vb2_dma_contig_plane_dma_addr(vb, i));
+               mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
+                         vb->v4l2_buf.index, i,
+                         vb2_dma_contig_plane_dma_addr(vb, i));
        }
        return 0;
 }
@@ -1760,11 +1773,27 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
        struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
        struct s5p_mfc_dev *dev = ctx->dev;
 
-       v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+       if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+
+               if ((ctx->state == MFCINST_GOT_INST) &&
+                       (dev->curr_ctx == ctx->num) && dev->hw_lock) {
+                       s5p_mfc_wait_for_done_ctx(ctx,
+                                               S5P_MFC_R2H_CMD_SEQ_DONE_RET,
+                                               0);
+               }
+
+               if (ctx->src_bufs_cnt < ctx->pb_count) {
+                       mfc_err("Need minimum %d OUTPUT buffers\n",
+                                       ctx->pb_count);
+                       return -EINVAL;
+               }
+       }
+
        /* If context is ready then dev = work->data;schedule it to run */
        if (s5p_mfc_ctx_ready(ctx))
                set_work_bit_irqsave(ctx);
        s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+
        return 0;
 }
 
@@ -1920,6 +1949,7 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
                if (controls[i].is_volatile && ctx->ctrls[i])
                        ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
        }
+       v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
        return 0;
 }
 
index 0af05a2..368582b 100644 (file)
@@ -1275,8 +1275,8 @@ static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
        spin_unlock_irqrestore(&dev->irqlock, flags);
        dev->curr_ctx = ctx->num;
        s5p_mfc_clean_ctx_int_flags(ctx);
-       mfc_debug(2, "encoding buffer with index=%d state=%d",
-                       src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+       mfc_debug(2, "encoding buffer with index=%d state=%d\n",
+                 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
        s5p_mfc_encode_one_frame_v5(ctx);
        return 0;
 }
index 7e76fce..66f0d04 100644 (file)
@@ -62,12 +62,6 @@ static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx)
        /* NOP */
 }
 
-static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
-{
-       /* NOP */
-       return -1;
-}
-
 /* Allocate codec buffers */
 static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
 {
@@ -167,7 +161,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                                S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
                ctx->bank1.size =
                        ctx->scratch_buf_size + ctx->tmv_buffer_size +
-                       (ctx->dpb_count * (ctx->luma_dpb_size +
+                       (ctx->pb_count * (ctx->luma_dpb_size +
                        ctx->chroma_dpb_size + ctx->me_buffer_size));
                ctx->bank2.size = 0;
                break;
@@ -181,7 +175,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                                S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
                ctx->bank1.size =
                        ctx->scratch_buf_size + ctx->tmv_buffer_size +
-                       (ctx->dpb_count * (ctx->luma_dpb_size +
+                       (ctx->pb_count * (ctx->luma_dpb_size +
                        ctx->chroma_dpb_size + ctx->me_buffer_size));
                ctx->bank2.size = 0;
                break;
@@ -198,7 +192,6 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
                }
                BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
        }
-
        return 0;
 }
 
@@ -449,8 +442,8 @@ static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx,
        WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
        WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
 
-       mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d",
-               addr, size);
+       mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
+                 addr, size);
 
        return 0;
 }
@@ -463,8 +456,8 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
        WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
        WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
 
-       mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr);
-       mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr);
+       mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
+       mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
 }
 
 static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
@@ -479,8 +472,8 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
        enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
        enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
 
-       mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr);
-       mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr);
+       mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
+       mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
 }
 
 /* Set encoding ref & codec buffer */
@@ -497,7 +490,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
 
        mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
 
-       for (i = 0; i < ctx->dpb_count; i++) {
+       for (i = 0; i < ctx->pb_count; i++) {
                WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
                buf_addr1 += ctx->luma_dpb_size;
                WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
@@ -520,7 +513,7 @@ static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx)
        buf_size1 -= ctx->tmv_buffer_size;
 
        mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
-                       buf_addr1, buf_size1, ctx->dpb_count);
+                       buf_addr1, buf_size1, ctx->pb_count);
        if (buf_size1 < 0) {
                mfc_debug(2, "Not enough memory has been allocated.\n");
                return -ENOMEM;
@@ -1431,8 +1424,8 @@ static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
        src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
        src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
 
-       mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr);
-       mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr);
+       mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
+       mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
 
        s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
 
@@ -1522,22 +1515,6 @@ static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx)
        struct s5p_mfc_dev *dev = ctx->dev;
        int ret;
 
-       ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
-       if (ret) {
-               mfc_err("Failed to allocate encoding buffers.\n");
-               return -ENOMEM;
-       }
-
-       /* Header was generated now starting processing
-        * First set the reference frame buffers
-        */
-       if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
-               mfc_err("It seems that destionation buffers were not\n"
-                       "requested.MFC requires that header should be generated\n"
-                       "before allocating codec buffer.\n");
-               return -EAGAIN;
-       }
-
        dev->curr_ctx = ctx->num;
        s5p_mfc_clean_ctx_int_flags(ctx);
        ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
@@ -1582,7 +1559,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
        mfc_debug(1, "Seting new context to %p\n", ctx);
        /* Got context to run in ctx */
        mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
-               ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt);
+               ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
        mfc_debug(1, "ctx->state=%d\n", ctx->state);
        /* Last frame has already been sent to MFC
         * Now obtaining frames from MFC buffer */
@@ -1647,7 +1624,7 @@ static void s5p_mfc_try_run_v6(struct s5p_mfc_dev *dev)
                case MFCINST_GOT_INST:
                        s5p_mfc_run_init_enc(ctx);
                        break;
-               case MFCINST_HEAD_PARSED: /* Only for MFC6.x */
+               case MFCINST_HEAD_PRODUCED:
                        ret = s5p_mfc_run_init_enc_buffers(ctx);
                        break;
                default:
@@ -1730,7 +1707,7 @@ static int s5p_mfc_get_dspl_status_v6(struct s5p_mfc_dev *dev)
        return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
 }
 
-static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev)
+static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
 {
        return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
 }
index 6aa38a5..11d5f1d 100644 (file)
@@ -50,19 +50,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
                goto err_p_ip_clk;
        }
 
-       pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
-       if (IS_ERR(pm->clock)) {
-               mfc_err("Failed to get MFC clock\n");
-               ret = PTR_ERR(pm->clock);
-               goto err_g_ip_clk_2;
-       }
-
-       ret = clk_prepare(pm->clock);
-       if (ret) {
-               mfc_err("Failed to prepare MFC clock\n");
-               goto err_p_ip_clk_2;
-       }
-
        atomic_set(&pm->power, 0);
 #ifdef CONFIG_PM_RUNTIME
        pm->device = &dev->plat_dev->dev;
@@ -72,10 +59,6 @@ int s5p_mfc_init_pm(struct s5p_mfc_dev *dev)
        atomic_set(&clk_ref, 0);
 #endif
        return 0;
-err_p_ip_clk_2:
-       clk_put(pm->clock);
-err_g_ip_clk_2:
-       clk_unprepare(pm->clock_gate);
 err_p_ip_clk:
        clk_put(pm->clock_gate);
 err_g_ip_clk:
@@ -86,8 +69,6 @@ void s5p_mfc_final_pm(struct s5p_mfc_dev *dev)
 {
        clk_unprepare(pm->clock_gate);
        clk_put(pm->clock_gate);
-       clk_unprepare(pm->clock);
-       clk_put(pm->clock);
 #ifdef CONFIG_PM_RUNTIME
        pm_runtime_disable(pm->device);
 #endif
@@ -98,7 +79,7 @@ int s5p_mfc_clock_on(void)
        int ret;
 #ifdef CLK_DEBUG
        atomic_inc(&clk_ref);
-       mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+       mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
 #endif
        ret = clk_enable(pm->clock_gate);
        return ret;
@@ -108,7 +89,7 @@ void s5p_mfc_clock_off(void)
 {
 #ifdef CLK_DEBUG
        atomic_dec(&clk_ref);
-       mfc_debug(3, "- %d", atomic_read(&clk_ref));
+       mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
 #endif
        clk_disable(pm->clock_gate);
 }
index 0b32cc3..59a9dee 100644 (file)
@@ -905,11 +905,11 @@ static int sh_veu_queue_setup(struct vb2_queue *vq,
                if (ftmp.fmt.pix.width != pix->width ||
                    ftmp.fmt.pix.height != pix->height)
                        return -EINVAL;
-               size = pix->bytesperline ? pix->bytesperline * pix->height :
-                       pix->width * pix->height * fmt->depth >> 3;
+               size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
+                       pix->width * pix->height * fmt->depth / fmt->ydepth;
        } else {
                vfmt = sh_veu_get_vfmt(veu, vq->type);
-               size = vfmt->bytesperline * vfmt->frame.height;
+               size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
        }
 
        if (count < 2)
@@ -1033,8 +1033,6 @@ static int sh_veu_release(struct file *file)
 
        dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
 
-       pm_runtime_put(veu->dev);
-
        if (veu_file == veu->capture) {
                veu->capture = NULL;
                vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
@@ -1050,6 +1048,8 @@ static int sh_veu_release(struct file *file)
                veu->m2m_ctx = NULL;
        }
 
+       pm_runtime_put(veu->dev);
+
        kfree(veu_file);
 
        return 0;
@@ -1138,10 +1138,7 @@ static irqreturn_t sh_veu_isr(int irq, void *dev_id)
 
        veu->xaction++;
 
-       if (!veu->aborting)
-               return IRQ_WAKE_THREAD;
-
-       return IRQ_HANDLED;
+       return IRQ_WAKE_THREAD;
 }
 
 static int sh_veu_probe(struct platform_device *pdev)
index eea832c..3a4efbd 100644 (file)
@@ -643,9 +643,9 @@ static int soc_camera_close(struct file *file)
 
                if (ici->ops->init_videobuf2)
                        vb2_queue_release(&icd->vb2_vidq);
-               ici->ops->remove(icd);
-
                __soc_camera_power_off(icd);
+
+               ici->ops->remove(icd);
        }
 
        if (icd->streamer == file)
index c0beee2..d529ba7 100644 (file)
@@ -22,6 +22,7 @@ config RADIO_SI476X
        tristate "Silicon Laboratories Si476x I2C FM Radio"
        depends on I2C && VIDEO_V4L2
        depends on MFD_SI476X_CORE
+       depends on SND_SOC
        select SND_SOC_SI476X
        ---help---
          Choose Y here if you have this FM radio chip.
index 9430c6a..9dc8baf 100644 (file)
@@ -44,7 +44,7 @@
 
 #define FREQ_MUL (10000000 / 625)
 
-#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status))
+#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
 
 #define DRIVER_NAME "si476x-radio"
 #define DRIVER_CARD "SI476x AM/FM Receiver"
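
The removed 0b10000000 spelling is a GCC extension rather than standard
C, so the mask is rewritten in portable hex. The two are equivalent; the
macro tests bit 7 of the status byte:

    /* 0x80 == 1 << 7 == binary 1000 0000 */
    #define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
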
index f6768ca..15665de 100644 (file)
@@ -1,23 +1,3 @@
-config MEDIA_ATTACH
-       bool "Load and attach frontend and tuner driver modules as needed"
-       depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
-       depends on MODULES
-       default y if !EXPERT
-       help
-         Remove the static dependency of DVB card drivers on all
-         frontend modules for all possible card variants. Instead,
-         allow the card drivers to only load the frontend modules
-         they require.
-
-         Also, tuner module will automatically load a tuner driver
-         when needed, for analog mode.
-
-         This saves several KBytes of memory.
-
-         Note: You will need module-init-tools v3.2 or later for this feature.
-
-         If unsure say Y.
-
 # Analog TV tuners, auto-loaded via tuner.ko
 config MEDIA_TUNER
        tristate
index 22015fe..2cc8ec7 100644 (file)
@@ -376,7 +376,7 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
        struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
        struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
        struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
-       struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf};
+       struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
 
        dev_dbg(&d->udev->dev, "%s:\n", __func__);
 
@@ -481,9 +481,9 @@ static int rtl2832u_read_config(struct dvb_usb_device *d)
                goto found;
        }
 
-       /* check R820T by reading tuner stats at I2C addr 0x1a */
+       /* check R820T ID register; reg=00 val=69 */
        ret = rtl28xxu_ctrl_msg(d, &req_r820t);
-       if (ret == 0) {
+       if (ret == 0 && buf[0] == 0x69) {
                priv->tuner = TUNER_RTL2832_R820T;
                priv->tuner_name = "R820T";
                goto found;
index 3fe207e..d7ff3b9 100644 (file)
@@ -1159,6 +1159,13 @@ static int sd_start(struct gspca_dev *gspca_dev)
                        regs[0x01] = 0x44; /* Select 24 Mhz clock */
                        regs[0x12] = 0x02; /* Set hstart to 2 */
                }
+               break;
+       case SENSOR_PAS202:
+               /* For some unknown reason we need to increase hstart by 1 on
+                  the sn9c103, otherwise we get wrong colors (bayer shift). */
+               if (sd->bridge == BRIDGE_103)
+                       regs[0x12] += 1;
+               break;
        }
        /* Disable compression when the raw bayer format has been selected */
        if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
index 7a6a0d3..81b017a 100644 (file)
@@ -226,7 +226,7 @@ struct pwc_device
        struct list_head queued_bufs;
        spinlock_t queued_bufs_lock; /* Protects queued_bufs */
 
-       /* Note if taking both locks v4l2_lock must always be locked first! */
+       /* If taking both locks vb_queue_lock must always be locked first! */
        struct mutex v4l2_lock;      /* Protects everything else */
        struct mutex vb_queue_lock;  /* Protects vb_queue and capt_file */
 
index ebb8e48..fccd08b 100644 (file)
@@ -1835,6 +1835,8 @@ bool v4l2_ctrl_radio_filter(const struct v4l2_ctrl *ctrl)
 {
        if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
                return true;
+       if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+               return true;
        switch (ctrl->id) {
        case V4L2_CID_AUDIO_MUTE:
        case V4L2_CID_AUDIO_VOLUME:
index f81bda1..7658586 100644 (file)
@@ -243,7 +243,6 @@ static void v4l_print_format(const void *arg, bool write_only)
        const struct v4l2_vbi_format *vbi;
        const struct v4l2_sliced_vbi_format *sliced;
        const struct v4l2_window *win;
-       const struct v4l2_clip *clip;
        unsigned i;
 
        pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -253,7 +252,7 @@ static void v4l_print_format(const void *arg, bool write_only)
                pix = &p->fmt.pix;
                pr_cont(", width=%u, height=%u, "
                        "pixelformat=%c%c%c%c, field=%s, "
-                       "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+                       "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
                        pix->width, pix->height,
                        (pix->pixelformat & 0xff),
                        (pix->pixelformat >>  8) & 0xff,
@@ -284,20 +283,14 @@ static void v4l_print_format(const void *arg, bool write_only)
        case V4L2_BUF_TYPE_VIDEO_OVERLAY:
        case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
                win = &p->fmt.win;
-               pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
-                       "chromakey=0x%08x, bitmap=%p, "
-                       "global_alpha=0x%02x\n",
-                       win->w.width, win->w.height,
-                       win->w.left, win->w.top,
+               /* Note: we can't print the clip list here since the clips
+                * pointer is a userspace pointer, not a kernelspace
+                * pointer. */
+               pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
+                       win->w.width, win->w.height, win->w.left, win->w.top,
                        prt_names(win->field, v4l2_field_names),
-                       win->chromakey, win->bitmap, win->global_alpha);
-               clip = win->clips;
-               for (i = 0; i < win->clipcount; i++) {
-                       printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
-                                       i, clip->c.width, clip->c.height,
-                                       clip->c.left, clip->c.top);
-                       clip = clip->next;
-               }
+                       win->chromakey, win->clipcount, win->clips,
+                       win->bitmap, win->global_alpha);
                break;
        case V4L2_BUF_TYPE_VBI_CAPTURE:
        case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -332,7 +325,7 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
 
        pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
                "height=%u, pixelformat=%c%c%c%c, "
-               "bytesperline=%u sizeimage=%u, colorspace=%d\n",
+               "bytesperline=%u, sizeimage=%u, colorspace=%d\n",
                        p->capability, p->flags, p->base,
                        p->fmt.width, p->fmt.height,
                        (p->fmt.pixelformat & 0xff),
@@ -353,7 +346,7 @@ static void v4l_print_modulator(const void *arg, bool write_only)
        const struct v4l2_modulator *p = arg;
 
        if (write_only)
-               pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
+               pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
        else
                pr_cont("index=%u, name=%.*s, capability=0x%x, "
                        "rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
@@ -445,13 +438,13 @@ static void v4l_print_buffer(const void *arg, bool write_only)
                for (i = 0; i < p->length; ++i) {
                        plane = &p->m.planes[i];
                        printk(KERN_DEBUG
-                               "plane %d: bytesused=%d, data_offset=0x%08x "
+                               "plane %d: bytesused=%d, data_offset=0x%08x, "
                                "offset/userptr=0x%lx, length=%d\n",
                                i, plane->bytesused, plane->data_offset,
                                plane->m.userptr, plane->length);
                }
        } else {
-               pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+               pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
                        p->bytesused, p->m.userptr, p->length);
        }
 
@@ -504,6 +497,8 @@ static void v4l_print_streamparm(const void *arg, bool write_only)
                        c->capability, c->outputmode,
                        c->timeperframe.numerator, c->timeperframe.denominator,
                        c->extendedmode, c->writebuffers);
+       } else {
+               pr_cont("\n");
        }
 }
 
@@ -734,11 +729,11 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
                        p->type);
        switch (p->type) {
        case V4L2_FRMSIZE_TYPE_DISCRETE:
-               pr_cont(" wxh=%ux%u\n",
+               pr_cont(", wxh=%ux%u\n",
                        p->discrete.width, p->discrete.height);
                break;
        case V4L2_FRMSIZE_TYPE_STEPWISE:
-               pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
+               pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
                                p->stepwise.min_width,  p->stepwise.min_height,
                                p->stepwise.step_width, p->stepwise.step_height,
                                p->stepwise.max_width,  p->stepwise.max_height);
@@ -764,12 +759,12 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
                        p->width, p->height, p->type);
        switch (p->type) {
        case V4L2_FRMIVAL_TYPE_DISCRETE:
-               pr_cont(" fps=%d/%d\n",
+               pr_cont(", fps=%d/%d\n",
                                p->discrete.numerator,
                                p->discrete.denominator);
                break;
        case V4L2_FRMIVAL_TYPE_STEPWISE:
-               pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
+               pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
                                p->stepwise.min.numerator,
                                p->stepwise.min.denominator,
                                p->stepwise.max.numerator,
@@ -807,8 +802,8 @@ static void v4l_print_event(const void *arg, bool write_only)
                        pr_cont("value64=%lld, ", c->value64);
                else
                        pr_cont("value=%d, ", c->value);
-               pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
-                               " default_value=%d\n",
+               pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
+                       "default_value=%d\n",
                        c->flags, c->minimum, c->maximum,
                        c->step, c->default_value);
                break;
@@ -845,7 +840,7 @@ static void v4l_print_freq_band(const void *arg, bool write_only)
        const struct v4l2_frequency_band *p = arg;
 
        pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
-                       "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+               "rangelow=%u, rangehigh=%u, modulation=0x%x\n",
                        p->tuner, p->type, p->index,
                        p->capability, p->rangelow,
                        p->rangehigh, p->modulation);
index 66f599f..e96497f 100644 (file)
@@ -205,7 +205,7 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
 static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
        struct v4l2_m2m_dev *m2m_dev;
-       unsigned long flags_job, flags;
+       unsigned long flags_job, flags_out, flags_cap;
 
        m2m_dev = m2m_ctx->m2m_dev;
        dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -223,23 +223,26 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
                return;
        }
 
-       spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+       spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
        if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
-               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No input buffers available\n");
                return;
        }
-       spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+       spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
        if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
-               spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+               spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+                                       flags_cap);
+               spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+                                       flags_out);
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                dprintk("No output buffers available\n");
                return;
        }
-       spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-       spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+       spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+       spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 
        if (m2m_dev->m2m_ops->job_ready
                && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
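
The flags_out/flags_cap renames above fix a reused-variable bug:
spin_lock_irqsave() saves the caller's interrupt state into its flags
argument, so nested acquisitions each need their own copy, restored in
reverse order. A minimal sketch (lock names are illustrative):

    #include <linux/spinlock.h>

    static void nested_locks(spinlock_t *a, spinlock_t *b)
    {
            unsigned long flags_a, flags_b; /* one saved state per level */

            spin_lock_irqsave(a, flags_a);
            spin_lock_irqsave(b, flags_b);  /* reusing flags_a loses state */
            /* ... critical section ... */
            spin_unlock_irqrestore(b, flags_b);
            spin_unlock_irqrestore(a, flags_a);
    }
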
@@ -372,6 +375,20 @@ int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
 /**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                        struct v4l2_create_buffers *create)
+{
+       struct vb2_queue *vq;
+
+       vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+       return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+/**
  * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
  * the type
  */
@@ -486,8 +503,10 @@ unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
 
-       poll_wait(file, &src_q->done_wq, wait);
-       poll_wait(file, &dst_q->done_wq, wait);
+       if (list_empty(&src_q->done_list))
+               poll_wait(file, &src_q->done_wq, wait);
+       if (list_empty(&dst_q->done_list))
+               poll_wait(file, &dst_q->done_wq, wait);
 
        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
index 7d833ee..e3bdc3b 100644 (file)
@@ -2014,7 +2014,8 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
        if (list_empty(&q->queued_list))
                return res | POLLERR;
 
-       poll_wait(file, &q->done_wq, wait);
+       if (list_empty(&q->done_list))
+               poll_wait(file, &q->done_wq, wait);
 
        /*
         * Take first buffer available for dequeuing.
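
Both this hunk and the mem2mem poll change above follow the same idiom:
register on the wait queue only when nothing is ready, since a buffer
already on the done list can be reported immediately. Sketched with an
illustrative helper name:

    #include <linux/list.h>
    #include <linux/poll.h>

    static unsigned int done_poll(struct file *file, poll_table *wait,
                                  struct list_head *done_list,
                                  wait_queue_head_t *done_wq)
    {
            if (list_empty(done_list))
                    poll_wait(file, done_wq, wait); /* arm wakeup only if idle */

            return list_empty(done_list) ? 0 : (POLLIN | POLLRDNORM);
    }
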
index 6e8bc9d..94d957d 100644 (file)
@@ -244,7 +244,7 @@ bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
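
This and the matching debugfs fixes below (bfad, fnic, lpfc) correct the
whence == 2 (SEEK_END) case: the offset is added to the end position,
with callers passing a negative offset to seek backwards. The shared
shape of these handlers, sketched:

    #include <linux/errno.h>
    #include <linux/types.h>

    static loff_t sketch_lseek_pos(loff_t cur, loff_t off, int whence,
                                   loff_t len)
    {
            loff_t pos;

            switch (whence) {
            case 0:                         /* SEEK_SET */
                    pos = off;
                    break;
            case 1:                         /* SEEK_CUR */
                    pos = cur + off;
                    break;
            case 2:                         /* SEEK_END: add, don't subtract */
                    pos = len + off;
                    break;
            default:
                    return -EINVAL;
            }
            return (pos < 0 || pos > len) ? -EINVAL : pos;
    }
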
index 9544cdc..e79e006 100644 (file)
@@ -811,6 +811,70 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
        return pcidev->irq;
 }
 
+static struct iosapic_info *first_isi = NULL;
+
+#ifdef CONFIG_64BIT
+int iosapic_serial_irq(int num)
+{
+       struct iosapic_info *isi = first_isi;
+       struct irt_entry *irte = NULL;  /* only used if PAT PDC */
+       struct vector_info *vi;
+       int isi_line;   /* line used by device */
+
+       /* lookup IRT entry for isi/slot/pin set */
+       irte = &irt_cell[num];
+
+       DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
+               irte,
+               irte->entry_type,
+               irte->entry_length,
+               irte->polarity_trigger,
+               irte->src_bus_irq_devno,
+               irte->src_bus_id,
+               irte->src_seg_id,
+               irte->dest_iosapic_intin,
+               (u32) irte->dest_iosapic_addr);
+       isi_line = irte->dest_iosapic_intin;
+
+       /* get vector info for this input line */
+       vi = isi->isi_vector + isi_line;
+       DBG_IRT("iosapic_serial_irq:  line %d vi 0x%p\n", isi_line, vi);
+
+       /* If this IRQ line has already been setup, skip it */
+       if (vi->irte)
+               goto out;
+
+       vi->irte = irte;
+
+       /*
+        * Allocate processor IRQ
+        *
+        * XXX/FIXME The txn_alloc_irq() code and related code should be
+        * moved to enable_irq(). That way we only allocate processor IRQ
+        * bits for devices that actually have drivers claiming them.
+        * Right now we assign an IRQ to every PCI device present,
+        * regardless of whether it's used or not.
+        */
+       vi->txn_irq = txn_alloc_irq(8);
+
+       if (vi->txn_irq < 0)
+               panic("I/O sapic: couldn't get TXN IRQ\n");
+
+       /* enable_irq() will use txn_* to program IRdT */
+       vi->txn_addr = txn_alloc_addr(vi->txn_irq);
+       vi->txn_data = txn_alloc_data(vi->txn_irq);
+
+       vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
+       vi->eoi_data = cpu_to_le32(vi->txn_data);
+
+       cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
+
+ out:
+
+       return vi->txn_irq;
+}
+#endif
+
 
 /*
 ** squirrel away the I/O Sapic Version
@@ -877,6 +941,8 @@ void *iosapic_register(unsigned long hpa)
                vip->irqline = (unsigned char) cnt;
                vip->iosapic = isi;
        }
+       if (!first_isi)
+               first_isi = isi;
        return isi;
 }
 
index 439c012..b63d534 100644 (file)
@@ -186,7 +186,7 @@ bfad_debugfs_lseek(struct file *file, loff_t offset, int orig)
                file->f_pos += offset;
                break;
        case 2:
-               file->f_pos = debug->buffer_len - offset;
+               file->f_pos = debug->buffer_len + offset;
                break;
        default:
                return -EINVAL;
index adc1f7f..85e1ffd 100644 (file)
@@ -174,7 +174,7 @@ static loff_t fnic_trace_debugfs_lseek(struct file *file,
                pos = file->f_pos + offset;
                break;
        case 2:
-               pos = fnic_dbg_prt->buffer_len - offset;
+               pos = fnic_dbg_prt->buffer_len + offset;
        }
        return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
                          -EINVAL : (file->f_pos = pos);
index f63f5ff..f525ecb 100644 (file)
@@ -1178,7 +1178,7 @@ lpfc_debugfs_lseek(struct file *file, loff_t off, int whence)
                pos = file->f_pos + off;
                break;
        case 2:
-               pos = debug->len - off;
+               pos = debug->len + off;
        }
        return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
 }
index 7a3870f..66b0b26 100644 (file)
@@ -688,8 +688,12 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
                 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
                 * for qla_tgt_xmit_response LLD code
                 */
+               if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
+                       se_cmd->residual_count = 0;
+               }
                se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
-               se_cmd->residual_count = se_cmd->data_length;
+               se_cmd->residual_count += se_cmd->data_length;
 
                cmd->bufflen = 0;
        }
index 2e4a28b..12f321d 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_DM365_VPFE
        tristate "DM365 VPFE Media Controller Capture Driver"
-       depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+       depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
        select VIDEOBUF2_DMA_CONTIG
        help
          Support for DM365 VPFE based Media Controller Capture driver.
index b88e1dd..d8ce20d 100644 (file)
@@ -639,7 +639,8 @@ static int vpfe_probe(struct platform_device *pdev)
        if (ret)
                goto probe_free_dev_mem;
 
-       if (vpfe_initialize_modules(vpfe_dev, pdev))
+       ret = vpfe_initialize_modules(vpfe_dev, pdev);
+       if (ret)
                goto probe_disable_clock;
 
        vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -663,7 +664,8 @@ static int vpfe_probe(struct platform_device *pdev)
        /* set the driver data in platform device */
        platform_set_drvdata(pdev, vpfe_dev);
        /* register subdevs/entities */
-       if (vpfe_register_entities(vpfe_dev))
+       ret = vpfe_register_entities(vpfe_dev);
+       if (ret)
                goto probe_out_v4l2_unregister;
 
        ret = vpfe_attach_irq(vpfe_dev);
index df6569b..34f3b6d 100644 (file)
@@ -5,6 +5,7 @@ config SOLO6X10
        select VIDEOBUF2_DMA_SG
        select VIDEOBUF2_DMA_CONTIG
        select SND_PCM
+       select FONT_8x16
        ---help---
          This driver supports the Softlogic based MPEG-4 and h.264 codec
          cards.
index 13e9e71..8d8b3ff 100644 (file)
@@ -155,7 +155,7 @@ static ssize_t lio_target_np_store_iser(
        struct iscsi_tpg_np *tpg_np_iser = NULL;
        char *endptr;
        u32 op;
-       int rc;
+       int rc = 0;
 
        op = simple_strtoul(page, &endptr, 0);
        if ((op != 1) && (op != 0)) {
@@ -174,31 +174,32 @@ static ssize_t lio_target_np_store_iser(
                return -EINVAL;
 
        if (op) {
-               int rc = request_module("ib_isert");
-               if (rc != 0)
+               rc = request_module("ib_isert");
+               if (rc != 0) {
                        pr_warn("Unable to request_module for ib_isert\n");
+                       rc = 0;
+               }
 
                tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
                                np->np_ip, tpg_np, ISCSI_INFINIBAND);
-               if (!tpg_np_iser || IS_ERR(tpg_np_iser))
+               if (IS_ERR(tpg_np_iser)) {
+                       rc = PTR_ERR(tpg_np_iser);
                        goto out;
+               }
        } else {
                tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
-               if (!tpg_np_iser)
-                       goto out;
-
-               rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
-               if (rc < 0)
-                       goto out;
+               if (tpg_np_iser) {
+                       rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+                       if (rc < 0)
+                               goto out;
+               }
        }
 
-       printk("lio_target_np_store_iser() done, op: %d\n", op);
-
        iscsit_put_tpg(tpg);
        return count;
 out:
        iscsit_put_tpg(tpg);
-       return -EINVAL;
+       return rc;
 }
 
 TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
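
The rework above changes the store handler from a blanket -EINVAL to returning the real error, while treating a failed request_module() (and a missing portal on the teardown path) as non-fatal. A hedged sketch of that return convention; every name below is illustrative, not the iSCSI target API:

static ssize_t example_store(void *obj, const char *page, size_t count)
{
        int rc;

        rc = optional_step(obj);        /* e.g. a module load: best effort */
        if (rc != 0)
                rc = 0;                 /* warn and carry on */

        rc = required_step(obj);        /* hypothetical */
        if (rc < 0)
                return rc;              /* fail with the real errno */

        return count;                   /* success: consume the write */
}
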
index 8e6298c..dcb199d 100644 (file)
@@ -842,11 +842,11 @@ int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
                return 0;
 
        sess->time2retain_timer_flags |= ISCSI_TF_STOP;
-       spin_unlock_bh(&se_tpg->session_lock);
+       spin_unlock(&se_tpg->session_lock);
 
        del_timer_sync(&sess->time2retain_timer);
 
-       spin_lock_bh(&se_tpg->session_lock);
+       spin_lock(&se_tpg->session_lock);
        sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
        pr_debug("Stopped Time2Retain Timer for SID: %u\n",
                        sess->sid);
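
The _bh variants are dropped above because the caller already holds se_tpg->session_lock with bottom halves disabled; re-enabling them here would be unbalanced. The unlock/del_timer_sync()/relock shape itself is the standard way to avoid deadlocking against a timer callback that takes the same lock. A sketch of the pattern (TF_STOP and TF_RUNNING are illustrative flags, not the iSCSI ones):

static void example_stop_timer(spinlock_t *lock, struct timer_list *t,
                               u8 *flags)
{
        *flags |= TF_STOP;      /* tell the callback to bail out early */
        spin_unlock(lock);      /* BHs stay disabled, per the caller */

        del_timer_sync(t);      /* may wait for a running callback that
                                 * itself takes "lock" */

        spin_lock(lock);
        *flags &= ~TF_RUNNING;
}
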
index bb5d5c5..3402241 100644 (file)
@@ -984,8 +984,6 @@ int iscsi_target_setup_login_socket(
        }
 
        np->np_transport = t;
-       printk("Set np->np_transport to %p -> %s\n", np->np_transport,
-                               np->np_transport->name);
        return 0;
 }
 
@@ -1002,7 +1000,6 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
 
        conn->sock = new_sock;
        conn->login_family = np->np_sockaddr.ss_family;
-       printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
 
        if (np->np_sockaddr.ss_family == AF_INET6) {
                memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
index 7ad9120..cd5018f 100644 (file)
@@ -721,9 +721,6 @@ int iscsi_target_locate_portal(
 
                start += strlen(key) + strlen(value) + 2;
        }
-
-       printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
-
        /*
         * See 5.3.  Login Phase.
         */
index 59bfaec..abfd990 100644 (file)
@@ -244,14 +244,9 @@ static void pty_flush_buffer(struct tty_struct *tty)
 
 static int pty_open(struct tty_struct *tty, struct file *filp)
 {
-       int     retval = -ENODEV;
-
        if (!tty || !tty->link)
-               goto out;
-
-       set_bit(TTY_IO_ERROR, &tty->flags);
+               return -ENODEV;
 
-       retval = -EIO;
        if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
                goto out;
        if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
@@ -262,9 +257,11 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
        clear_bit(TTY_IO_ERROR, &tty->flags);
        clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
        set_bit(TTY_THROTTLED, &tty->flags);
-       retval = 0;
+       return 0;
+
 out:
-       return retval;
+       set_bit(TTY_IO_ERROR, &tty->flags);
+       return -EIO;
 }
 
 static void pty_set_termios(struct tty_struct *tty,
index 097dff9..bb91b47 100644 (file)
@@ -30,6 +30,12 @@ static int __init serial_init_chip(struct parisc_device *dev)
        unsigned long address;
        int err;
 
+#ifdef CONFIG_64BIT
+       extern int iosapic_serial_irq(int cellnum);
+       if (!dev->irq && (dev->id.sversion == 0xad))
+               dev->irq = iosapic_serial_irq(dev->mod_index-1);
+#endif
+
        if (!dev->irq) {
                /* We find some unattached serial ports by walking native
                 * busses.  These should be silently ignored.  Otherwise,
@@ -51,7 +57,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
        memset(&uart, 0, sizeof(uart));
        uart.port.iotype        = UPIO_MEM;
        /* 7.272727MHz on Lasi.  Assumed the same for Dino, Wax and Timi. */
-       uart.port.uartclk       = 7272727;
+       uart.port.uartclk       = (dev->id.sversion != 0xad) ?
+                                       7272727 : 1843200;
        uart.port.mapbase       = address;
        uart.port.membase       = ioremap_nocache(address, 16);
        uart.port.irq   = dev->irq;
@@ -73,6 +80,7 @@ static struct parisc_device_id serial_tbl[] = {
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
        { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
+       { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
        { 0 }
 };
 
index fc2c06c..2bd78e2 100644 (file)
@@ -289,13 +289,10 @@ static int vt_disallocate(unsigned int vc_num)
        struct vc_data *vc = NULL;
        int ret = 0;
 
-       if (!vc_num)
-               return 0;
-
        console_lock();
        if (VT_BUSY(vc_num))
                ret = -EBUSY;
-       else
+       else if (vc_num)
                vc = vc_deallocate(vc_num);
        console_unlock();
 
index 7ef3eb8..2311b1e 100644 (file)
@@ -4,11 +4,17 @@
 menuconfig USB_PHY
        bool "USB Physical Layer drivers"
        help
-         USB controllers (those which are host, device or DRD) need a
-         device to handle the physical layer signalling, commonly called
-         a PHY.
+         Most USB controllers have the physical layer signalling part
+         (commonly called a PHY) built in.  However, dual-role devices
+         (a.k.a. USB on-the-go) which support being USB master or slave
+         with the same connector often use an external PHY.
 
-         The following drivers add support for such PHY devices.
+         The drivers in this submenu add support for such PHY devices.
+         They are not needed for standard master-only (or the vast
+         majority of slave-only) USB interfaces.
+
+         If you're not sure if this applies to you, it probably doesn't;
+         say N here.
 
 if USB_PHY
 
index c92c5ed..e581c25 100644 (file)
@@ -172,7 +172,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
        { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
        { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
-       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+       { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
        { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
 };
 
index b353e7e..4a2423e 100644 (file)
@@ -52,7 +52,9 @@
 
 /* Abbott Diabetics vendor and product ids */
 #define ABBOTT_VENDOR_ID               0x1a61
-#define ABBOTT_PRODUCT_ID              0x3410
+#define ABBOTT_STEREO_PLUG_ID          0x3410
+#define ABBOTT_PRODUCT_ID              ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID           0x3420
 
 /* Commands */
 #define TI_GET_VERSION                 0x01
index eaa75f7..6812158 100644 (file)
@@ -132,6 +132,12 @@ extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 
 /*
+ * splice.c
+ */
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+               loff_t *opos, size_t len, unsigned int flags);
+
+/*
  * pipe.c
  */
 extern const struct file_operations pipefifo_fops;
index 0343000..2cefa41 100644 (file)
@@ -1064,6 +1064,7 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        struct fd in, out;
        struct inode *in_inode, *out_inode;
        loff_t pos;
+       loff_t out_pos;
        ssize_t retval;
        int fl;
 
@@ -1077,12 +1078,14 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (!(in.file->f_mode & FMODE_READ))
                goto fput_in;
        retval = -ESPIPE;
-       if (!ppos)
-               ppos = &in.file->f_pos;
-       else
+       if (!ppos) {
+               pos = in.file->f_pos;
+       } else {
+               pos = *ppos;
                if (!(in.file->f_mode & FMODE_PREAD))
                        goto fput_in;
-       retval = rw_verify_area(READ, in.file, ppos, count);
+       }
+       retval = rw_verify_area(READ, in.file, &pos, count);
        if (retval < 0)
                goto fput_in;
        count = retval;
@@ -1099,7 +1102,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        retval = -EINVAL;
        in_inode = file_inode(in.file);
        out_inode = file_inode(out.file);
-       retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
+       out_pos = out.file->f_pos;
+       retval = rw_verify_area(WRITE, out.file, &out_pos, count);
        if (retval < 0)
                goto fput_out;
        count = retval;
@@ -1107,7 +1111,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (!max)
                max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
 
-       pos = *ppos;
        if (unlikely(pos + count > max)) {
                retval = -EOVERFLOW;
                if (pos >= max)
@@ -1126,18 +1129,23 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos,
        if (in.file->f_flags & O_NONBLOCK)
                fl = SPLICE_F_NONBLOCK;
 #endif
-       retval = do_splice_direct(in.file, ppos, out.file, count, fl);
+       retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
 
        if (retval > 0) {
                add_rchar(current, retval);
                add_wchar(current, retval);
                fsnotify_access(in.file);
                fsnotify_modify(out.file);
+               out.file->f_pos = out_pos;
+               if (ppos)
+                       *ppos = pos;
+               else
+                       in.file->f_pos = pos;
        }
 
        inc_syscr(current);
        inc_syscw(current);
-       if (*ppos > max)
+       if (pos > max)
                retval = -EOVERFLOW;
 
 fput_out:
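
The do_sendfile() rework above switches to operating on local copies of both file positions and publishing them back to ->f_pos (or *ppos) only after a successful transfer, instead of letting the splice machinery write through ->f_pos concurrently. A condensed sketch of that copy-then-commit shape ("xfer" is a hypothetical stand-in for do_splice_direct()):

static ssize_t example_sendfile(struct file *in, struct file *out,
                                loff_t *ppos, size_t count)
{
        loff_t pos = ppos ? *ppos : in->f_pos;
        loff_t out_pos = out->f_pos;
        ssize_t ret;

        ret = xfer(in, &pos, out, &out_pos, count);     /* hypothetical */
        if (ret > 0) {
                /* Commit both positions only once the transfer worked. */
                out->f_pos = out_pos;
                if (ppos)
                        *ppos = pos;
                else
                        in->f_pos = pos;
        }
        return ret;
}
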
index e6b2559..9eca476 100644 (file)
@@ -1274,7 +1274,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
 {
        struct file *file = sd->u.file;
 
-       return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+       return do_splice_from(pipe, file, sd->opos, sd->total_len,
                              sd->flags);
 }
 
@@ -1294,7 +1294,7 @@ static int direct_splice_actor(struct pipe_inode_info *pipe,
  *
  */
 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-                     size_t len, unsigned int flags)
+                     loff_t *opos, size_t len, unsigned int flags)
 {
        struct splice_desc sd = {
                .len            = len,
@@ -1302,6 +1302,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
                .flags          = flags,
                .pos            = *ppos,
                .u.file         = out,
+               .opos           = opos,
        };
        long ret;
 
@@ -1325,7 +1326,7 @@ static long do_splice(struct file *in, loff_t __user *off_in,
 {
        struct pipe_inode_info *ipipe;
        struct pipe_inode_info *opipe;
-       loff_t offset, *off;
+       loff_t offset;
        long ret;
 
        ipipe = get_pipe_info(in);
@@ -1356,13 +1357,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                                return -EINVAL;
                        if (copy_from_user(&offset, off_out, sizeof(loff_t)))
                                return -EFAULT;
-                       off = &offset;
-               } else
-                       off = &out->f_pos;
+               } else {
+                       offset = out->f_pos;
+               }
 
-               ret = do_splice_from(ipipe, out, off, len, flags);
+               ret = do_splice_from(ipipe, out, &offset, len, flags);
 
-               if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
+               if (!off_out)
+                       out->f_pos = offset;
+               else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
                        ret = -EFAULT;
 
                return ret;
@@ -1376,13 +1379,15 @@ static long do_splice(struct file *in, loff_t __user *off_in,
                                return -EINVAL;
                        if (copy_from_user(&offset, off_in, sizeof(loff_t)))
                                return -EFAULT;
-                       off = &offset;
-               } else
-                       off = &in->f_pos;
+               } else {
+                       offset = in->f_pos;
+               }
 
-               ret = do_splice_to(in, off, opipe, len, flags);
+               ret = do_splice_to(in, &offset, opipe, len, flags);
 
-               if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
+               if (!off_in)
+                       in->f_pos = offset;
+               else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
                        ret = -EFAULT;
 
                return ret;
index 636c59f..c13c919 100644 (file)
@@ -382,6 +382,7 @@ const char *acpi_power_state_string(int state);
 int acpi_device_get_power(struct acpi_device *device, int *state);
 int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
+int acpi_device_fix_up_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 
index 365f4a6..fc09d7b 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/vtime.h>
 #include <asm/ptrace.h>
 
 struct context_tracking {
@@ -19,6 +20,26 @@ struct context_tracking {
        } state;
 };
 
+static inline void __guest_enter(void)
+{
+       /*
+        * This is running in ioctl context so we can avoid
+        * the call to vtime_account() with its unnecessary idle check.
+        */
+       vtime_account_system(current);
+       current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+       /*
+        * This is running in ioctl context so we can avoid
+        * the call to vtime_account() with its unnecessary idle check.
+        */
+       vtime_account_system(current);
+       current->flags &= ~PF_VCPU;
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
@@ -35,6 +56,9 @@ static inline bool context_tracking_active(void)
 extern void user_enter(void);
 extern void user_exit(void);
 
+extern void guest_enter(void);
+extern void guest_exit(void);
+
 static inline enum ctx_state exception_enter(void)
 {
        enum ctx_state prev_ctx;
@@ -57,6 +81,17 @@ extern void context_tracking_task_switch(struct task_struct *prev,
 static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+
+static inline void guest_enter(void)
+{
+       __guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+       __guest_exit();
+}
+
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
index 43db02e..65c2be2 100644 (file)
@@ -2414,8 +2414,6 @@ extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
                struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
                struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-               size_t len, unsigned int flags);
 
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
index f0eea07..8db53cf 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/ratelimit.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -760,42 +761,6 @@ static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
 }
 #endif
 
-static inline void __guest_enter(void)
-{
-       /*
-        * This is running in ioctl context so we can avoid
-        * the call to vtime_account() with its unnecessary idle check.
-        */
-       vtime_account_system(current);
-       current->flags |= PF_VCPU;
-}
-
-static inline void __guest_exit(void)
-{
-       /*
-        * This is running in ioctl context so we can avoid
-        * the call to vtime_account() with its unnecessary idle check.
-        */
-       vtime_account_system(current);
-       current->flags &= ~PF_VCPU;
-}
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void guest_enter(void);
-extern void guest_exit(void);
-
-#else /* !CONFIG_CONTEXT_TRACKING */
-static inline void guest_enter(void)
-{
-       __guest_enter();
-}
-
-static inline void guest_exit(void)
-{
-       __guest_exit();
-}
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
 static inline void kvm_guest_enter(void)
 {
        unsigned long flags;
index f463a46..c5b6dbf 100644 (file)
@@ -389,8 +389,7 @@ struct perf_event {
        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
-       int                             mmap_locked;
-       struct user_struct              *mmap_user;
+
        struct ring_buffer              *rb;
        struct list_head                rb_entry;
 
index 87a03c7..f5d4723 100644 (file)
@@ -33,9 +33,25 @@ do { \
                preempt_schedule(); \
 } while (0)
 
+#ifdef CONFIG_CONTEXT_TRACKING
+
+void preempt_schedule_context(void);
+
+#define preempt_check_resched_context() \
+do { \
+       if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+               preempt_schedule_context(); \
+} while (0)
+#else
+
+#define preempt_check_resched_context() preempt_check_resched()
+
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #else /* !CONFIG_PREEMPT */
 
 #define preempt_check_resched()                do { } while (0)
+#define preempt_check_resched_context()        do { } while (0)
 
 #endif /* CONFIG_PREEMPT */
 
@@ -88,7 +104,7 @@ do { \
 do { \
        preempt_enable_no_resched_notrace(); \
        barrier(); \
-       preempt_check_resched(); \
+       preempt_check_resched_context(); \
 } while (0)
 
 #else /* !CONFIG_PREEMPT_COUNT */
index 09a545a..74575cb 100644 (file)
@@ -35,6 +35,7 @@ struct splice_desc {
                void *data;             /* cookie */
        } u;
        loff_t pos;                     /* file position */
+       loff_t *opos;                   /* sendfile: output position */
        size_t num_spliced;             /* number of bytes already spliced */
        bool need_wakeup;               /* need to wake up writer */
 };
index 71a5782..b1dd2db 100644 (file)
@@ -34,7 +34,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
 }
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else
 static inline void vtime_account_irq_exit(struct task_struct *tsk)
 {
@@ -45,7 +45,7 @@ static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
 static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
index d3eef01..0f4555b 100644 (file)
@@ -110,6 +110,8 @@ int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                  struct v4l2_buffer *buf);
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_buffer *buf);
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+                        struct v4l2_create_buffers *create);
 
 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
                   struct v4l2_exportbuffer *eb);
index 65349f0..383f823 100644 (file)
@@ -15,7 +15,6 @@
  */
 
 #include <linux/context_tracking.h>
-#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/hardirq.h>
@@ -71,6 +70,46 @@ void user_enter(void)
        local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+       struct thread_info *ti = current_thread_info();
+       enum ctx_state prev_ctx;
+
+       if (likely(ti->preempt_count || irqs_disabled()))
+               return;
+
+       /*
+        * Need to disable preemption in case user_exit() is traced
+        * and the tracer calls preempt_enable_notrace() causing
+        * an infinite recursion.
+        */
+       preempt_disable_notrace();
+       prev_ctx = exception_enter();
+       preempt_enable_no_resched_notrace();
+
+       preempt_schedule();
+
+       preempt_disable_notrace();
+       exception_exit(prev_ctx);
+       preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
 
 /**
  * user_exit - Inform the context tracking that the CPU is
index d5585f5..e695c0a 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/cpu.h>
 #include <linux/tick.h>
 #include <linux/mm.h>
+#include <linux/stackprotector.h>
 
 #include <asm/tlb.h>
 
@@ -58,6 +59,7 @@ void __weak arch_cpu_idle_dead(void) { }
 void __weak arch_cpu_idle(void)
 {
        cpu_idle_force_poll = 1;
+       local_irq_enable();
 }
 
 /*
@@ -112,6 +114,21 @@ static void cpu_idle_loop(void)
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
+       /*
+        * This #ifdef needs to die, but it's too late in the cycle to
+        * make this generic (arm and sh have never invoked the canary
+        * init for the non-boot CPUs!). Will be fixed in 3.11.
+        */
+#ifdef CONFIG_X86
+       /*
+        * If we're the non-boot CPU, nothing set the stack canary up
+        * for us. The boot CPU already has it initialized but no harm
+        * in doing it again. This is a good place for updating it, as
+        * we won't ever return from this function (so the invalid
+        * canaries already on the stack won't ever trigger).
+        */
+       boot_init_stack_canary();
+#endif
        current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
index 9dc297f..b391907 100644 (file)
@@ -196,9 +196,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-                              struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2918,6 +2915,7 @@ static void free_event_rcu(struct rcu_head *head)
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2942,15 +2940,30 @@ static void free_event(struct perf_event *event)
                if (has_branch_stack(event)) {
                        static_key_slow_dec_deferred(&perf_sched_events);
                        /* is system-wide event */
-                       if (!(event->attach_state & PERF_ATTACH_TASK))
+                       if (!(event->attach_state & PERF_ATTACH_TASK)) {
                                atomic_dec(&per_cpu(perf_branch_stack_events,
                                                    event->cpu));
+                       }
                }
        }
 
        if (event->rb) {
-               ring_buffer_put(event->rb);
-               event->rb = NULL;
+               struct ring_buffer *rb;
+
+               /*
+                * Can happen when we close an event with re-directed output.
+                *
+                * Since we have a 0 refcount, perf_mmap_close() will skip
+                * over us; possibly making our ring_buffer_put() the last.
+                */
+               mutex_lock(&event->mmap_mutex);
+               rb = event->rb;
+               if (rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* could be last */
+               }
+               mutex_unlock(&event->mmap_mutex);
        }
 
        if (is_cgroup_event(event))
@@ -3188,30 +3201,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
        unsigned int events = POLL_HUP;
 
        /*
-        * Race between perf_event_set_output() and perf_poll(): perf_poll()
-        * grabs the rb reference but perf_event_set_output() overrides it.
-        * Here is the timeline for two threads T1, T2:
-        * t0: T1, rb = rcu_dereference(event->rb)
-        * t1: T2, old_rb = event->rb
-        * t2: T2, event->rb = new rb
-        * t3: T2, ring_buffer_detach(old_rb)
-        * t4: T1, ring_buffer_attach(rb1)
-        * t5: T1, poll_wait(event->waitq)
-        *
-        * To avoid this problem, we grab mmap_mutex in perf_poll()
-        * thereby ensuring that the assignment of the new ring buffer
-        * and the detachment of the old buffer appear atomic to perf_poll()
+        * Pin the event->rb by taking event->mmap_mutex; otherwise
+        * perf_event_set_output() can swizzle our rb and make us miss wakeups.
         */
        mutex_lock(&event->mmap_mutex);
-
-       rcu_read_lock();
-       rb = rcu_dereference(event->rb);
-       if (rb) {
-               ring_buffer_attach(event, rb);
+       rb = event->rb;
+       if (rb)
                events = atomic_xchg(&rb->poll, 0);
-       }
-       rcu_read_unlock();
-
        mutex_unlock(&event->mmap_mutex);
 
        poll_wait(file, &event->waitq, wait);
@@ -3521,16 +3517,12 @@ static void ring_buffer_attach(struct perf_event *event,
                return;
 
        spin_lock_irqsave(&rb->event_lock, flags);
-       if (!list_empty(&event->rb_entry))
-               goto unlock;
-
-       list_add(&event->rb_entry, &rb->event_list);
-unlock:
+       if (list_empty(&event->rb_entry))
+               list_add(&event->rb_entry, &rb->event_list);
        spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-                              struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
        unsigned long flags;
 
@@ -3549,13 +3541,10 @@ static void ring_buffer_wakeup(struct perf_event *event)
 
        rcu_read_lock();
        rb = rcu_dereference(event->rb);
-       if (!rb)
-               goto unlock;
-
-       list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-               wake_up_all(&event->waitq);
-
-unlock:
+       if (rb) {
+               list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+                       wake_up_all(&event->waitq);
+       }
        rcu_read_unlock();
 }
 
@@ -3584,18 +3573,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-       struct perf_event *event, *n;
-       unsigned long flags;
-
        if (!atomic_dec_and_test(&rb->refcount))
                return;
 
-       spin_lock_irqsave(&rb->event_lock, flags);
-       list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-               list_del_init(&event->rb_entry);
-               wake_up_all(&event->waitq);
-       }
-       spin_unlock_irqrestore(&rb->event_lock, flags);
+       WARN_ON_ONCE(!list_empty(&rb->event_list));
 
        call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3605,26 +3586,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
        struct perf_event *event = vma->vm_file->private_data;
 
        atomic_inc(&event->mmap_count);
+       atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
        struct perf_event *event = vma->vm_file->private_data;
 
-       if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-               unsigned long size = perf_data_size(event->rb);
-               struct user_struct *user = event->mmap_user;
-               struct ring_buffer *rb = event->rb;
+       struct ring_buffer *rb = event->rb;
+       struct user_struct *mmap_user = rb->mmap_user;
+       int mmap_locked = rb->mmap_locked;
+       unsigned long size = perf_data_size(rb);
+
+       atomic_dec(&rb->mmap_count);
+
+       if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+               return;
 
-               atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->pinned_vm -= event->mmap_locked;
-               rcu_assign_pointer(event->rb, NULL);
-               ring_buffer_detach(event, rb);
+       /* Detach current event from the buffer. */
+       rcu_assign_pointer(event->rb, NULL);
+       ring_buffer_detach(event, rb);
+       mutex_unlock(&event->mmap_mutex);
+
+       /* If there's still other mmap()s of this buffer, we're done. */
+       if (atomic_read(&rb->mmap_count)) {
+               ring_buffer_put(rb); /* can't be last */
+               return;
+       }
+
+       /*
+        * No other mmap()s, detach from all other events that might redirect
+        * into the now unreachable buffer. Somewhat complicated by the
+        * fact that rb::event_lock otherwise nests inside mmap_mutex.
+        */
+again:
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+               if (!atomic_long_inc_not_zero(&event->refcount)) {
+                       /*
+                        * This event is en-route to free_event() which will
+                        * detach it and remove it from the list.
+                        */
+                       continue;
+               }
+               rcu_read_unlock();
+
+               mutex_lock(&event->mmap_mutex);
+               /*
+                * Check we didn't race with perf_event_set_output() which can
+                * swizzle the rb from under us while we were waiting to
+                * acquire mmap_mutex.
+                *
+                * If we find a different rb, ignore this event; the next
+                * iteration will no longer find it on the list. We have to
+                * still restart the iteration to make sure we're not now
+                * iterating the wrong list.
+                */
+               if (event->rb == rb) {
+                       rcu_assign_pointer(event->rb, NULL);
+                       ring_buffer_detach(event, rb);
+                       ring_buffer_put(rb); /* can't be last, we still have one */
+               }
                mutex_unlock(&event->mmap_mutex);
+               put_event(event);
 
-               ring_buffer_put(rb);
-               free_uid(user);
+               /*
+                * Restart the iteration; either we're on the wrong list or
+                * destroyed its integrity by doing a deletion.
+                */
+               goto again;
        }
+       rcu_read_unlock();
+
+       /*
+        * It could be there are still a few 0-ref events on the list; they'll
+        * get cleaned up by free_event() -- they'll also still have their
+        * ref on the rb and will free it whenever they are done with it.
+        *
+        * Aside from that, this buffer is 'fully' detached and unmapped,
+        * undo the VM accounting.
+        */
+
+       atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+       vma->vm_mm->pinned_vm -= mmap_locked;
+       free_uid(mmap_user);
+
+       ring_buffer_put(rb); /* could be last */
 }
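
The detach loop in perf_mmap_close() above is the classic restartable RCU-list walk for when the per-object lock cannot be taken under rcu_read_lock(): pin the entry with an inc-not-zero, drop the RCU read lock, take the mutex, recheck the pointer (it may have been swizzled while we waited), and restart the walk because the list may have changed. A generic sketch of the idiom; the struct and helpers are hypothetical:

struct obj {
        atomic_t                refcount;
        struct mutex            lock;
        struct list_head        node;
        void                    *target;
};

static void example_detach_all(struct list_head *head, void *victim)
{
        struct obj *obj;

again:
        rcu_read_lock();
        list_for_each_entry_rcu(obj, head, node) {
                if (!atomic_inc_not_zero(&obj->refcount))
                        continue;       /* already dying, skip it */
                rcu_read_unlock();

                mutex_lock(&obj->lock);
                if (obj->target == victim)      /* recheck under the lock */
                        obj->target = NULL;
                mutex_unlock(&obj->lock);
                obj_put(obj);                   /* hypothetical ref drop */

                goto again;     /* the list may have changed under us */
        }
        rcu_read_unlock();
}
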
 
 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3674,12 +3729,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                return -EINVAL;
 
        WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
        mutex_lock(&event->mmap_mutex);
        if (event->rb) {
-               if (event->rb->nr_pages == nr_pages)
-                       atomic_inc(&event->rb->refcount);
-               else
+               if (event->rb->nr_pages != nr_pages) {
                        ret = -EINVAL;
+                       goto unlock;
+               }
+
+               if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+                       /*
+                        * Raced against perf_mmap_close() through
+                        * perf_event_set_output(). Try again, hope for better
+                        * luck.
+                        */
+                       mutex_unlock(&event->mmap_mutex);
+                       goto again;
+               }
+
                goto unlock;
        }
 
@@ -3720,12 +3787,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                ret = -ENOMEM;
                goto unlock;
        }
-       rcu_assign_pointer(event->rb, rb);
+
+       atomic_set(&rb->mmap_count, 1);
+       rb->mmap_locked = extra;
+       rb->mmap_user = get_current_user();
 
        atomic_long_add(user_extra, &user->locked_vm);
-       event->mmap_locked = extra;
-       event->mmap_user = get_current_user();
-       vma->vm_mm->pinned_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += extra;
+
+       ring_buffer_attach(event, rb);
+       rcu_assign_pointer(event->rb, rb);
 
        perf_event_update_userpage(event);
 
@@ -3734,7 +3805,11 @@ unlock:
                atomic_inc(&event->mmap_count);
        mutex_unlock(&event->mmap_mutex);
 
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       /*
+        * Since pinned accounting is per vm we cannot allow fork() to copy our
+        * vma.
+        */
+       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = &perf_mmap_vmops;
 
        return ret;
@@ -6412,6 +6487,8 @@ set:
        if (atomic_read(&event->mmap_count))
                goto unlock;
 
+       old_rb = event->rb;
+
        if (output_event) {
                /* get the rb we want to redirect to */
                rb = ring_buffer_get(output_event);
@@ -6419,16 +6496,28 @@ set:
                        goto unlock;
        }
 
-       old_rb = event->rb;
-       rcu_assign_pointer(event->rb, rb);
        if (old_rb)
                ring_buffer_detach(event, old_rb);
+
+       if (rb)
+               ring_buffer_attach(event, rb);
+
+       rcu_assign_pointer(event->rb, rb);
+
+       if (old_rb) {
+               ring_buffer_put(old_rb);
+               /*
+                * Since we detached the old rb before attaching the new one,
+                * we could have missed a wakeup. Provide it now.
+                */
+               wake_up_all(&event->waitq);
+       }
+
        ret = 0;
 unlock:
        mutex_unlock(&event->mmap_mutex);
 
-       if (old_rb)
-               ring_buffer_put(old_rb);
 out:
        return ret;
 }
index eb675c4..ca65997 100644 (file)
@@ -31,6 +31,10 @@ struct ring_buffer {
        spinlock_t                      event_lock;
        struct list_head                event_list;
 
+       atomic_t                        mmap_count;
+       unsigned long                   mmap_locked;
+       struct user_struct              *mmap_user;
+
        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
 };
index 3fed7f0..bddf3b2 100644 (file)
@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if needed) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
        /* Ditto to do_optimize_kprobes */
        get_online_cpus();
        mutex_lock(&text_mutex);
-       arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+       arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
        /* Loop free_list for disarming */
-       list_for_each_entry_safe(op, tmp, free_list, list) {
+       list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                /* Disarm probes if marked disabled */
                if (kprobe_disabled(&op->kp))
                        arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
        struct optimized_kprobe *op, *tmp;
 
-       list_for_each_entry_safe(op, tmp, free_list, list) {
+       list_for_each_entry_safe(op, tmp, &freeing_list, list) {
                BUG_ON(!kprobe_unused(&op->kp));
                list_del_init(&op->list);
                free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-       LIST_HEAD(free_list);
-
        mutex_lock(&kprobe_mutex);
        /* Lock modules while optimizing kprobes */
        mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
         * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
         * kprobes before waiting for quiesence period.
         */
-       do_unoptimize_kprobes(&free_list);
+       do_unoptimize_kprobes();
 
        /*
         * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
        do_optimize_kprobes();
 
        /* Step 4: Free cleaned kprobes after quiesence period */
-       do_free_cleaned_kprobes(&free_list);
+       do_free_cleaned_kprobes();
 
        mutex_unlock(&module_mutex);
        mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
        if (!list_empty(&op->list))
                /* Dequeue from the (un)optimization queue */
                list_del_init(&op->list);
-
        op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+       if (kprobe_unused(p)) {
+               /* Enqueue if it is unused */
+               list_add(&op->list, &freeing_list);
+               /*
+                * Remove unused probes from the hash list. After waiting
+                * for synchronization, this probe is reclaimed.
+                * (reclaiming is done by do_free_cleaned_kprobes().)
+                */
+               hlist_del_rcu(&op->kp.hlist);
+       }
+
        /* Don't touch the code, because it is already freed. */
        arch_remove_optimized_kprobe(op);
 }
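
Replacing the on-stack LIST_HEAD with the static freeing_list lets kill_optimized_kprobe() queue unused probes for the optimizer to reclaim later; everything is still serialized by kprobe_mutex. A generic sketch of the deferred-free pattern (names and locking are illustrative, not the kprobes internals):

struct item {
        struct list_head list;
};

static LIST_HEAD(example_freeing_list);
static DEFINE_MUTEX(example_mutex);

static void example_queue_for_free(struct item *it)
{
        mutex_lock(&example_mutex);
        list_add(&it->list, &example_freeing_list);
        mutex_unlock(&example_mutex);
}

static void example_drain(void)         /* run later, e.g. from a worker */
{
        struct item *it, *tmp;

        mutex_lock(&example_mutex);
        list_for_each_entry_safe(it, tmp, &example_freeing_list, list) {
                list_del_init(&it->list);
                kfree(it);
        }
        mutex_unlock(&example_mutex);
}
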
index eb911db..322ea8e 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sort.h>
-
+#include <linux/string.h>
 #include <linux/range.h>
 
 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,9 +32,8 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
        if (start >= end)
                return nr_range;
 
-       /* Try to merge it with old one: */
+       /* get new start/end: */
        for (i = 0; i < nr_range; i++) {
-               u64 final_start, final_end;
                u64 common_start, common_end;
 
                if (!range[i].end)
@@ -45,14 +44,16 @@ int add_range_with_merge(struct range *range, int az, int nr_range,
                if (common_start > common_end)
                        continue;
 
-               final_start = min(range[i].start, start);
-               final_end = max(range[i].end, end);
+               /* new start/end, will add it back at the end */
+               start = min(range[i].start, start);
+               end = max(range[i].end, end);
 
-               /* clear it and add it back for further merge */
-               range[i].start = 0;
-               range[i].end =  0;
-               return add_range_with_merge(range, az, nr_range,
-                       final_start, final_end);
+               memmove(&range[i], &range[i + 1],
+                       (nr_range - (i + 1)) * sizeof(range[i]));
+               range[nr_range - 1].start = 0;
+               range[nr_range - 1].end   = 0;
+               nr_range--;
+               i--;
        }
 
        /* Need to add it: */
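
The hunk above replaces the clear-and-recurse merge with an in-place loop: every entry that overlaps or touches the incoming range is absorbed into the running start/end and compacted out with memmove(), and the grown range is appended once at the end. A small self-contained demo of the same loop (plain C, simplified struct, no capacity checks):

#include <stdio.h>
#include <string.h>

struct range { unsigned long long start, end; };

/* Simplified version of the merge loop above: absorb every entry that
 * overlaps [start, end), compact the array, then append once. */
static int add_merged(struct range *r, int nr, unsigned long long start,
                      unsigned long long end)
{
        int i;

        for (i = 0; i < nr; i++) {
                if (r[i].end < start || r[i].start > end)
                        continue;       /* disjoint */
                if (r[i].start < start)
                        start = r[i].start;
                if (r[i].end > end)
                        end = r[i].end;
                memmove(&r[i], &r[i + 1], (nr - (i + 1)) * sizeof(r[i]));
                nr--;
                i--;
        }
        r[nr].start = start;
        r[nr].end = end;
        return nr + 1;
}

int main(void)
{
        struct range r[8] = { { 0, 10 }, { 20, 30 } };
        int nr = add_merged(r, 2, 5, 25);       /* bridges both entries */

        for (int i = 0; i < nr; i++)
                printf("[%llu, %llu)\n", r[i].start, r[i].end);
        return 0;       /* prints a single [0, 30) range */
}
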
index 58453b8..e8b3350 100644 (file)
@@ -633,7 +633,19 @@ void wake_up_nohz_cpu(int cpu)
 static inline bool got_nohz_idle_kick(void)
 {
        int cpu = smp_processor_id();
-       return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+       if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+               return false;
+
+       if (idle_cpu(cpu) && !need_resched())
+               return true;
+
+       /*
+        * We can't run the idle load balance on this CPU at this time, so
+        * cancel it and clear NOHZ_BALANCE_KICK.
+        */
+       clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+       return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@ static void sched_ttwu_pending(void)
 
 void scheduler_ipi(void)
 {
-       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-           && !tick_nohz_full_cpu(smp_processor_id()))
+       if (llist_empty(&this_rq()->wake_list)
+                       && !tick_nohz_full_cpu(smp_processor_id())
+                       && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1417,7 +1430,7 @@ void scheduler_ipi(void)
        /*
         * Check if someone kicked us for doing the nohz idle load balance.
         */
-       if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+       if (unlikely(got_nohz_idle_kick())) {
                this_rq()->idle_balance = 1;
                raise_softirq_irqoff(SCHED_SOFTIRQ);
        }
@@ -4745,7 +4758,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
         */
        idle->sched_class = &idle_sched_class;
        ftrace_graph_init_idle_task(idle, cpu);
-       vtime_init_idle(idle);
+       vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
        sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
index cc2dc3e..b5ccba2 100644 (file)
@@ -747,17 +747,17 @@ void arch_vtime_task_switch(struct task_struct *prev)
 
        write_seqlock(&current->vtime_seqlock);
        current->vtime_snap_whence = VTIME_SYS;
-       current->vtime_snap = sched_clock();
+       current->vtime_snap = sched_clock_cpu(smp_processor_id());
        write_sequnlock(&current->vtime_seqlock);
 }
 
-void vtime_init_idle(struct task_struct *t)
+void vtime_init_idle(struct task_struct *t, int cpu)
 {
        unsigned long flags;
 
        write_seqlock_irqsave(&t->vtime_seqlock, flags);
        t->vtime_snap_whence = VTIME_SYS;
-       t->vtime_snap = sched_clock();
+       t->vtime_snap = sched_clock_cpu(cpu);
        write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
 
index 0c73942..b4c2455 100644 (file)
@@ -698,10 +698,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
                bc->event_handler = tick_handle_oneshot_broadcast;
 
-               /* Take the do_timer update */
-               if (!tick_nohz_full_cpu(cpu))
-                       tick_do_timer_cpu = cpu;
-
                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
index f420813..0cf1c14 100644 (file)
@@ -306,7 +306,7 @@ static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
                 * we can't safely shutdown that CPU.
                 */
                if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-                       return -EINVAL;
+                       return NOTIFY_BAD;
                break;
        }
        return NOTIFY_OK;
index ff3218a..2d41450 100644 (file)
@@ -373,8 +373,10 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
 {
        int index;
 
-       if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+       if (size > KMALLOC_MAX_SIZE) {
+               WARN_ON_ONCE(!(flags & __GFP_NOWARN));
                return NULL;
+       }
 
        if (size <= 192) {
                if (!size)
index bd8d46c..cccaf9c 100644 (file)
@@ -58,6 +58,7 @@ enum {
        CS420X_GPIO_23,
        CS420X_MBP101,
        CS420X_MBP81,
+       CS420X_MBA42,
        CS420X_AUTO,
        /* aliases */
        CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -346,6 +347,7 @@ static const struct hda_model_fixup cs420x_models[] = {
        { .id = CS420X_APPLE, .name = "apple" },
        { .id = CS420X_MBP101, .name = "mbp101" },
        { .id = CS420X_MBP81, .name = "mbp81" },
+       { .id = CS420X_MBA42, .name = "mba42" },
        {}
 };
 
@@ -361,6 +363,7 @@ static const struct snd_pci_quirk cs420x_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
        SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
        SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+       SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
        SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
        {} /* terminator */
 };
@@ -414,6 +417,20 @@ static const struct hda_pintbl mbp101_pincfgs[] = {
        {} /* terminator */
 };
 
+static const struct hda_pintbl mba42_pincfgs[] = {
+       { 0x09, 0x012b4030 }, /* HP */
+       { 0x0a, 0x400000f0 },
+       { 0x0b, 0x90100120 }, /* speaker */
+       { 0x0c, 0x400000f0 },
+       { 0x0d, 0x90a00110 }, /* mic */
+       { 0x0e, 0x400000f0 },
+       { 0x0f, 0x400000f0 },
+       { 0x10, 0x400000f0 },
+       { 0x12, 0x400000f0 },
+       { 0x15, 0x400000f0 },
+       {} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action)
 {
@@ -482,6 +499,12 @@ static const struct hda_fixup cs420x_fixups[] = {
                .chained = true,
                .chain_id = CS420X_GPIO_13,
        },
+       [CS420X_MBA42] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = mba42_pincfgs,
+               .chained = true,
+               .chain_id = CS420X_GPIO_13,
+       },
 };
 
 static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid)
index 02e22b4..403010c 100644 (file)
@@ -3483,6 +3483,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3494,6 +3495,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
        SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
@@ -3596,6 +3599,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
        {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
        {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+       {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+       {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
        {}
 };
 
@@ -4275,6 +4280,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
        {.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
        {.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
        {.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
+       {.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
        {}
 };
 
index 1a03317..64952e2 100644 (file)
@@ -147,14 +147,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
                return -EINVAL;
        }
 
+       alts = &iface->altsetting[0];
+       altsd = get_iface_desc(alts);
+
+       /*
+        * Android with both accessory and audio interfaces enabled gets the
+        * interface numbers wrong.
+        */
+       if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
+            chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
+           interface == 0 &&
+           altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+           altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
+               interface = 2;
+               iface = usb_ifnum_to_if(dev, interface);
+               if (!iface)
+                       return -EINVAL;
+               alts = &iface->altsetting[0];
+               altsd = get_iface_desc(alts);
+       }
+
        if (usb_interface_claimed(iface)) {
                snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
                                                dev->devnum, ctrlif, interface);
                return -EINVAL;
        }
 
-       alts = &iface->altsetting[0];
-       altsd = get_iface_desc(alts);
        if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
             altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
            altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
index e5c7f9f..d543808 100644 (file)
@@ -885,6 +885,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
+       case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
        case USB_ID(0x046d, 0x0991):