Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Jan 2017 20:54:16 +0000 (12:54 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 27 Jan 2017 20:54:16 +0000 (12:54 -0800)
Pull networking fixes from David Miller:

 1) GTP fixes from Andreas Schultz (missing genl module alias, clear IP
    DF on transmit).

 2) Netfilter needs to reflect the fwmark when sending resets, from Pau
    Espin Pedrol.

 3) nftables dump OOPS fix from Liping Zhang.

 4) Fix erroneous setting of VIRTIO_NET_HDR_F_DATA_VALID on transmit,
    from Rolf Neugebauer.

 5) Fix build error of ipt_CLUSTERIP when procfs is disabled, from Arnd
    Bergmann.

 6) Fix regression in handling of NETIF_F_SG in harmonize_features(),
    from Eric Dumazet.

 7) Fix RTNL deadlock wrt. lwtunnel module loading, from David Ahern.

 8) tcp_fastopen_create_child() needs to setup tp->max_window, from
    Alexey Kodanev.

 9) Missing kmemdup() failure check in ipv6 segment routing code, from
    Eric Dumazet.

10) Don't execute unix_bind() under the bindlock, otherwise we deadlock
    with splice. From WANG Cong.

11) ip6_tnl_parse_tlv_enc_lim() potentially reallocates the skb buffer,
    so callers must reload any header pointers cached into that skb
    afterwards (see the sketch after this list). Fix from Eric Dumazet.

12) Fix various bugs in legacy IRQ fallback handling in alx driver, from
    Tobias Regnery.

13) Do not allow lwtunnel drivers to be unloaded while they are
    referenced by active instances, from Robert Shearman.

14) Fix truncated PHY LED trigger names, from Geert Uytterhoeven.

15) Fix a few regressions from virtio_net XDP support, from John
    Fastabend and Jakub Kicinski.
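
Regarding item 11 above: the hazard is the usual pskb_may_pull() one.
Any helper that may need more linear data can reallocate skb->head,
leaving previously computed header pointers dangling. A condensed,
hypothetical sketch of the caller-side pattern (names follow the ipv6
tunnel code, but this is an illustration, not the literal patch):

    ipv6h = ipv6_hdr(skb);
    offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
    if (offset > 0) {
            /* The parse helper may have called pskb_may_pull(), which
             * can reallocate skb->head; any pointer cached into the
             * old buffer is now stale and must be recomputed. */
            ipv6h = ipv6_hdr(skb);
            tel = (struct ipv6_tlv_tnl_enc_lim *)
                    (skb_network_header(skb) + offset);
    }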

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (102 commits)
  ISDN: eicon: silence misleading array-bounds warning
  net: phy: micrel: add support for KSZ8795
  gtp: fix cross netns recv on gtp socket
  gtp: clear DF bit on GTP packet tx
  gtp: add genl family modules alias
  tcp: don't annotate mark on control socket from tcp_v6_send_response()
  ravb: unmap descriptors when freeing rings
  virtio_net: reject XDP programs using header adjustment
  virtio_net: use dev_kfree_skb for small buffer XDP receive
  r8152: check rx after napi is enabled
  r8152: re-schedule napi for tx
  r8152: avoid start_xmit to schedule napi when napi is disabled
  r8152: avoid start_xmit to call napi_schedule during autosuspend
  net: dsa: Bring back device detaching in dsa_slave_suspend()
  net: phy: leds: Fix truncated LED trigger names
  net: phy: leds: Break dependency of phy.h on phy_led_triggers.h
  net: phy: leds: Clear phy_num_led_triggers on failure to avoid crash
  net-next: ethernet: mediatek: change the compatible string
  Documentation: devicetree: change the mediatek ethernet compatible string
  bnxt_en: Fix RTNL lock usage on bnxt_get_port_module_status().
  ...

374 files changed:
Documentation/devicetree/bindings/spi/sh-msiof.txt
Documentation/filesystems/proc.txt
Documentation/power/states.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/cache.h
arch/arc/include/asm/entry-arcv2.h
arch/arc/include/asm/module.h
arch/arc/include/asm/ptrace.h
arch/arc/include/asm/setup.h
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/intc-compact.c
arch/arc/kernel/mcip.c
arch/arc/kernel/module.c
arch/arc/mm/cache.c
arch/arc/mm/init.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/bcm-nsp.dtsi
arch/arm/boot/dts/da850-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra72-evm-revc.dts
arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-mdm9615.dtsi
arch/arm/boot/dts/sun6i-a31-hummingbird.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/include/asm/cputype.h
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/virt.h
arch/arm/include/uapi/asm/types.h [moved from arch/arm/include/asm/types.h with 94% similarity]
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/smp_tlb.c
arch/arm/kvm/arm.c
arch/arm/mach-omap1/dma.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-ux500/pm.c
arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
arch/arm64/boot/dts/exynos/exynos5433.dtsi
arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
arch/arm64/boot/dts/xilinx/zynqmp.dtsi
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/virt.h
arch/arm64/include/uapi/asm/ptrace.h
arch/arm64/kernel/entry.S
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/traps.c
arch/arm64/mm/init.c
arch/frv/include/asm/atomic.h
arch/mn10300/include/asm/switch_to.h
arch/powerpc/include/asm/book3s/64/hash-4k.h
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/include/asm/page.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/pgtable-be-types.h
arch/powerpc/include/asm/pgtable-types.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kernel/eeh.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage-hash64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/init-common.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power9-events-list.h
arch/powerpc/perf/power9-pmu.c
arch/powerpc/sysdev/xics/icp-opal.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/defconfig
arch/s390/include/asm/ctl_reg.h
arch/s390/kernel/ptrace.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgtable.c
arch/tile/kernel/ptrace.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kvm/x86.c
arch/x86/pci/acpi.c
block/blk-mq.c
drivers/acpi/acpica/tbdata.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/sleep.c
drivers/acpi/video_detect.c
drivers/base/memory.c
drivers/block/nbd.c
drivers/block/xen-blkfront.c
drivers/char/virtio_console.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/intel_pstate.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/si_dpm.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
drivers/gpu/drm/cirrus/Kconfig
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_probe_helper.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/gvt/aperture_gm.c
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/handlers.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/mmio.c
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/reg.h
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_gem.c
drivers/gpu/drm/vc4/vc4_render_cl.c
drivers/gpu/drm/virtio/virtgpu_fb.c
drivers/hid/hid-corsair.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/qedr/qedr.h
drivers/infiniband/hw/qedr/qedr_cm.c
drivers/infiniband/hw/qedr/verbs.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/media/cec/cec-adap.c
drivers/media/dvb-core/dvb_net.c
drivers/media/i2c/Kconfig
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp5150_reg.h
drivers/media/pci/cobalt/cobalt-driver.c
drivers/media/pci/cobalt/cobalt-driver.h
drivers/media/usb/dvb-usb/pctv452e.c
drivers/memstick/core/memstick.c
drivers/mmc/host/dw_mmc.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pmem.c
drivers/nvme/host/fc.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/fc.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/rdma.c
drivers/pci/host/pci-xgene-msi.c
drivers/pci/host/pcie-designware.c
drivers/pci/probe.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-broxton.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/meson/pinctrl-meson-gxbb.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-amd.c
drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_mid_powerbtn.c
drivers/platform/x86/mlx-platform.c
drivers/platform/x86/surface3-wmi.c
drivers/s390/virtio/virtio_ccw.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_nx2.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla2xxx/tcm_qla2xxx.h
drivers/scsi/sd.c
drivers/scsi/ses.c
drivers/soc/ti/wkup_m3_ipc.c
drivers/spi/Kconfig
drivers/spi/spi-armada-3700.c
drivers/spi/spi-axi-spi-engine.c
drivers/spi/spi-davinci.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-sh-msiof.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/thermal_core.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc2/hcd.c
drivers/usb/dwc2/params.c
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/atmel_usba_udc.h
drivers/usb/host/xhci-plat.c
drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vhost/scsi.c
drivers/vhost/vsock.c
drivers/video/fbdev/core/fbcmap.c
drivers/virtio/virtio_mmio.c
drivers/virtio/virtio_ring.c
drivers/xen/platform-pci.c
drivers/xen/swiotlb-xen.c
fs/Kconfig
fs/block_dev.c
fs/btrfs/inode.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/dax.c
fs/ext2/Kconfig
fs/ext4/Kconfig
fs/fuse/dev.c
fs/fuse/dir.c
fs/overlayfs/namei.c
fs/proc/base.c
fs/romfs/super.c
fs/ubifs/Kconfig
fs/ubifs/dir.c
fs/ubifs/ioctl.c
fs/ubifs/journal.c
fs/ubifs/tnc.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_attr.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_dir2.h
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/libxfs/xfs_ialloc_btree.h
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_sb.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
include/drm/drm_atomic.h
include/drm/drm_mode_config.h
include/kvm/arm_arch_timer.h
include/linux/cpuhotplug.h
include/linux/gpio/driver.h
include/linux/kernel.h
include/linux/memory_hotplug.h
include/linux/mmzone.h
include/linux/nmi.h
include/linux/rcupdate.h
include/linux/suspend.h
include/rdma/ib_verbs.h
include/scsi/libfc.h
include/uapi/linux/cec-funcs.h
include/uapi/rdma/Kbuild
include/uapi/rdma/cxgb3-abi.h
kernel/cpu.c
kernel/module.c
kernel/panic.c
kernel/power/suspend.c
kernel/rcu/rcu.h
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sysctl.c
kernel/ucount.c
kernel/watchdog.c
kernel/watchdog_hld.c
lib/ioremap.c
lib/radix-tree.c
mm/huge_memory.c
mm/memcontrol.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page_alloc.c
mm/slub.c
net/ceph/crypto.c
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
tools/virtio/ringtest/main.h
tools/virtio/ringtest/run-on-all.sh
virt/kvm/arm/arch_timer.c
virt/kvm/arm/hyp/timer-sr.c
virt/kvm/arm/vgic/vgic-init.c
virt/kvm/arm/vgic/vgic-v2.c
virt/kvm/arm/vgic/vgic-v3.c

index da6614c..dc97506 100644 (file)
@@ -1,17 +1,23 @@
 Renesas MSIOF spi controller
 
 Required properties:
-- compatible           : "renesas,msiof-<soctype>" for SoCs,
-                        "renesas,sh-msiof" for SuperH, or
-                        "renesas,sh-mobile-msiof" for SH Mobile series.
-                        Examples with soctypes are:
-                        "renesas,msiof-r8a7790" (R-Car H2)
+- compatible           : "renesas,msiof-r8a7790" (R-Car H2)
                         "renesas,msiof-r8a7791" (R-Car M2-W)
                         "renesas,msiof-r8a7792" (R-Car V2H)
                         "renesas,msiof-r8a7793" (R-Car M2-N)
                         "renesas,msiof-r8a7794" (R-Car E2)
                         "renesas,msiof-r8a7796" (R-Car M3-W)
                         "renesas,msiof-sh73a0" (SH-Mobile AG5)
+                        "renesas,sh-mobile-msiof" (generic SH-Mobile compatible device)
+                        "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+                        "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+                        "renesas,sh-msiof"      (deprecated)
+
+                        When compatible with the generic version, nodes
+                        must list the SoC-specific version corresponding
+                        to the platform first followed by the generic
+                        version.
+
 - reg                  : A list of offsets and lengths of the register sets for
                         the device.
                         If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
 Example:
 
        msiof0: spi@e6e20000 {
-               compatible = "renesas,msiof-r8a7791";
+               compatible = "renesas,msiof-r8a7791",
+                            "renesas,rcar-gen2-msiof";
                reg = <0 0xe6e20000 0 0x0064>;
                interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
index 72624a1..c94b467 100644 (file)
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
 snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
 It's slow but very precise.
 
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
 ..............................................................................
  Field                       Content
  Name                        filename of the executable
+ Umask                       file mode creation mask
  State                       state (R is running, S is sleeping, D is sleeping
                              in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
  TracerPid                   PID of process tracing this process (0 if not)
  Uid                         Real, effective, saved set, and  file system UIDs
  Gid                         Real, effective, saved set, and  file system GIDs
- Umask                       file mode creation mask
  FDSize                      number of file descriptor slots currently allocated
  Groups                      supplementary group list
  NStgid                      descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
  VmPeak                      peak virtual memory size
  VmSize                      total program size
  VmLck                       locked memory size
+ VmPin                       pinned memory size
  VmHWM                       peak resident set size ("high water mark")
  VmRSS                       size of memory portions. It contains the three
                              following parts (VmRSS = RssAnon + RssFile + RssShmem)
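
The proc.txt hunks above move Umask to where the kernel actually emits
it (immediately after Name) and add the missing VmPin row. A minimal
user-space sketch, not part of the patch, that picks those two fields
out of /proc/self/status:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    /* Field names are exactly as listed in Table 1-2. */
                    if (!strncmp(line, "Umask:", 6) ||
                        !strncmp(line, "VmPin:", 6))
                            fputs(line, stdout);
            }
            fclose(f);
            return 0;
    }
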
index 8a39ce4..008ecb5 100644 (file)
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
 The default suspend mode (ie. the one to be used without writing anything into
 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
 "s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line.  On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
 
 The properties of all of the sleep states are described below.
 
index af3456d..5f10c28 100644 (file)
@@ -976,6 +976,7 @@ M:  Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.armlinux.org.uk/
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git
 F:     arch/arm/
 
 ARM SUB-ARCHITECTURES
@@ -1153,6 +1154,7 @@ ARM/CLKDEV SUPPORT
 M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
 F:     arch/arm/include/asm/clkdev.h
 F:     drivers/clk/clkdev.c
 
@@ -1688,6 +1690,7 @@ M:        Krzysztof Kozlowski <krzk@kernel.org>
 R:     Javier Martinez Canillas <javier@osg.samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q:     https://patchwork.kernel.org/project/linux-samsung-soc/list/
 S:     Maintained
 F:     arch/arm/boot/dts/s3c*
 F:     arch/arm/boot/dts/s5p*
@@ -4097,12 +4100,18 @@ F:      drivers/gpu/drm/bridge/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:     Gerd Hoffmann <kraxel@redhat.com>
-S:     Odd Fixes
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/bochs/
 
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Obsolete
+W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 F:     drivers/gpu/drm/cirrus/
 
 RADEON and AMDGPU DRM DRIVERS
@@ -4144,7 +4153,7 @@ F:        Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:      Zhenyu Wang <zhenyuw@linux.intel.com>
 M:      Zhi Wang <zhi.a.wang@intel.com>
-L:      igvt-g-dev@lists.01.org
+L:      intel-gvt-dev@lists.freedesktop.org
 L:      intel-gfx@lists.freedesktop.org
 W:      https://01.org/igvt-g
 T:      git https://github.com/01org/gvt-linux.git
@@ -4295,7 +4304,10 @@ F:       Documentation/devicetree/bindings/display/renesas,du.txt
 
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
-S:     Odd Fixes
+M:     Gerd Hoffmann <kraxel@redhat.com>
+L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
+S:     Maintained
 F:     drivers/gpu/drm/qxl/
 F:     include/uapi/drm/qxl_drm.h
 
@@ -7697,8 +7709,10 @@ F:       drivers/net/dsa/mv88e6xxx/
 F:     Documentation/devicetree/bindings/net/dsa/marvell.txt
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
 F:     drivers/gpu/drm/armada/
 F:     include/uapi/drm/armada_drm.h
 F:     Documentation/devicetree/bindings/display/armada/
@@ -8903,8 +8917,10 @@ S:       Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@armlinux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Supported
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T:     git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
 
@@ -13085,6 +13101,7 @@ M:      David Airlie <airlied@linux.ie>
 M:     Gerd Hoffmann <kraxel@redhat.com>
 L:     dri-devel@lists.freedesktop.org
 L:     virtualization@lists.linux-foundation.org
+T:     git git://git.kraxel.org/linux drm-qemu
 S:     Maintained
 F:     drivers/gpu/drm/virtio/
 F:     include/uapi/linux/virtio_gpu.h
@@ -13436,6 +13453,7 @@ F:      arch/x86/
 
 X86 PLATFORM DRIVERS
 M:     Darren Hart <dvhart@infradead.org>
+M:     Andy Shevchenko <andy@infradead.org>
 L:     platform-driver-x86@vger.kernel.org
 T:     git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
 S:     Maintained
@@ -13607,6 +13625,7 @@ F:      drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -13662,6 +13681,7 @@ F:      Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
+M:     Dan Streetman <ddstreet@ieee.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 96e2352..0988400 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
-NAME = Roaring Lionus
+EXTRAVERSION = -rc5
+NAME = Anniversary Edition
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
index c75d290..283099c 100644 (file)
@@ -29,7 +29,7 @@ config ARC
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_MEMBLOCK
-       select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+       select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_OPROFILE
        select HAVE_PERF_EVENTS
        select HANDLE_DOMAIN_IRQ
index b3410ff..5008021 100644 (file)
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_IC_PTAG_HI     0x1F
 
 /* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE   0x1
+#define IC_CTRL_DIS            0x1
 
 /* Data cache related Auxiliary registers */
 #define ARC_REG_DC_BCR         0x72    /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_DC_PTAG_HI     0x5F
 
 /* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH  0x40
-#define DC_CTRL_FLUSH_STATUS    0x100
+#define DC_CTRL_DIS            0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS   0x100
 
 /*System-level cache (L2 cache) related Auxiliary registers */
 #define ARC_REG_SLC_CFG                0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
 #define ARC_REG_SLC_RGN_END    0x916
 
 /* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS           0x001
 #define SLC_CTRL_IM            0x040
-#define SLC_CTRL_DISABLE       0x001
 #define SLC_CTRL_BUSY          0x100
 #define SLC_CTRL_RGN_OP_INV    0x200
 
index b5ff87e..aee1a77 100644 (file)
@@ -16,6 +16,7 @@
        ;
        ; Now manually save: r12, sp, fp, gp, r25
 
+       PUSH    r30
        PUSH    r12
 
        ; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
        POPAX   AUX_USER_SP
 1:
        POP     r12
+       POP     r30
 
 .endm
 
index 6e91d8b..567590e 100644 (file)
 
 #include <asm-generic/module.h>
 
-#ifdef CONFIG_ARC_DW2_UNWIND
 struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
        void *unw_info;
        int unw_sec_idx;
+#endif
        const char *secstr;
 };
-#endif
 
 #define MODULE_PROC_FAMILY "ARC700"
 
index 69095da..47111d5 100644 (file)
@@ -84,7 +84,7 @@ struct pt_regs {
        unsigned long fp;
        unsigned long sp;       /* user/kernel sp depending on where we came from  */
 
-       unsigned long r12;
+       unsigned long r12, r30;
 
        /*------- Below list auto saved by h/w -----------*/
        unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
index cb954cd..c568a9d 100644 (file)
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
 
 void setup_processor(void);
 void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
 
 /* Helpers used in arc_*_mumbojumbo routines */
 #define IS_AVAIL1(v, s)                ((v) ? s : "")
index 994dca7..ecef0fb 100644 (file)
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
 
 static void arcv2_irq_mask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 0);
 }
 
 static void arcv2_irq_unmask(struct irq_data *data)
 {
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_ENABLE, 1);
 }
 
 void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
-       write_aux_reg(AUX_IRQ_SELECT, data->irq);
+       write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
        write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
 
        /*
index ce9deb9..8c1fd5c 100644 (file)
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb &= ~(1 << data->irq);
+       ienb &= ~(1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
        unsigned int ienb;
 
        ienb = read_aux_reg(AUX_IENABLE);
-       ienb |= (1 << data->irq);
+       ienb |= (1 << data->hwirq);
        write_aux_reg(AUX_IENABLE, ienb);
 }
 
index 560c4af..9274f8a 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
 #include <linux/spinlock.h>
 #include <soc/arc/mcip.h>
 #include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
 static void idu_cascade_isr(struct irq_desc *desc)
 {
        struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+       struct irq_chip *core_chip = irq_desc_get_chip(desc);
        irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
        irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
 
+       chained_irq_enter(core_chip, desc);
        generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+       chained_irq_exit(core_chip, desc);
 }
 
 static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
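
The mcip.c hunk above brackets the cascade dispatch with
chained_irq_enter()/chained_irq_exit(). Without that pairing, the
parent interrupt is never acked or EOI'd on chips that require it, so
it can stay masked or storm. The canonical shape of a chained handler,
sketched with placeholder foo_* names (how the pending child interrupt
is discovered is controller specific):

    static void foo_cascade_isr(struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct irq_domain *domain = irq_desc_get_handler_data(desc);
            irq_hw_number_t hwirq = foo_read_pending();  /* placeholder */

            chained_irq_enter(chip, desc);  /* ack/mask parent as needed */
            generic_handle_irq(irq_find_mapping(domain, hwirq));
            chained_irq_exit(chip, desc);   /* eoi/unmask parent */
    }
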
index 42e964d..3d99a60 100644 (file)
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 #ifdef CONFIG_ARC_DW2_UNWIND
        mod->arch.unw_sec_idx = 0;
        mod->arch.unw_info = NULL;
-       mod->arch.secstr = secstr;
 #endif
+       mod->arch.secstr = secstr;
        return 0;
 }
 
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
 
        }
 
+#ifdef CONFIG_ARC_DW2_UNWIND
        if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
                module->arch.unw_sec_idx = tgtsec;
+#endif
 
        return 0;
 
index ec86ac0..d408fa2 100644 (file)
@@ -23,7 +23,7 @@
 
 static int l2_line_sz;
 static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
 
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
 
 /*
  * For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ *  - ARC700 programming model requires paddr and vaddr be passed in separate
+ *    AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ *    caches actually alias or not.
+ * -  For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ *    (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
  */
 static inline
 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
        __after_dc_op(op);
 }
 
+static inline void __dc_disable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       __dc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+       const int r = ARC_REG_DC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
 /* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)      __dc_line_op(p, p, sz, op)
 
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
 #else
 
 #define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
 #define __dc_line_op(paddr, vaddr, sz, op)
 #define __dc_line_op_k(paddr, sz, op)
 
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
 #endif
 }
 
+noinline static void slc_entire_op(const int op)
+{
+       unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+       ctrl = read_aux_reg(r);
+
+       if (!(op & OP_FLUSH))           /* i.e. OP_INV */
+               ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
+       else
+               ctrl |= SLC_CTRL_IM;
+
+       write_aux_reg(r, ctrl);
+
+       write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+       /* Important to wait for flush to complete */
+       while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       slc_entire_op(OP_FLUSH_N_INV);
+       write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+       const int r = ARC_REG_SLC_CTRL;
+
+       write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
 /***********************************************************
  * Exported APIs
  */
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
        return 0;
 }
 
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ *    Non-Masters need not be accessing caches at that time
+ *    - They are either HALT_ON_RESET and kick started much later or
+ *    - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ *      doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ *    otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ *    Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
 {
-       unsigned int __maybe_unused cpu = smp_processor_id();
-       char str[256];
+       unsigned int ap_sz;
 
-       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+       /* Flush + invalidate + disable L1 dcache */
+       __dc_disable();
+
+       /* Flush + invalidate SLC */
+       if (read_aux_reg(ARC_REG_SLC_BCR))
+               slc_entire_op(OP_FLUSH_N_INV);
+
+       /* IOC Aperture start: TBD: handle non default CONFIG_LINUX_LINK_BASE */
+       write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
 
        /*
-        * Only master CPU needs to execute rest of function:
-        *  - Assume SMP so all cores will have same cache config so
-        *    any geomtry checks will be same for all
-        *  - IOC setup / dma callbacks only need to be setup once
+        * IOC Aperture size:
+        *   decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+        * TBD: fix for PGU + 1GB of low mem
+        * TBD: fix for PAE
         */
-       if (cpu)
-               return;
+       ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+       write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+       write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+       write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+       /* Re-enable L1 dcache */
+       __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
 
        if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
                struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
                }
        }
 
-       if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
-               /* IM set : flush before invalidate */
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+       /* Note that SLC disable not formally supported till HS 3.0 */
+       if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+               arc_slc_disable();
 
-               write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
-               /* Important to wait for flush to complete */
-               while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
-               write_aux_reg(ARC_REG_SLC_CTRL,
-                       read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
-       }
+       if (is_isa_arcv2() && ioc_enable)
+               arc_ioc_setup();
 
        if (is_isa_arcv2() && ioc_enable) {
-               /* IO coherency base - 0x8z */
-               write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
-               /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
-               write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
-               /* Enable partial writes */
-               write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
-               /* Enable IO coherency */
-               write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
                __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
                __dma_cache_inv = __dma_cache_inv_ioc;
                __dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
                __dma_cache_wback = __dma_cache_wback_l1;
        }
 }
+
+void __ref arc_cache_init(void)
+{
+       unsigned int __maybe_unused cpu = smp_processor_id();
+       char str[256];
+
+       printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+       /*
+        * Only master CPU needs to execute rest of function:
+        *  - Assume SMP so all cores will have same cache config so
+        *    any geometry checks will be same for all
+        *  - IOC setup / dma callbacks only need to be setup once
+        */
+       if (!cpu)
+               arc_cache_init_master();
+}
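
A quick check of the aperture-size arithmetic introduced above: the
SIZE field decodes as 2^(SIZE + 2) KB, and arc_ioc_setup() derives it
as order_base_2(mem_kb) - 2. For the 512 MB case named in the comment
(a user-space sketch; a plain log2 loop stands in for the kernel's
order_base_2(), which it matches on exact powers of two):

    #include <stdio.h>

    static unsigned int ulog2(unsigned long v)
    {
            unsigned int r = 0;

            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long mem_kb = 512UL * 1024;    /* 512 MB in KB */
            unsigned int ap_sz = ulog2(mem_kb) - 2; /* SIZE field */

            /* Expect SIZE=0x11, decoding back to 512 MB. */
            printf("SIZE=0x%x -> %lu KB\n", ap_sz, 1UL << (ap_sz + 2));
            return 0;
    }
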
index 399e2f2..8c9415e 100644 (file)
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 #endif
 
+long __init arc_get_mem_sz(void)
+{
+       return low_mem_sz;
+}
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
index 7327250..f10fe85 100644 (file)
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
        sun8i-a83t-allwinner-h8homlet-v2.dtb \
        sun8i-a83t-cubietruck-plus.dtb \
        sun8i-h3-bananapi-m2-plus.dtb \
+       sun8i-h3-nanopi-m1.dtb  \
        sun8i-h3-nanopi-neo.dtb \
        sun8i-h3-orangepi-2.dtb \
        sun8i-h3-orangepi-lite.dtb \
index 1463df3..8ed46f9 100644 (file)
                        AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
                        AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
                        AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
-                       AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
                >;
        };
 
index b6142bd..15f07f9 100644 (file)
 
        axi {
                compatible = "simple-bus";
-               ranges = <0x00000000 0x18000000 0x0011c40a>;
+               ranges = <0x00000000 0x18000000 0x0011c40c>;
                #address-cells = <1>;
                #size-cells = <1>;
 
index 41de15f..78492a0 100644 (file)
@@ -99,6 +99,7 @@
                                #size-cells = <1>;
                                compatible = "m25p64";
                                spi-max-frequency = <30000000>;
+                               m25p,fast-read;
                                reg = <0>;
                                partition@0 {
                                        label = "U-Boot-SPL";
index 1faf24a..5ba1616 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                rtc: rtc@48838000 {
index c3d939c..3f808a4 100644 (file)
@@ -75,6 +75,6 @@
                ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
                ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
                ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
-               ti,min-output-imepdance;
+               ti,min-output-impedance;
        };
 };
index 34887a1..47ba972 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_max-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index d80f21a..31d4cc6 100644 (file)
                compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
                             "fsl,imx-audio-sgtl5000";
                model = "imx6q-nitrogen6_som2-sgtl5000";
-               pinctrl-names = "default";
-               pinctrl-0 = <&pinctrl_sgtl5000>;
                ssi-controller = <&ssi1>;
                audio-codec = <&codec>;
                audio-routing =
 
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sgtl5000>;
                reg = <0x0a>;
                clocks = <&clks IMX6QDL_CLK_CKO>;
                VDDA-supply = <&reg_2p5v>;
index da85984..38faa90 100644 (file)
 &mmc1 {
        interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
        pinctrl-names = "default";
-       pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+       pinctrl-0 = <&mmc1_pins>;
        wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>;                /* gpio_126 */
        cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>;              /* gpio_110 */
        vmmc-supply = <&vmmc1>;
                        OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat1.sdmmc1_dat1 */
                        OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat2.sdmmc1_dat2 */
                        OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0)        /* sdmmc1_dat3.sdmmc1_dat3 */
-                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+                       OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+                       OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
                >;
        };
 
                        OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4)       /* sys_boot6.gpio_8 */
                >;
        };
-
-       mmc1_cd: pinmux_mmc1_cd {
-               pinctrl-single,pins = <
-                       OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4)  /* cam_d11.gpio_110 */
-               >;
-       };
 };
 
 
index 7cd92ba..0844737 100644 (file)
                        phy-names = "sata-phy";
                        clocks = <&sata_ref_clk>;
                        ti,hwmods = "sata";
+                       ports-implemented = <0x1>;
                };
 
                dss: dss@58000000 {
index 5ae4ec5..c852b69 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 735914f..7cae328 100644 (file)
        cpu-supply = <&reg_dcdc3>;
 };
 
+&de {
+       status = "okay";
+};
+
 &ehci0 {
        status = "okay";
 };
index 2b26175..e78faaf 100644 (file)
        de: display-engine {
                compatible = "allwinner,sun6i-a31-display-engine";
                allwinner,pipelines = <&fe0>;
+               status = "disabled";
        };
 
        soc@01c00000 {
index 5ea4915..10d3074 100644 (file)
@@ -56,7 +56,7 @@
 };
 
 &pio {
-       mmc2_pins_nrst: mmc2@0 {
+       mmc2_pins_nrst: mmc2-rst-pin {
                allwinner,pins = "PC16";
                allwinner,function = "gpio_out";
                allwinner,drive = <SUN4I_PINCTRL_10_MA>;
index b01a438..028d2b7 100644 (file)
@@ -471,7 +471,7 @@ CONFIG_MESON_WATCHDOG=y
 CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
 CONFIG_BCM7038_WDT=m
 CONFIG_BCM_KONA_WDT=y
 CONFIG_MFD_ACT8945A=y
@@ -893,7 +893,7 @@ CONFIG_BCM2835_MBOX=y
 CONFIG_RASPBERRYPI_FIRMWARE=y
 CONFIG_EFI_VARS=m
 CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
 CONFIG_BCM47XX_SPROM=y
 CONFIG_EXT4_FS=y
 CONFIG_AUTOFS4_FS=y
index 4364040..1e6c48d 100644 (file)
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
 CONFIG_NETFILTER=y
 CONFIG_NF_CONNTRACK=m
 CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
index 522b5fe..b62eaeb 100644 (file)
@@ -94,6 +94,9 @@
 #define ARM_CPU_XSCALE_ARCH_V2         0x4000
 #define ARM_CPU_XSCALE_ARCH_V3         0x6000
 
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION          0x510002d0
+
 extern unsigned int processor_id;
 
 #ifdef CONFIG_CPU_CP15
index bfe2a2f..22b7311 100644 (file)
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
 
 #define ftrace_return_address(n) return_address(n)
 
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+                                              const char *name)
+{
+       if (!strcmp(sym, "sys_mmap2"))
+               sym = "sys_mmap_pgoff";
+       else if (!strcmp(sym, "sys_statfs64_wrapper"))
+               sym = "sys_statfs64";
+       else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+               sym = "sys_fstatfs64";
+       else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+               sym = "sys_fadvise64_64";
+
+       /* Ignore case since sym may start with "SyS" instead of "sys" */
+       return !strcasecmp(sym, name);
+}
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* _ASM_ARM_FTRACE */
index a2e75b8..6dae195 100644 (file)
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
        return false;
 }
 
+static inline bool has_vhe(void)
+{
+       return false;
+}
+
 /* The section containing the hypervisor idmap text */
 extern char __hyp_idmap_text_start[];
 extern char __hyp_idmap_text_end[];
similarity index 94%
rename from arch/arm/include/asm/types.h
rename to arch/arm/include/uapi/asm/types.h
index a53cdb8..9435a42 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
 
 #include <asm-generic/int-ll64.h>
 
@@ -37,4 +37,4 @@
 #define __UINTPTR_TYPE__       unsigned long
 #endif
 
-#endif /* _ASM_TYPES_H */
+#endif /* _UAPI_ASM_TYPES_H */
index 188180b..be3b3fb 100644 (file)
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
                return 0;
        }
 
+       /*
+        * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+        * whenever a WFI is issued, even if the core is not powered down, in
+        * violation of the architecture.  When DBGPRSR.SPD is set, accesses to
+        * breakpoint and watchpoint registers are treated as undefined, so
+        * this results in boot time and runtime failures when these are
+        * accessed and we unexpectedly take a trap.
+        *
+        * It's not clear if/how this can be worked around, so we blacklist
+        * Scorpion CPUs to avoid these issues.
+       */
+       if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+               pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+               return 0;
+       }
+
        has_ossr = core_has_os_save_restore();
 
        /* Determine how many BRPs/WRPs are available. */
index 22313cb..9af0701 100644 (file)
@@ -9,6 +9,7 @@
  */
 #include <linux/preempt.h>
 #include <linux/smp.h>
+#include <linux/uaccess.h>
 
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
 static inline void ipi_flush_tlb_page(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
 static inline void ipi_flush_tlb_range(void *arg)
 {
        struct tlb_args *ta = (struct tlb_args *)arg;
+       unsigned int __ua_flags = uaccess_save_and_enable();
 
        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+       uaccess_restore(__ua_flags);
 }
 
 static inline void ipi_flush_tlb_kernel_range(void *arg)
index 1167678..9d74464 100644 (file)
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
 
+       if (is_kernel_in_hyp_mode())
+               kvm_timer_init_vhe();
+
        kvm_arm_init_debug();
 }
 
index f6ba589..c821c1d 100644 (file)
@@ -32,7 +32,6 @@
 #include "soc.h"
 
 #define OMAP1_DMA_BASE                 (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT     17
 
 static u32 enable_1510_mode;
 
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
                goto exit_iounmap;
        }
 
-       d->lch_count            = OMAP1_LOGICAL_DMA_CH_COUNT;
-
        /* Valid attributes for omap1 plus processors */
        if (cpu_is_omap15xx())
                d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
        d->dev_caps             |= CLEAR_CSR_ON_READ;
        d->dev_caps             |= IS_WORD_16;
 
-       if (cpu_is_omap15xx())
-               d->chan_count = 9;
-       else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
-               if (!(d->dev_caps & ENABLE_1510_MODE))
-                       d->chan_count = 16;
+       /* available logical channels */
+       if (cpu_is_omap15xx()) {
+               d->lch_count = 9;
+       } else {
+               if (d->dev_caps & ENABLE_1510_MODE)
+                       d->lch_count = 9;
                else
-                       d->chan_count = 9;
+                       d->lch_count = 16;
        }
 
        p = dma_plat_info;
index 477910a..70c0047 100644 (file)
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
        .nshutdown_gpio = 162,
        .dev_name = "/dev/ttyO1",
        .flow_cntrl = 1,
-       .baud_rate = 300000,
+       .baud_rate = 3000000,
 };
 
 static struct platform_device wl128x_device = {
index 8538910..a970e7f 100644 (file)
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
  */
 bool prcmu_is_cpu_in_wfi(int cpu)
 {
-       return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
-                    PRCM_ARM_WFI_STANDBY_WFI0;
+       return readl(PRCM_ARM_WFI_STANDBY) &
+               (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
 }
 
 /*
index 238fbea..5d28e1c 100644 (file)
        };
 };
 
+&scpi_clocks {
+       status = "disabled";
+};
+
 &uart_AO {
        status = "okay";
        pinctrl-0 = <&uart_ao_a_pins>;
index 596240c..b353073 100644 (file)
@@ -55,7 +55,7 @@
                mboxes = <&mailbox 1 &mailbox 2>;
                shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
 
-               clocks {
+               scpi_clocks: clocks {
                        compatible = "arm,scpi-clocks";
 
                        scpi_dvfs: scpi_clocks@0 {
index 64226d5..135890c 100644 (file)
                };
 
                amba {
-                       compatible = "arm,amba-bus";
+                       compatible = "simple-bus";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges;
index 3580896..ef1b9e5 100644 (file)
@@ -27,7 +27,7 @@
                stdout-path = "serial0:115200n8";
        };
 
-       memory {
+       memory@0 {
                device_type = "memory";
                reg = <0x0 0x0 0x0 0x40000000>;
        };
index 68a9083..54dc283 100644 (file)
@@ -72,7 +72,7 @@
                             <1 10 0xf08>;
        };
 
-       amba_apu {
+       amba_apu: amba_apu@0 {
                compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <1>;
                };
 
                i2c0: i2c@ff020000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 17 4>;
                };
 
                i2c1: i2c@ff030000 {
-                       compatible = "cdns,i2c-r1p10";
+                       compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
                        status = "disabled";
                        interrupt-parent = <&gic>;
                        interrupts = <0 18 4>;
index bfe6328..90c39a6 100644 (file)
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define _virt_addr_valid(kaddr)        pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 #else
 #define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page)   (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr)  (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
 
 #define page_to_virt(page)     ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
 #define virt_to_page(vaddr)    ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
index fea1073..439f6b5 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/ptrace.h>
 #include <asm/sections.h>
 #include <asm/sysreg.h>
+#include <asm/cpufeature.h>
 
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
        return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
 
+static inline bool has_vhe(void)
+{
+       if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+               return true;
+
+       return false;
+}
+
 #ifdef CONFIG_ARM64_VHE
 extern void verify_cpu_run_el(void);
 #else
index b5c3933..d1ff83d 100644 (file)
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
        __uint128_t     vregs[32];
        __u32           fpsr;
        __u32           fpcr;
+       __u32           __reserved[2];
 };
 
 struct user_hwdebug_state {
index 923841f..43512d4 100644 (file)
@@ -683,7 +683,7 @@ el0_inv:
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
-       bl      bad_mode
+       bl      bad_el0_sync
        b       ret_to_user
 ENDPROC(el0_sync)
 
index fc35e06..a22161c 100644 (file)
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
        /* (address, ctrl) registers */
        limit = regset->n * regset->size;
        while (count && offset < limit) {
+               if (count < PTRACE_HBP_ADDR_SZ)
+                       return -EINVAL;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
                                         offset, offset + PTRACE_HBP_ADDR_SZ);
                if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
                        return ret;
                offset += PTRACE_HBP_ADDR_SZ;
 
+               if (!count)
+                       break;
                ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
                                         offset, offset + PTRACE_HBP_CTRL_SZ);
                if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_pt_regs newregs;
+       struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
        if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct user_fpsimd_state newstate;
+       struct user_fpsimd_state newstate =
+               target->thread.fpsimd_state.user_fpsimd;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
        if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
                   const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       unsigned long tls;
+       unsigned long tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
                           unsigned int pos, unsigned int count,
                           const void *kbuf, const void __user *ubuf)
 {
-       int syscallno, ret;
+       int syscallno = task_pt_regs(target)->syscallno;
+       int ret;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
        if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
                          const void __user *ubuf)
 {
        int ret;
-       compat_ulong_t tls;
+       compat_ulong_t tls = target->thread.tp_value;
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
        if (ret)
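
A note on the pattern in these ptrace hunks: each *_set() handler now seeds its local copy from the target's existing state before user_regset_copyin() runs, so a short write from the tracer only overwrites the leading bytes instead of committing uninitialized stack contents for the rest (and hw_break_set() additionally rejects writes too short to hold a full address). A minimal user-space sketch of the same seed-then-partial-copy idiom, with hypothetical names:

    #include <stdint.h>
    #include <string.h>

    struct regs { uint64_t r[4]; };

    static struct regs live;                    /* stands in for task state */

    /* Bytes beyond 'count' keep their old values because 'tmp' was
     * seeded from the existing state before the partial copy-in. */
    static int regs_set(const void *buf, size_t count)
    {
            struct regs tmp = live;

            if (count > sizeof(tmp))
                    return -1;
            memcpy(&tmp, buf, count);
            live = tmp;                         /* commit */
            return 0;
    }
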
index 5b830be..659b2e6 100644 (file)
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
 }
 
 /*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
  */
 asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
-       siginfo_t info;
-       void __user *pc = (void __user *)instruction_pointer(regs);
        console_verbose();
 
        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));
+
+       die("Oops - bad mode", regs, 0);
+       local_irq_disable();
+       panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+       siginfo_t info;
+       void __user *pc = (void __user *)instruction_pointer(regs);
+       console_verbose();
+
+       pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+               smp_processor_id(), esr, esr_get_class_string(esr));
        __show_regs(regs);
 
        info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;
 
-       arm64_notify_die("Oops - bad mode", regs, &info, 0);
+       current->thread.fault_address = 0;
+       current->thread.fault_code = 0;
+
+       force_sig_info(info.si_signo, &info, current);
 }
 
 void __pte_error(const char *file, int line, unsigned long val)
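
What userspace sees from bad_el0_sync() is a SIGILL with si_code ILL_ILLOPC at the faulting PC, instead of the unconditional die()/panic() that bad_mode() now guarantees for the kernel-side case. For illustration only, a plain POSIX process could observe such a signal like this (not arm64-specific):

    #include <signal.h>
    #include <stdlib.h>
    #include <unistd.h>

    static void on_sigill(int sig, siginfo_t *info, void *ctx)
    {
            static const char msg[] = "caught SIGILL\n";

            /* write() is async-signal-safe; info->si_addr holds the PC */
            write(STDERR_FILENO, msg, sizeof(msg) - 1);
            _exit(1);
    }

    int main(void)
    {
            struct sigaction sa;

            sa.sa_sigaction = on_sigill;
            sa.sa_flags = SA_SIGINFO;
            sigemptyset(&sa.sa_mask);
            sigaction(SIGILL, &sa, NULL);
            raise(SIGILL);      /* stand-in for a bad EL0 exception */
            return 0;
    }
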
index 716d122..380ebe7 100644 (file)
@@ -404,6 +404,8 @@ void __init mem_init(void)
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb_init(1);
+       else
+               swiotlb_force = SWIOTLB_NO_FORCE;
 
        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
 
index 1c2a5e2..e93c949 100644 (file)
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_sub_and_test(i,v)     (atomic64_sub_return((i), (v)) == 0)
 #define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_and_test(v)       (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
 #define atomic_cmpxchg(v, old, new)    (cmpxchg(&(v)->counter, old, new))
 #define atomic_xchg(v, new)            (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+       long long c, old;
+
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == u))
+                       break;
+               old = atomic64_cmpxchg(v, c, c + i);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long long c, old, dec;
+
+       c = atomic64_read(v);
+       for (;;) {
+               dec = c - 1;
+               if (unlikely(dec < 0))
+                       break;
+               old = atomic64_cmpxchg((v), c, dec);
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return dec;
+}
+
 #define ATOMIC_OP(op)                                                  \
 static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
 {                                                                      \
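
Both additions are instances of the classic compare-and-swap retry loop: sample the counter, compute the candidate value, and retry if another CPU changed the counter in between. The same shape in portable C11 atomics, as a sketch rather than the kernel implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Add 'i' to *v unless *v == u; returns true if the add happened. */
    static bool add_unless(_Atomic long long *v, long long i, long long u)
    {
            long long c = atomic_load(v);

            while (c != u) {
                    /* On failure, 'c' is reloaded with the current value. */
                    if (atomic_compare_exchange_weak(v, &c, c + i))
                            return true;
            }
            return false;
    }
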
index 393d311..67e333a 100644 (file)
@@ -16,7 +16,7 @@
 struct task_struct;
 struct thread_struct;
 
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
 struct fpu_state_struct;
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
 #define switch_fpu(prev, next)                                         \
index 1c64bc6..0c4e470 100644 (file)
 #ifdef CONFIG_HUGETLB_PAGE
 static inline int hash__hugepd_ok(hugepd_t hpd)
 {
+       unsigned long hpdval = hpd_val(hpd);
        /*
         * if it is not a pte and have hugepd shift mask
         * set, then it is a hugepd directory pointer
         */
-       if (!(hpd.pd & _PAGE_PTE) &&
-           ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+       if (!(hpdval & _PAGE_PTE) &&
+           ((hpdval & HUGEPD_SHIFT_MASK) != 0))
                return true;
        return false;
 }
index f61cad3..4c935f7 100644 (file)
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
                                              unsigned long phys);
 extern void hash__vmemmap_remove_mapping(unsigned long start,
                                     unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
index ede2151..7f4025a 100644 (file)
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
         * We have only four bits to encode, MMU page size
         */
        BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
-       return __va(hpd.pd & HUGEPD_ADDR_MASK);
+       return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
 }
 
 static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
 {
-       return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+       return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
 {
        BUG_ON(!hugepd_ok(hpd));
 #ifdef CONFIG_PPC_8xx
-       return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+       return (pte_t *)__va(hpd_val(hpd) &
+                            ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
 #else
-       return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+       return (pte_t *)((hpd_val(hpd) &
+                         ~HUGEPD_SHIFT_MASK) | PD_HUGE);
 #endif
 }
 
 static inline unsigned int hugepd_shift(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+       return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
 #else
-       return hpd.pd & HUGEPD_SHIFT_MASK;
+       return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
 #endif
 }
 
index 1728497..0cd8a38 100644 (file)
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 static inline int hugepd_ok(hugepd_t hpd)
 {
 #ifdef CONFIG_PPC_8xx
-       return ((hpd.pd & 0x4) != 0);
+       return ((hpd_val(hpd) & 0x4) != 0);
 #else
-       return (hpd.pd > 0);
+       /* We clear the top bit to indicate hugepd */
+       return ((hpd_val(hpd) & PD_HUGE) == 0);
 #endif
 }
 
index 56398e7..47120bf 100644 (file)
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
 #include <asm/pgtable-types.h>
 #endif
 
-typedef struct { signed long pd; } hugepd_t;
 
 #ifndef CONFIG_HUGETLB_PAGE
 #define is_hugepd(pdep)                (0)
 #define pgd_huge(pgd)          (0)
 #endif /* CONFIG_HUGETLB_PAGE */
 
-#define __hugepd(x) ((hugepd_t) { (x) })
-
 struct page;
 extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
 extern void copy_user_page(void *to, void *from, unsigned long vaddr,
index e157489..ae0a230 100644 (file)
@@ -65,6 +65,7 @@ struct power_pmu {
 #define PPMU_HAS_SSLOT         0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER          0x00000040 /* Has SIER */
 #define PPMU_ARCH_207S         0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR           0x00000100 /* Do not use SIAR */
 
 /*
  * Values for flags to get_alternatives()
index 49c0a5a..9c0f5db 100644 (file)
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
        return pmd_raw(old) == prev;
 }
 
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return be64_to_cpu(x.pdbe);
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
index e7f4f3e..8bd3b13 100644 (file)
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
 }
 #endif
 
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+       return x.pd;
+}
+
 #endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
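
The reason every former hpd.pd access goes through hpd_val() is visible in the two typedefs: the big-endian flavour stores the descriptor as __be64 and byte-swaps on read, while the native flavour is a plain load, and callers stay oblivious to which one is in play. A user-space sketch of the same typed-wrapper idiom using glibc's endian helpers (names hypothetical):

    #include <endian.h>
    #include <stdint.h>

    /* Hide the raw big-endian field behind a struct so it cannot be
     * used without going through the accessor pair. */
    typedef struct { uint64_t pdbe; } hugepd_be;

    static inline hugepd_be mk_hugepd(uint64_t val)
    {
            return (hugepd_be){ htobe64(val) };
    }

    static inline uint64_t hugepd_val(hugepd_be x)
    {
            return be64toh(x.pdbe);
    }
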
index c56ea8c..c4ced1d 100644 (file)
 #define PPC_INST_MCRXR                 0x7c000400
 #define PPC_INST_MCRXR_MASK            0xfc0007fe
 #define PPC_INST_MFSPR_PVR             0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK                0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK                0xfc1ffffe
 #define PPC_INST_MFTMR                 0x7c0002dc
 #define PPC_INST_MSGSND                        0x7c00019c
 #define PPC_INST_MSGCLR                        0x7c0001dc
 #define PPC_INST_RFDI                  0x4c00004e
 #define PPC_INST_RFMCI                 0x4c00004c
 #define PPC_INST_MFSPR_DSCR            0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR            0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK       0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK       0xfc1ffffe
 #define PPC_INST_MFSPR_DSCR_USER       0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MTSPR_DSCR_USER       0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK  0xfc1ffffe
 #define PPC_INST_MFVSRD                        0x7c000066
 #define PPC_INST_MTVSRD                        0x7c000166
 #define PPC_INST_SLBFEE                        0x7c0007a7
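
The masks lose their low bit (0xfc1fffff becomes 0xfc1ffffe), widening the match so that an otherwise-identical encoding with that bit set is still accepted; the bit is evidently being treated as a don't-care. Instruction recognition here is a plain masked compare; a small self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define INST_MFSPR_PVR       0x7c1f42a6u
    #define INST_MFSPR_PVR_MASK  0xfc1ffffeu  /* low bit is a don't-care */

    static bool is_mfspr_pvr(uint32_t insn)
    {
            /* Keep only the bits the mask cares about, then compare. */
            return (insn & INST_MFSPR_PVR_MASK) == INST_MFSPR_PVR;
    }
    /* Both 0x7c1f42a6 and 0x7c1f42a7 now match. */
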
index 8180bfd..9de7f79 100644 (file)
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
         *
         * For pHyp, we have to enable IO for log retrieval. Otherwise,
         * 0xFFs are always returned from PCI config space.
+        *
+        * When the @severity is EEH_LOG_PERM, the PE is going to be
+        * removed. Prior to that, the drivers for devices included in
+        * the PE will be closed. The drivers rely on a working IO path
+        * to bring the devices to a quiet state. Otherwise, PCI traffic
+        * from those devices after they are removed is likely to cause
+        * another unexpected EEH error.
         */
        if (!(pe->type & EEH_PE_PHB)) {
-               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+               if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+                   severity == EEH_LOG_PERM)
                        eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
                /*
index e4744ff..925a4ef 100644 (file)
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
 
        flush_fp_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.TS_FPR(i);
+       buf[32] = target->thread.fp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
        flush_fp_to_thread(target);
        flush_altivec_to_thread(target);
 
+       for (i = 0; i < 32; i++)
+               buf[i] = target->thread.TS_CKFPR(i);
+       buf[32] = target->thread.ckfp_state.fpscr;
+
        /* copy to local buffer then write that out */
        i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
        if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
        flush_altivec_to_thread(target);
        flush_vsx_to_thread(target);
 
+       for (i = 0; i < 32 ; i++)
+               buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                 buf, 0, 32 * sizeof(double));
        if (!ret)
index 8033493..67e19a0 100644 (file)
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_bolt_mapping(start, end, __pa(start),
                                   pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
        return rc;
 }
 
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
 {
        int rc = htab_remove_mapping(start, end, mmu_linear_psize,
                                     mmu_kernel_ssize);
index d5026f3..37b5f91 100644 (file)
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 int hugepd_ok(hugepd_t hpd)
 {
        bool is_hugepd;
+       unsigned long hpdval;
+
+       hpdval = hpd_val(hpd);
 
        /*
         * We should not find this format in page directory, warn otherwise.
         */
-       is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+       is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
        WARN(is_hugepd, "Found wrong page directory format\n");
        return 0;
 }
index 289df38..8c3389c 100644 (file)
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
 static unsigned nr_gpages;
 #endif
 
-#define hugepd_none(hpd)       ((hpd).pd == 0)
+#define hugepd_none(hpd)       (hpd_val(hpd) == 0)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
-               else
+               else {
 #ifdef CONFIG_PPC_BOOK3S_64
-                       hpdp->pd = __pa(new) |
-                                  (shift_to_mmu_psize(pshift) << 2);
+                       *hpdp = __hugepd(__pa(new) |
+                                        (shift_to_mmu_psize(pshift) << 2));
 #elif defined(CONFIG_PPC_8xx)
-                       hpdp->pd = __pa(new) |
-                                  (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
-                                                             _PMD_PAGE_512K) |
-                                  _PMD_PRESENT;
+                       *hpdp = __hugepd(__pa(new) |
+                                        (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+                                         _PMD_PAGE_512K) | _PMD_PRESENT);
 #else
                        /* We use the old format for PPC_FSL_BOOK3E */
-                       hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+                       *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
 #endif
+               }
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1 ; i >= 0; i--, hpdp--)
-                       hpdp->pd = 0;
+                       *hpdp = __hugepd(0);
                kmem_cache_free(cachep, new);
        }
        spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
                return;
 
        for (i = 0; i < num_hugepd; i++, hpdp++)
-               hpdp->pd = 0;
+               *hpdp = __hugepd(0);
 
        if (shift >= pdshift)
                hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
                 * if we have pdshift and shift value same, we don't
                 * use pgt cache for hugepd.
                 */
-               if (pdshift > shift) {
+               if (pdshift > shift)
                        pgtable_cache_add(pdshift - shift, NULL);
-                       if (!PGT_CACHE(pdshift - shift))
-                               panic("hugetlbpage_init(): could not create "
-                                     "pgtable cache for %d bit pagesize\n", shift);
-               }
 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
                else if (!hugepte_cache) {
                        /*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
        else if (mmu_psize_defs[MMU_PAGE_2M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
 #endif
-       else
-               panic("%s: Unable to set default huge page size\n", __func__);
-
        return 0;
 }
 
index a175cd8..f2108c4 100644 (file)
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
+       if (!new)
+               panic("Could not allocate pgtable cache for order %d", shift);
+
        kfree(name);
        pgtable_cache[shift - 1] = new;
+
        pr_debug("Allocated pgtable cache for order %d\n", shift);
 }
 
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
 {
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
 
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+       if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
                pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        /*
         * In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
         */
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
-       if (!PGT_CACHE(PGD_INDEX_SIZE))
-               panic("Couldn't allocate pgd cache");
-       if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
-               panic("Couldn't allocate pmd pgtable caches");
-       if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-               panic("Couldn't allocate pud pgtable caches");
 }
index ebf9782..653ff6c 100644 (file)
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
        else if (mmu_hash_ops.hpte_clear_all)
                mmu_hash_ops.hpte_clear_all();
 }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+       if (radix_enabled())
+               return -ENODEV;
+
+       return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
index fd3e403..270eb9b 100644 (file)
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
         */
        if (TRAP(regs) != 0xf00)
                use_siar = 0;
+       else if ((ppmu->flags & PPMU_NO_SIAR))
+               use_siar = 0;
        else if (marked)
                use_siar = 1;
        else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
index 6447dc1..929b56d 100644 (file)
@@ -16,7 +16,7 @@ EVENT(PM_CYC,                                 0x0001e)
 EVENT(PM_ICT_NOSLOT_CYC,                       0x100f8)
 EVENT(PM_CMPLU_STALL,                          0x1e054)
 EVENT(PM_INST_CMPL,                            0x00002)
-EVENT(PM_BRU_CMPL,                             0x40060)
+EVENT(PM_BRU_CMPL,                             0x10012)
 EVENT(PM_BR_MPRED_CMPL,                                0x400f6)
 
 /* All L1 D cache load references counted at finish, gated by reject */
index 346010e..7332634 100644 (file)
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
        .bhrb_filter_map        = power9_bhrb_filter_map,
        .get_constraint         = isa207_get_constraint,
        .disable_pmc            = isa207_disable_pmc,
-       .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
+       .flags                  = PPMU_NO_SIAR | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power9_generic_events),
        .generic_events         = power9_generic_events,
        .cache_events           = &power9_cache_events,
index d38e86f..60c5765 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/xics.h>
 #include <asm/io.h>
 #include <asm/opal.h>
+#include <asm/kvm_ppc.h>
 
 static void icp_opal_teardown_cpu(void)
 {
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
         * Should we be flagging idle loop instead?
         * Or creating some task to be scheduled?
         */
-       opal_int_eoi((0x00 << 24) | XICS_IPI);
+       if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+               force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+       unsigned int kvm_xirr;
+       __be32 hw_xirr;
+       int64_t rc;
+
+       /* Handle an interrupt latched by KVM first */
+       kvm_xirr = kvmppc_get_xics_latch();
+       if (kvm_xirr)
+               return kvm_xirr;
+
+       /* Then ask OPAL */
+       rc = opal_int_get_xirr(&hw_xirr, false);
+       if (rc < 0)
+               return 0;
+       return be32_to_cpu(hw_xirr);
 }
 
 static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
        unsigned int xirr;
        unsigned int vec;
        unsigned int irq;
-       int64_t rc;
 
-       rc = opal_int_get_xirr(&xirr, false);
-       if (rc < 0)
-               return 0;
-       xirr = be32_to_cpu(xirr);
+       xirr = icp_opal_get_xirr();
        vec = xirr & 0x00ffffff;
        if (vec == XICS_IRQ_SPURIOUS)
                return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
        xics_mask_unknown_vec(vec);
 
        /* We might learn about it later, so EOI it */
-       opal_int_eoi(xirr);
+       if (opal_int_eoi(xirr) > 0)
+               force_external_irq_replay();
 
        return 0;
 }
index e659daf..e009753 100644 (file)
@@ -69,7 +69,7 @@ CONFIG_CMA=y
 CONFIG_CMA_DEBUG=y
 CONFIG_CMA_DEBUGFS=y
 CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
 CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
 CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_DEBUG_CREDENTIALS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
 CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
 CONFIG_IMA=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_CRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
index 95ceac5..f05d2d6 100644 (file)
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
 CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_RAW_DRIVER=m
 CONFIG_HANGCHECK_TIMER=m
 CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
 CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
 CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index bc7b176..2cf8734 100644 (file)
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CLEANCACHE=y
 CONFIG_FRONTSWAP=y
 CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
 CONFIG_ZSWAP=y
 CONFIG_ZBUD=m
 CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
 CONFIG_NF_CONNTRACK_EVENTS=y
 CONFIG_NF_CONNTRACK_TIMEOUT=y
 CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
 CONFIG_NF_CONNTRACK_AMANDA=m
 CONFIG_NF_CONNTRACK_FTP=m
 CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
 CONFIG_NFT_EXTHDR=m
 CONFIG_NFT_META=m
 CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
 CONFIG_NFT_COUNTER=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_AUDIT=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
 CONFIG_NETFILTER_XT_MATCH_RATEEST=m
 CONFIG_NETFILTER_XT_MATCH_REALM=m
 CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
 CONFIG_NETFILTER_XT_MATCH_STATE=m
 CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
 CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
 CONFIG_IP_VS_FTP=m
 CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
 CONFIG_IFB=m
 CONFIG_MACVLAN=m
 CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
 CONFIG_VXLAN=m
 CONFIG_TUN=m
 CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
 CONFIG_JBD2_DEBUG=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
 CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
 CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
 CONFIG_UPROBE_EVENT=y
 CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
 CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
 CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SERPENT=m
 CONFIG_CRYPTO_TEA=m
 CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
 CONFIG_CORDIC=m
index 2d40ef0..d00e368 100644 (file)
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
 CONFIG_STATIC_KEYS_SELFTEST=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
 CONFIG_EQUALIZER=m
 CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
 CONFIG_RAW_DRIVER=m
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_S390_PTDUMP=y
 CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
index d7697ab..8e136b8 100644 (file)
@@ -15,7 +15,9 @@
        BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
        asm volatile(                                                   \
                "       lctlg   %1,%2,%0\n"                             \
-               : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+               :                                                       \
+               : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high)    \
+               : "memory");                                            \
 }
 
 #define __ctl_store(array, low, high) {                                        \
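
The only functional change to the macro above is the added "memory" clobber, which tells the compiler the asm may consume or modify memory it cannot see, so pending stores to the control-register array are emitted before the lctlg and nothing is cached in registers across it. The general idiom, illustrated outside of s390:

    /* A compiler barrier: no memory access may be reordered across it,
     * and no value cached in a register survives it. */
    #define barrier()  asm volatile("" : : : "memory")

    static unsigned long table[4];

    static void fill_and_publish(const unsigned long *src)
    {
            for (int i = 0; i < 4; i++)
                    table[i] = src[i];
            barrier();  /* stores above reach memory before later asm */
    }
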
index 7447ba5..12020b5 100644 (file)
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+       else
+               memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
        /* If setting FPC, must validate it first. */
        if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
                u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
        if (target == current)
                save_fpu_regs();
 
+       for (i = 0; i < __NUM_VXRS_LOW; i++)
+               vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
        rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
        if (rc == 0)
                for (i = 0; i < __NUM_VXRS_LOW; i++)
index bec71e9..6484a25 100644 (file)
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
        memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
-              S390_ARCH_FAC_LIST_SIZE_BYTE);
+              sizeof(S390_lowcore.stfle_fac_list));
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
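
Both memcpy() calls are now bounded by sizeof(S390_lowcore.stfle_fac_list) rather than by the independent S390_ARCH_FAC_LIST_SIZE_BYTE constant, which removes any chance of reading past the source array if the two sizes ever disagree. The defensive idiom, sketched (it works only on true arrays, where sizeof sees the full object, never on pointers):

    #include <string.h>

    /* Copy min(sizeof(src), sizeof(dst)); never over-read the source
     * or overflow the destination. */
    #define copy_capped(dst, src)                                        \
            memcpy((dst), (src),                                         \
                   sizeof(src) < sizeof(dst) ? sizeof(src) : sizeof(dst))
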
index 7a1897c..d56ef26 100644 (file)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
        return pgste;
 }
 
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    pgste_t pgste, pte_t old, pte_t new)
 {
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
        } else {
                *ptep = new;
        }
+       return old;
 }
 
 pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_direct(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        old = ptep_flush_lazy(mm, addr, ptep);
-       ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+       old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
 }
index d89b701..e279572 100644 (file)
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
                          const void *kbuf, const void __user *ubuf)
 {
        int ret;
-       struct pt_regs regs;
+       struct pt_regs regs = *task_pt_regs(target);
 
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
                                 sizeof(regs));
index 05612a2..496e603 100644 (file)
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
         * all online cpus.
         */
        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
-                         "perf/x86/amd/ibs:STARTING",
+                         "perf/x86/amd/ibs:starting",
                          x86_pmu_amd_ibs_starting_cpu,
                          x86_pmu_amd_ibs_dying_cpu);
 
index d611cab..eb1484c 100644 (file)
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+                       struct cpu_hw_events *sibling;
                        struct intel_excl_cntrs *c;
 
-                       c = per_cpu(cpu_hw_events, i).excl_cntrs;
+                       sibling = &per_cpu(cpu_hw_events, i);
+                       c = sibling->excl_cntrs;
                        if (c && c->core_id == core_id) {
                                cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
                                cpuc->excl_cntrs = c;
-                               cpuc->excl_thread_id = 1;
+                               if (!sibling->excl_thread_id)
+                                       cpuc->excl_thread_id = 1;
                                break;
                        }
                }
index 945e512..1e35dd0 100644 (file)
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
        .irq_ack                = irq_chip_ack_parent,
        .irq_eoi                = ioapic_ir_ack_level,
        .irq_set_affinity       = ioapic_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
index 57d8a85..d153be8 100644 (file)
@@ -6171,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-       return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+       return emulator_write_emulated(ctxt, rip, instruction, 3,
+               &ctxt->exception);
 }
 
 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
index 3cd6983..3961103 100644 (file)
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+       {
+               .callback = set_nouse_crs,
+               .ident = "Supermicro X8DTH",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+               },
+       },
 
        /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
        {
index a8e67a1..c3400b5 100644 (file)
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
 static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
 {
        LIST_HEAD(rq_list);
-       LIST_HEAD(driver_list);
 
        if (unlikely(blk_mq_hctx_stopped(hctx)))
                return;
index 82b0b57..b0399e8 100644 (file)
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
 
        ACPI_FUNCTION_TRACE(tb_install_and_load_table);
 
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
        /* Install the table and load it into the namespace */
 
        status = acpi_tb_install_standard_table(address, flags, TRUE,
                                                override, &i);
        if (ACPI_FAILURE(status)) {
-               goto unlock_and_exit;
+               goto exit;
        }
 
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        status = acpi_tb_load_table(i, acpi_gbl_root_node);
-       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
 
-unlock_and_exit:
+exit:
        *table_index = i;
-       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        return_ACPI_STATUS(status);
 }
 
index 5fdf251..01e1b3d 100644 (file)
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                goto release_and_exit;
        }
 
+       /* Acquire the table lock */
+
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
        if (reload) {
                /*
                 * Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                         new_table_desc.signature.integer));
 
                        status = AE_BAD_SIGNATURE;
-                       goto release_and_exit;
+                       goto unlock_and_exit;
                }
 
                /* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                /* Table is still loaded, this is an error */
 
                                status = AE_ALREADY_EXISTS;
-                               goto release_and_exit;
+                               goto unlock_and_exit;
                        } else {
                                /*
                                 * Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
                                 * indicate the re-installation.
                                 */
                                acpi_tb_uninstall_table(&new_table_desc);
+                               (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
                                *table_index = i;
                                return_ACPI_STATUS(AE_OK);
                        }
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
 
        /* Invoke table handler if present */
 
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
        if (acpi_gbl_table_handler) {
                (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
                                             new_table_desc.pointer,
                                             acpi_gbl_table_handler_context);
        }
+       (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+       /* Release the table lock */
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
 
 release_and_exit:
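
The net effect of this reshuffle is that ACPI_MTX_TABLES is held only across table-list manipulation and is deliberately dropped before the table handler callback runs (then re-acquired), so a handler that performs table operations of its own cannot deadlock on the same mutex. A pthread sketch of the drop-lock-around-callback shape:

    #include <pthread.h>

    static pthread_mutex_t tables_lock = PTHREAD_MUTEX_INITIALIZER;

    typedef void (*notify_fn)(void *ctx);

    /* Mutate protected state under the lock, but never invoke user
     * callbacks while holding it -- they may re-enter this module. */
    static void install_and_notify(notify_fn cb, void *ctx)
    {
            pthread_mutex_lock(&tables_lock);
            /* ... link the new table into the list ... */
            pthread_mutex_unlock(&tables_lock);

            if (cb)
                    cb(ctx);    /* lock dropped around the callback */
    }
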
 
index 9b6cebe..54abb26 100644 (file)
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
                if (acpi_sleep_state_supported(i))
                        sleep_states[i] = 1;
 
-       /*
-        * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
-        * the default suspend mode was not selected from the command line.
-        */
-       if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
-           mem_sleep_default > PM_SUSPEND_MEM)
-               mem_sleep_default = PM_SUSPEND_FREEZE;
-
        suspend_set_ops(old_suspend_ordering ?
                &acpi_suspend_ops_old : &acpi_suspend_ops);
        freeze_set_ops(&acpi_freeze_ops);
index 02ded25..7f48156 100644 (file)
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
                },
        },
-       {
-       /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
-       /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
-       .callback = video_detect_force_native,
-       .ident = "HP Pavilion dv6",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
-               DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
-               },
-       },
-
        { },
 };
 
index 8ab8ea1..dacb6a8 100644 (file)
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
        sprintf(buf, "%s", zone->name);
 
        /* MMOP_ONLINE_KERNEL */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+       zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
        }
 
        /* MMOP_ONLINE_MOVABLE */
-       zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+       zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
        if (zone_shift) {
                strcat(buf, " ");
                strcat(buf, (zone + zone_shift)->name);
index 50a2020..9fd06ee 100644 (file)
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
        struct request *req = blk_mq_rq_from_pdu(cmd);
-       int result, flags;
+       int result;
        struct nbd_request request;
        unsigned long size = blk_rq_bytes(req);
        struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
        if (type != NBD_CMD_WRITE)
                return 0;
 
-       flags = 0;
        bio = req->bio;
        while (bio) {
                struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 
                bio_for_each_segment(bvec, bio, iter) {
                        bool is_last = !next && bio_iter_last(bvec, iter);
+                       int flags = is_last ? 0 : MSG_MORE;
 
-                       if (is_last)
-                               flags = MSG_MORE;
                        dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                                cmd, bvec.bv_len);
                        result = sock_send_bvec(nbd, index, &bvec, flags);
index b2bdfa8..265f1a7 100644 (file)
@@ -197,13 +197,13 @@ struct blkfront_info
        /* Number of pages per ring buffer. */
        unsigned int nr_ring_pages;
        struct request_queue *rq;
-       unsigned int feature_flush;
-       unsigned int feature_fua;
+       unsigned int feature_flush:1;
+       unsigned int feature_fua:1;
        unsigned int feature_discard:1;
        unsigned int feature_secdiscard:1;
+       unsigned int feature_persistent:1;
        unsigned int discard_granularity;
        unsigned int discard_alignment;
-       unsigned int feature_persistent:1;
        /* Number of 4KB segments handled */
        unsigned int max_indirect_segments;
        int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
        }
        else
                grants = info->max_indirect_segments;
-       psegs = grants / GRANTS_PER_PSEG;
+       psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
 
        err = fill_grant_buffer(rinfo,
                                (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
                blkfront_setup_discard(info);
 
        info->feature_persistent =
-               xenbus_read_unsigned(info->xbdev->otherend,
-                                    "feature-persistent", 0);
+               !!xenbus_read_unsigned(info->xbdev->otherend,
+                                      "feature-persistent", 0);
 
        indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
                                        "feature-max-indirect-segments", 0);
-       info->max_indirect_segments = min(indirect_segments,
-                                         xen_blkif_max_segments);
+       if (indirect_segments > xen_blkif_max_segments)
+               indirect_segments = xen_blkif_max_segments;
+       if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               indirect_segments = 0;
+       info->max_indirect_segments = indirect_segments;
 }
 
 /*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
        if (!xen_domain())
                return -ENODEV;
 
+       if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+               xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
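
Two details worth noting in this file: the persistent-segment count now rounds up, so a trailing remainder of grants still gets a slot, and the indirect-segment value from xenstore is clamped explicitly, with values at or below BLKIF_MAX_SEGMENTS_PER_REQUEST disabling indirect descriptors entirely. DIV_ROUND_UP itself is the stock kernel macro:

    /* How many buckets of size 'per' are needed for 'n' items:
     * e.g. DIV_ROUND_UP(7, 3) == 3, whereas 7 / 3 == 2. */
    #define DIV_ROUND_UP(n, per)  (((n) + (per) - 1) / (per))
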
index 8b00e79..17857be 100644 (file)
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
 {
        struct ports_device *portdev;
 
-       portdev = container_of(work, struct ports_device, control_work);
+       portdev = container_of(work, struct ports_device, config_work);
        if (!use_multiport(portdev)) {
                struct virtio_device *vdev;
                struct port *port;
index 8c8b495..cdc092a 100644 (file)
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
        GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
                                GATE_BUS_TOP, 24, 0, 0),
        GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
-                               GATE_BUS_TOP, 27, 0, 0),
+                               GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
 };
 
 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
 
        GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
-                       GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
                        GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
 
        GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
                        GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
-                       GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
                        GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
                        GATE_BUS_TOP, 5, 0, 0),
        GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
-                       GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
                        GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
        GATE(0, "aclk166", "mout_user_aclk166",
                        GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
-                       GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+                       GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
        GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
                        GATE_BUS_TOP, 16, 0, 0),
        GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
                        GATE_BUS_TOP, 17, 0, 0),
        GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
-                       GATE_BUS_TOP, 18, 0, 0),
+                       GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
        GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
                        GATE_BUS_TOP, 28, 0, 0),
        GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
                        GATE_BUS_TOP, 29, 0, 0),
 
        GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
-                       SRC_MASK_TOP2, 24, 0, 0),
+                       SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
 
        GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
                        SRC_MASK_TOP7, 20, 0, 0),
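The flag changes above promote several Exynos bus clocks from CLK_IGNORE_UNUSED to CLK_IS_CRITICAL. The distinction matters: CLK_IGNORE_UNUSED merely exempts a clock from the late-boot disable-unused sweep, while CLK_IS_CRITICAL makes the core take an enable reference at registration that never drops to zero. A simplified userspace model of those semantics (flag values and helpers are illustrative, not the clk framework API):

#include <stdio.h>
#include <stdbool.h>

#define CLK_IGNORE_UNUSED	(1 << 0)	/* stand-in values */
#define CLK_IS_CRITICAL		(1 << 1)

struct clk_model {
	const char *name;
	unsigned long flags;
	int enable_count;
	bool gated;
};

static void clk_register_model(struct clk_model *clk)
{
	if (clk->flags & CLK_IS_CRITICAL)
		clk->enable_count++;	/* reference the core never releases */
}

static void clk_disable_unused_model(struct clk_model *clk)
{
	if (clk->enable_count == 0 && !(clk->flags & CLK_IGNORE_UNUSED))
		clk->gated = true;	/* late-boot cleanup gates it */
}

int main(void)
{
	struct clk_model ignored = { "aclk200_fsys2", CLK_IGNORE_UNUSED, 0, false };
	struct clk_model critical = { "aclk200_fsys", CLK_IS_CRITICAL, 0, false };

	clk_register_model(&ignored);
	clk_register_model(&critical);
	clk_disable_unused_model(&ignored);
	clk_disable_unused_model(&critical);

	/* an ignored clock survives boot but any consumer can still gate it
	 * later; a critical clock can never reach enable_count == 0 */
	printf("%s: count=%d gated=%d\n", ignored.name, ignored.enable_count, ignored.gated);
	printf("%s: count=%d gated=%d\n", critical.name, critical.enable_count, critical.gated);
	return 0;
}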
index 4da1dc2..670ff0f 100644
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
        if (mct_int_type == MCT_INT_SPI) {
                if (evt->irq != -1)
                        disable_irq_nosync(evt->irq);
+               exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
        } else {
                disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
        }
index f91c257..a54d65a 100644
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                        limits = &performance_limits;
                        perf_limits = limits;
                }
-               if (policy->max >= policy->cpuinfo.max_freq) {
+               if (policy->max >= policy->cpuinfo.max_freq &&
+                   !limits->no_turbo) {
                        pr_debug("set performance\n");
                        intel_pstate_set_performance_limits(perf_limits);
                        goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
            policy->policy != CPUFREQ_POLICY_PERFORMANCE)
                return -EINVAL;
 
+       /* When per-CPU limits are used, sysfs limits are not used */
+       if (!per_cpu_limits) {
+               unsigned int max_freq, min_freq;
+
+               max_freq = policy->cpuinfo.max_freq *
+                                               limits->max_sysfs_pct / 100;
+               min_freq = policy->cpuinfo.max_freq *
+                                               limits->min_sysfs_pct / 100;
+               cpufreq_verify_within_limits(policy, min_freq, max_freq);
+       }
+
        return 0;
 }
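The added verify step derives hard frequency bounds from the global sysfs percentages whenever per-CPU limits are off, then clamps the incoming policy against them. Both bounds scale cpuinfo.max_freq, so the arithmetic can be checked standalone (the numbers below are examples):

#include <stdio.h>

int main(void)
{
	unsigned int cpuinfo_max_freq = 3500000;	/* kHz, example value */
	unsigned int max_sysfs_pct = 80, min_sysfs_pct = 20;

	unsigned int max_freq = cpuinfo_max_freq * max_sysfs_pct / 100;
	unsigned int min_freq = cpuinfo_max_freq * min_sysfs_pct / 100;

	/* cpufreq_verify_within_limits() would now pull policy->min and
	 * policy->max into [min_freq, max_freq] */
	printf("policy clamped to [%u, %u] kHz\n", min_freq, max_freq);
	return 0;
}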
 
index 86bf3b8..a07ae9e 100644
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
 }
 
 /**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
  * @gpiochip: the gpiochip to add the irqchip to
  * @irqchip: the irqchip to add to the gpiochip
  * @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
  * the pins on the gpiochip can generate a unique IRQ. Everything else
  * needs to be open coded.
  */
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
-                         struct irq_chip *irqchip,
-                         unsigned int first_irq,
-                         irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key)
 {
        struct device_node *of_node;
        bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
 
 #else /* CONFIG_GPIOLIB_IRQCHIP */
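The rename from _gpiochip_irqchip_add() to gpiochip_irqchip_add_key() matches the usual kernel convention in which a public wrapper macro materialises one static lockdep class key per call site and hands it to the *_key() worker, so lockdep can tell different gpiochip users apart. A hedged sketch of that pattern with stand-in types; the real wrapper lives in linux/gpio/driver.h and differs in detail:

#include <stdio.h>

struct lock_class_key { int dummy; };	/* stand-in for the lockdep type */

static int gpiochip_irqchip_add_key_model(const char *chip,
					  struct lock_class_key *key)
{
	printf("chip %s registered with key %p\n", chip, (void *)key);
	return 0;
}

/* GNU statement expression: every expansion site gets its own static key */
#define gpiochip_irqchip_add_model(chip)			\
({								\
	static struct lock_class_key _key;			\
	gpiochip_irqchip_add_key_model((chip), &_key);		\
})

int main(void)
{
	gpiochip_irqchip_add_model("chip-a");	/* key A */
	gpiochip_irqchip_add_model("chip-b");	/* distinct key B */
	return 0;
}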
 
index 29d6d84..41e41f9 100644
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
                }
                break;
        }
+
+       if (!(*out_ring && (*out_ring)->adev)) {
+               DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                         ring, ip_type);
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index 9999dc7..ccb5e02 100644
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v10_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v10_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
 static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
                dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v10_0_show_cursor(crtc);
 
                dce_v10_0_lock_cursor(crtc, false);
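The dce_v10_0 hunks above (and their v11/v6/v8 twins below) move the CUR_SIZE write into cursor_move_locked(), so the cursor dimensions are reprogrammed together with every position update instead of only when they change. The register packs both dimensions, each minus one, into a single word; the encoding is easy to check standalone:

#include <stdio.h>
#include <stdint.h>

static uint32_t pack_cur_size(unsigned int w, unsigned int h)
{
	/* same layout the WREG32(mmCUR_SIZE, ...) call programs */
	return ((w - 1) << 16) | (h - 1);
}

int main(void)
{
	uint32_t v = pack_cur_size(64, 64);

	printf("reg=0x%08x width=%u height=%u\n",
	       v, (v >> 16) + 1, (v & 0xffff) + 1);
	return 0;
}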
index 2006abb..a7af5b3 100644
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                      int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v11_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v11_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
 static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
                dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                             amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v11_0_show_cursor(crtc);
 
                dce_v11_0_lock_cursor(crtc, false);
index b4e4ec6..39df6a5 100644
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
        struct amdgpu_device *adev = crtc->dev->dev_private;
        int xorigin = 0, yorigin = 0;
 
+       int w = amdgpu_crtc->cursor_width;
+
        amdgpu_crtc->cursor_x = x;
        amdgpu_crtc->cursor_y = y;
 
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v6_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v6_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
 static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
                dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v6_0_show_cursor(crtc);
                dce_v6_0_lock_cursor(crtc, false);
        }
index 584abe8..28102bb 100644
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
 
        WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
        WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+       WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+              ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
 
        return 0;
 }
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
                                     int32_t hot_y)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
        struct drm_gem_object *obj;
        struct amdgpu_bo *aobj;
        int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
        dce_v8_0_lock_cursor(crtc, true);
 
-       if (hot_x != amdgpu_crtc->cursor_hot_x ||
+       if (width != amdgpu_crtc->cursor_width ||
+           height != amdgpu_crtc->cursor_height ||
+           hot_x != amdgpu_crtc->cursor_hot_x ||
            hot_y != amdgpu_crtc->cursor_hot_y) {
                int x, y;
 
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
 
                dce_v8_0_cursor_move_locked(crtc, x, y);
 
-               amdgpu_crtc->cursor_hot_x = hot_x;
-               amdgpu_crtc->cursor_hot_y = hot_y;
-       }
-
-       if (width != amdgpu_crtc->cursor_width ||
-           height != amdgpu_crtc->cursor_height) {
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (width - 1) << 16 | (height - 1));
                amdgpu_crtc->cursor_width = width;
                amdgpu_crtc->cursor_height = height;
+               amdgpu_crtc->cursor_hot_x = hot_x;
+               amdgpu_crtc->cursor_hot_y = hot_y;
        }
 
        dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
 static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct amdgpu_device *adev = crtc->dev->dev_private;
 
        if (amdgpu_crtc->cursor_bo) {
                dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
                dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
                                            amdgpu_crtc->cursor_y);
 
-               WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
-                      (amdgpu_crtc->cursor_width - 1) << 16 |
-                      (amdgpu_crtc->cursor_height - 1));
-
                dce_v8_0_show_cursor(crtc);
 
                dce_v8_0_lock_cursor(crtc, false);
index 762f8e8..e9a1768 100644
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
 
 static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
 {
-       struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-       kfree(amdgpu_encoder->enc_priv);
        drm_encoder_cleanup(encoder);
-       kfree(amdgpu_encoder);
+       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
index 45a573e..e2b0b16 100644
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
 MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
 MODULE_FIRMWARE("radeon/verde_mc.bin");
 MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK   0xf0000000
 #define MC_SEQ_MISC0__MT__GDDR1  0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        const char *chip_name;
        char fw_name[30];
        int err;
+       bool is_58_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       /* this memory configuration requires special firmware */
+       if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               is_58_fw = true;
+
+       if (is_58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
-              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
-              VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+              ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+       if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+               gmc_v6_0_set_fault_enable_default(adev, false);
+       else
+               gmc_v6_0_set_fault_enable_default(adev, true);
 
        gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+               return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+       else
+               return 0;
 }
 
 static int gmc_v6_0_sw_init(void *handle)
index 10bedfa..6e150db 100644
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
 
 union power_info {
        struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6817) ||
                    (adev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (adev->asic_type == CHIP_OLAND) {
-               if ((adev->pdev->revision == 0xC7) ||
-                   (adev->pdev->revision == 0x80) ||
-                   (adev->pdev->revision == 0x81) ||
-                   (adev->pdev->revision == 0x83) ||
-                   (adev->pdev->revision == 0x87) ||
-                   (adev->pdev->device == 0x6604) ||
-                   (adev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (adev->asic_type == CHIP_HAINAN) {
                if ((adev->pdev->revision == 0x81) ||
                    (adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
                    (adev->pdev->device == 0x6665) ||
                    (adev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
                        ((adev->pdev->device == 0x6660) ||
                        (adev->pdev->device == 0x6663) ||
                        (adev->pdev->device == 0x6665) ||
-                       (adev->pdev->device == 0x6667))) ||
-                   ((adev->pdev->revision == 0xc3) &&
-                       (adev->pdev->device == 0x6665)))
+                        (adev->pdev->device == 0x6667))))
                        chip_name = "hainan_k";
+               else if ((adev->pdev->revision == 0xc3) &&
+                        (adev->pdev->device == 0x6665))
+                       chip_name = "banks_k_2";
                else
                        chip_name = "hainan";
                break;
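The reordered condition above carves the 0xc3/0x6665 combination out of the hainan_k case so it now loads the new banks_k_2 firmware instead. A standalone model of the selection; note the revision checks for the first branch (0x81/0x83) come from context not shown in this hunk, so they are an assumption here:

#include <stdio.h>

static const char *pick_smc_name(unsigned int device, unsigned int revision)
{
	if ((revision == 0x81 || revision == 0x83) &&
	    (device == 0x6660 || device == 0x6663 ||
	     device == 0x6665 || device == 0x6667))
		return "hainan_k";
	if (revision == 0xc3 && device == 0x6665)
		return "banks_k_2";	/* split out by the change above */
	return "hainan";
}

int main(void)
{
	char fw_name[30];

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin",
		 pick_smc_name(0x6665, 0xc3));
	printf("%s\n", fw_name);	/* radeon/banks_k_2_smc.bin */
	return 0;
}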
index 96444e4..7fb9137 100644
 #include "smu/smu_7_0_1_sh_mask.h"
 
 static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
 static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
 static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
 static int uvd_v4_2_start(struct amdgpu_device *adev);
 static void uvd_v4_2_stop(struct amdgpu_device *adev);
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+                            bool sw_mode);
 /**
  * uvd_v4_2_ring_get_rptr - get read pointer
  *
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
 
        return r;
 }
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+                                bool enable);
 /**
  * uvd_v4_2_hw_init - start and test UVD block
  *
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
        uint32_t tmp;
        int r;
 
-       uvd_v4_2_init_cg(adev);
-       uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+       uvd_v4_2_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        r = uvd_v4_2_start(adev);
        if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz;
        int i, j, r;
-
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;
 
+       WREG32(mmUVD_CGC_GATE, 0);
+       uvd_v4_2_set_dcm(adev, true);
+
        uvd_v4_2_mc_resume(adev);
 
        /* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
 
        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+       uvd_v4_2_set_dcm(adev, false);
 }
 
 /**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
 }
 
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
-       bool hw_mode = true;
-
-       if (hw_mode) {
-               uvd_v4_2_set_dcm(adev, false);
-       } else {
-               u32 tmp = RREG32(mmUVD_CGC_CTRL);
-               tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
-               WREG32(mmUVD_CGC_CTRL, tmp);
-       }
-}
-
 static bool uvd_v4_2_is_idle(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
 static int uvd_v4_2_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
 {
-       bool gate = false;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
-               return 0;
-
-       if (state == AMD_CG_STATE_GATE)
-               gate = true;
-
-       uvd_v4_2_enable_mgcg(adev, gate);
-
        return 0;
 }
 
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
-               return 0;
-
        if (state == AMD_PG_STATE_GATE) {
                uvd_v4_2_stop(adev);
                return 0;
index 5fb0b7f..37ca685 100644
 
 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT    0x04
 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK      0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE           0x07
+
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0        0x8616
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1        0x8617
 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2        0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK  0x02
 
 #define VCE_V3_0_FW_SIZE       (384 * 1024)
@@ -54,6 +58,9 @@
 
 #define FW_52_8_3      ((52 << 24) | (8 << 16) | (3 << 8))
 
+#define GET_VCE_INSTANCE(i)  ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+                                       | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
                WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
 
                data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
-               data &= ~0xffc00000;
+               data &= ~0x3ff;
                WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
 
                data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
                vce_v3_0_mc_resume(adev, idx);
                WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
 
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
                }
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                if (adev->vce.harvest_config & (1 << idx))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
 
                if (adev->asic_type >= CHIP_STONEY)
                        WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
                        vce_v3_0_set_vce_sw_clock_gating(adev, false);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
         * the VCE team suggests using bits 3 to 6 for the busy status check
         */
        mutex_lock(&adev->grbm_idx_mutex);
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
        if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
        }
-       WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+       WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
        mutex_unlock(&adev->grbm_idx_mutex);
 
        if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                if (adev->vce.harvest_config & (1 << i))
                        continue;
 
-               WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+               WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
 
                if (enable) {
                        /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
                vce_v3_0_set_vce_sw_clock_gating(adev, enable);
        }
 
-       WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+       WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
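Replacing WREG32_FIELD() with raw GRBM_GFX_INDEX writes means every VCE instance selection also sets the all-pipes bits, and the broadcast value 0xE0000000 is restored afterwards instead of merely clearing the instance field. The macro is pure bit packing and can be checked standalone:

#include <stdio.h>
#include <stdint.h>

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT	0x04
#define GRBM_GFX_INDEX__VCE_ALL_PIPE		0x07
#define mmGRBM_GFX_INDEX_DEFAULT		0xE0000000

#define GET_VCE_INSTANCE(i)	((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
					| GRBM_GFX_INDEX__VCE_ALL_PIPE)

int main(void)
{
	printf("instance 0: 0x%08x\n", (uint32_t)GET_VCE_INSTANCE(0));	/* 0x00000007 */
	printf("instance 1: 0x%08x\n", (uint32_t)GET_VCE_INSTANCE(1));	/* 0x00000017 */
	printf("restore:    0x%08x\n", (uint32_t)mmGRBM_GFX_INDEX_DEFAULT);
	return 0;
}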
index b0c63c5..6bb79c9 100644
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_CG_STATE_UNGATE);
+                                                       AMD_CG_STATE_GATE);
                                cgs_set_powergating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
                                cgs_set_clockgating_state(
                                                        hwmgr->device,
                                                        AMD_IP_BLOCK_TYPE_VCE,
-                                                       AMD_PG_STATE_GATE);
+                                                       AMD_PG_STATE_UNGATE);
                                cz_dpm_update_vce_dpm(hwmgr);
                                cz_enable_disable_vce_dpm(hwmgr, true);
                                return 0;
index 4b14f25..0fb4e8c 100644
@@ -1402,14 +1402,22 @@ int  cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
                                             cz_hwmgr->vce_dpm.hard_min_clk,
                                                PPSMC_MSG_SetEclkHardMin));
        } else {
-               /*EPR# 419220 -HW limitation to to */
-               cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
-               smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
-                                           PPSMC_MSG_SetEclkHardMin,
-                                           cz_get_eclk_level(hwmgr,
-                                    cz_hwmgr->vce_dpm.hard_min_clk,
-                                         PPSMC_MSG_SetEclkHardMin));
-
+               /* Program HardMin based on vce_arbiter.ecclk */
+               if (hwmgr->vce_arbiter.ecclk == 0) {
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                           PPSMC_MSG_SetEclkHardMin, 0);
+               /* disable ECLK DPM 0. Otherwise VCE could hang if
+                * switching SCLK from DPM 0 to 6/7 */
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                       PPSMC_MSG_SetEclkSoftMin, 1);
+               } else {
+                       cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+                       smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                                               PPSMC_MSG_SetEclkHardMin,
+                                               cz_get_eclk_level(hwmgr,
+                                               cz_hwmgr->vce_dpm.hard_min_clk,
+                                               PPSMC_MSG_SetEclkHardMin));
+               }
        }
        return 0;
 }
index 908011d..7abda94 100644
@@ -113,6 +113,7 @@ struct ast_private {
        struct ttm_bo_kmap_obj cache_kmap;
        int next_cursor;
        bool support_wide_screen;
+       bool DisableP2A;
 
        enum ast_tx_chip tx_chip_type;
        u8 dp501_maxclk;
index f75c642..533e762 100644
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
        } else
                *need_post = false;
 
+       /* Check P2A Access */
+       ast->DisableP2A = true;
+       data = ast_read32(ast, 0xf004);
+       if (data != 0xFFFFFFFF)
+               ast->DisableP2A = false;
+
        /* Check if we support wide screen */
        switch (ast->chip) {
        case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                        ast->support_wide_screen = true;
                else {
                        ast->support_wide_screen = false;
-                       /* Read SCU7c (silicon revision register) */
-                       ast_write32(ast, 0xf004, 0x1e6e0000);
-                       ast_write32(ast, 0xf000, 0x1);
-                       data = ast_read32(ast, 0x1207c);
-                       data &= 0x300;
-                       if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
-                               ast->support_wide_screen = true;
-                       if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
-                               ast->support_wide_screen = true;
+                       if (ast->DisableP2A == false) {
+                               /* Read SCU7c (silicon revision register) */
+                               ast_write32(ast, 0xf004, 0x1e6e0000);
+                               ast_write32(ast, 0xf000, 0x1);
+                               data = ast_read32(ast, 0x1207c);
+                               data &= 0x300;
+                               if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+                                       ast->support_wide_screen = true;
+                               if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+                                       ast->support_wide_screen = true;
+                       }
                }
                break;
        }
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
        uint32_t data, data2;
        uint32_t denum, num, div, ref_pll;
 
-       ast_write32(ast, 0xf004, 0x1e6e0000);
-       ast_write32(ast, 0xf000, 0x1);
-
-
-       ast_write32(ast, 0x10000, 0xfc600309);
-
-       do {
-               if (pci_channel_offline(dev->pdev))
-                       return -EIO;
-       } while (ast_read32(ast, 0x10000) != 0x01);
-       data = ast_read32(ast, 0x10004);
-
-       if (data & 0x40)
+       if (ast->DisableP2A)
+       {
                ast->dram_bus_width = 16;
+               ast->dram_type = AST_DRAM_1Gx16;
+               ast->mclk = 396;
+       }
        else
-               ast->dram_bus_width = 32;
+       {
+               ast_write32(ast, 0xf004, 0x1e6e0000);
+               ast_write32(ast, 0xf000, 0x1);
+               data = ast_read32(ast, 0x10004);
+
+               if (data & 0x40)
+                       ast->dram_bus_width = 16;
+               else
+                       ast->dram_bus_width = 32;
+
+               if (ast->chip == AST2300 || ast->chip == AST2400) {
+                       switch (data & 0x03) {
+                       case 0:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       default:
+                       case 1:
+                               ast->dram_type = AST_DRAM_1Gx16;
+                               break;
+                       case 2:
+                               ast->dram_type = AST_DRAM_2Gx16;
+                               break;
+                       case 3:
+                               ast->dram_type = AST_DRAM_4Gx16;
+                               break;
+                       }
+               } else {
+                       switch (data & 0x0c) {
+                       case 0:
+                       case 4:
+                               ast->dram_type = AST_DRAM_512Mx16;
+                               break;
+                       case 8:
+                               if (data & 0x40)
+                                       ast->dram_type = AST_DRAM_1Gx16;
+                               else
+                                       ast->dram_type = AST_DRAM_512Mx32;
+                               break;
+                       case 0xc:
+                               ast->dram_type = AST_DRAM_1Gx32;
+                               break;
+                       }
+               }
 
-       if (ast->chip == AST2300 || ast->chip == AST2400) {
-               switch (data & 0x03) {
-               case 0:
-                       ast->dram_type = AST_DRAM_512Mx16;
-                       break;
-               default:
-               case 1:
-                       ast->dram_type = AST_DRAM_1Gx16;
-                       break;
-               case 2:
-                       ast->dram_type = AST_DRAM_2Gx16;
-                       break;
+               data = ast_read32(ast, 0x10120);
+               data2 = ast_read32(ast, 0x10170);
+               if (data2 & 0x2000)
+                       ref_pll = 14318;
+               else
+                       ref_pll = 12000;
+
+               denum = data & 0x1f;
+               num = (data & 0x3fe0) >> 5;
+               data = (data & 0xc000) >> 14;
+               switch (data) {
                case 3:
-                       ast->dram_type = AST_DRAM_4Gx16;
-                       break;
-               }
-       } else {
-               switch (data & 0x0c) {
-               case 0:
-               case 4:
-                       ast->dram_type = AST_DRAM_512Mx16;
+                       div = 0x4;
                        break;
-               case 8:
-                       if (data & 0x40)
-                               ast->dram_type = AST_DRAM_1Gx16;
-                       else
-                               ast->dram_type = AST_DRAM_512Mx32;
+               case 2:
+               case 1:
+                       div = 0x2;
                        break;
-               case 0xc:
-                       ast->dram_type = AST_DRAM_1Gx32;
+               default:
+                       div = 0x1;
                        break;
                }
+               ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        }
-
-       data = ast_read32(ast, 0x10120);
-       data2 = ast_read32(ast, 0x10170);
-       if (data2 & 0x2000)
-               ref_pll = 14318;
-       else
-               ref_pll = 12000;
-
-       denum = data & 0x1f;
-       num = (data & 0x3fe0) >> 5;
-       data = (data & 0xc000) >> 14;
-       switch (data) {
-       case 3:
-               div = 0x4;
-               break;
-       case 2:
-       case 1:
-               div = 0x2;
-               break;
-       default:
-               div = 0x1;
-               break;
-       }
-       ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
        return 0;
 }
 
index 810c51d..5331ee1 100644
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
        ast_open_key(ast);
        ast_set_def_ext_reg(dev);
 
-       if (ast->chip == AST2300 || ast->chip == AST2400)
-               ast_init_dram_2300(dev);
-       else
-               ast_init_dram_reg(dev);
+       if (ast->DisableP2A == false)
+       {
+               if (ast->chip == AST2300 || ast->chip == AST2400)
+                       ast_init_dram_2300(dev);
+               else
+                       ast_init_dram_reg(dev);
 
-       ast_init_3rdtx(dev);
+               ast_init_3rdtx(dev);
+       }
+       else
+       {
+               if (ast->tx_chip_type != AST_TX_NONE)
+                       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80);        /* Enable DVO */
+       }
 }
 
 /* AST 2300 DRAM settings */
index eb9bf87..18eefdc 100644
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
 
        pm_runtime_enable(dev);
 
+       pm_runtime_get_sync(dev);
        phy_power_on(dp->phy);
 
        analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
                goto err_disable_pm_runtime;
        }
 
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
+
        return 0;
 
 err_disable_pm_runtime:
+
+       phy_power_off(dp->phy);
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
 
        return ret;
index 04b3c16..7f4cc6e 100644
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
         This is a KMS driver for emulated cirrus device in qemu.
         It is *NOT* intended for real cirrus devices. This requires
         the modesetting userspace X.org driver.
+
+        Cirrus is obsolete, the hardware was designed in the '90s
+        and can't keep up with today's needs.  More background:
+        https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+        Better alternatives are:
+          - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+          - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+          - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
index 6069748..50f5cf7 100644
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
 EXPORT_SYMBOL(drm_atomic_get_crtc_state);
 
 static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                  struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                  struct drm_crtc *crtc, s32 __user *fence_ptr)
 {
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
 }
 
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                          struct drm_crtc *crtc)
 {
-       s64 __user *fence_ptr;
+       s32 __user *fence_ptr;
 
        fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
        state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
                state->color_mgmt_changed |= replaced;
                return ret;
        } else if (property == config->prop_out_fence_ptr) {
-               s64 __user *fence_ptr = u64_to_user_ptr(val);
+               s32 __user *fence_ptr = u64_to_user_ptr(val);
 
                if (!fence_ptr)
                        return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
  */
 
 struct drm_out_fence_state {
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
        struct sync_file *sync_file;
        int fd;
 };
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
                return 0;
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
-               u64 __user *fence_ptr;
+               s32 __user *fence_ptr;
 
                fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
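The s64-to-s32 change matters because the out-fence is returned as a file descriptor, and userspace hands the kernel a pointer to a 32-bit slot for it; writing 64 bits through that pointer would clobber whatever sits in the next 4 bytes. A userspace model of the overrun:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* toy layout: userspace allocated a 32-bit slot for the fence fd */
	struct { int32_t out_fence_fd; int32_t neighbour; } user = { -1, 42 };
	int32_t fd = 5;
	int64_t wide_fd = fd;

	/* s32-sized store: only the fd slot changes */
	memcpy(&user.out_fence_fd, &fd, sizeof(fd));
	printf("fd=%d neighbour=%d\n", user.out_fence_fd, user.neighbour);

	/* s64-sized store: spills into the neighbouring field */
	memcpy(&user.out_fence_fd, &wide_fd, sizeof(wide_fd));
	printf("fd=%d neighbour=%d (clobbered)\n", user.out_fence_fd, user.neighbour);
	return 0;
}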
 
index ac6a352..e6b19bc 100644
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
                return NULL;
 
        mode->type |= DRM_MODE_TYPE_USERDEF;
+       /* fix up 1368x768: GTF/CVT can't express 1366 width due to alignment */
+       if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+               mode->hdisplay = 1366;
+               mode->hsync_start--;
+               mode->hsync_end--;
+               drm_mode_set_name(mode);
+       }
        drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
        return mode;
 }
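The fixup above exists because the GTF/CVT timing formulas round hdisplay to 8-pixel character cells, so a requested 1366 comes back as 1368; the patch restores the asked-for width and shifts hsync_start/hsync_end down by one to keep the timings consistent. A standalone model of the rounding and fixup, using the round-to-nearest behaviour of the GTF path:

#include <stdio.h>

int main(void)
{
	unsigned int xres = 1366;
	unsigned int hdisplay = (xres + 4) / 8 * 8;	/* cell-granularity rounding */

	printf("requested %u, formula gives %u\n", xres, hdisplay);
	if (xres == 1366 && hdisplay == 1368)
		hdisplay = 1366;	/* plus the hsync_start/end decrement */
	printf("fixed up to %u\n", hdisplay);
	return 0;
}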
index ac953f0..cf8f012 100644
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
        }
 
        if (dev->mode_config.delayed_event) {
+               /*
+                * FIXME:
+                *
+                * Use short (1s) delay to handle the initial delayed event.
+                * This delay should not be needed, but Optimus/nouveau will
+                * fail in a mysterious way if the delayed event is handled as
+                * soon as possible like it is done in
+                * drm_helper_probe_single_connector_modes() in case the poll
+                * was enabled before.
+                */
                poll = true;
-               delay = 0;
+               delay = HZ;
        }
 
        if (poll)
index 169ac96..fe0e85b 100644
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                struct list_head list;
                bool found;
 
+               /*
+                * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+                * drm_mm into giving out a low IOVA after address space
+                * rollover. This needs a proper fix.
+                */
                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                        size, 0, mmu->last_iova, ~0UL,
-                       DRM_MM_SEARCH_DEFAULT);
+                       mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
 
                if (ret != -ENOSPC)
                        break;
index 6ca1f31..75eeb83 100644
@@ -46,7 +46,8 @@ enum decon_flag_bits {
        BIT_CLKS_ENABLED,
        BIT_IRQS_ENABLED,
        BIT_WIN_UPDATED,
-       BIT_SUSPENDED
+       BIT_SUSPENDED,
+       BIT_REQUEST_UPDATE
 };
 
 struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
                m->crtc_vsync_end = m->crtc_vsync_start + 1;
        }
 
-       decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
-       /* enable clock gate */
-       val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
-       writel(val, ctx->addr + DECON_CMU);
-
        if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
                decon_setup_trigger(ctx);
 
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
 
        /* window enable */
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
                return;
 
        decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+       set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
 }
 
 static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
        for (i = ctx->first_win; i < WINDOWS_NR; i++)
                decon_shadow_protect_win(ctx, i, false);
 
-       /* standalone update */
-       decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+       if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+               decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
 
        if (ctx->out_type & IFTYPE_I80)
                set_bit(BIT_WIN_UPDATED, &ctx->flags);
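With BIT_REQUEST_UPDATE, the standalone-update trigger is written only when a plane was actually enabled or disabled since the last flush, rather than on every atomic flush. A userspace model of the set / test-and-clear handshake, with plain helpers standing in for the kernel's atomic bitops:

#include <stdio.h>
#include <stdbool.h>

#define BIT_REQUEST_UPDATE 0

static unsigned long flags;

static void set_bit_model(int nr, unsigned long *addr)
{
	*addr |= 1UL << nr;
}

static bool test_and_clear_bit_model(int nr, unsigned long *addr)
{
	bool was_set = *addr & (1UL << nr);

	*addr &= ~(1UL << nr);
	return was_set;
}

static void atomic_flush_model(void)
{
	if (test_and_clear_bit_model(BIT_REQUEST_UPDATE, &flags))
		printf("write STANDALONE_UPDATE_F\n");
	else
		printf("skip hardware update\n");
}

int main(void)
{
	atomic_flush_model();				/* nothing queued: skip */
	set_bit_model(BIT_REQUEST_UPDATE, &flags);	/* update_plane() ran */
	atomic_flush_model();				/* consume it: write once */
	atomic_flush_model();				/* already consumed: skip */
	return 0;
}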
index 0d41ebc..f7bce86 100644
 #include "i915_drv.h"
 #include "gvt.h"
 
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
 static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        POSTING_READ(fence_reg_lo);
 }
 
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+       int i;
+
+       for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+               intel_vgpu_write_fence(vgpu, i, 0);
+}
+
 static void free_vgpu_fence(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
        intel_runtime_pm_get(dev_priv);
 
        mutex_lock(&dev_priv->drm.struct_mutex);
+       _clear_vgpu_fence(vgpu);
        for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
                reg = vgpu->fence.regs[i];
-               intel_vgpu_write_fence(vgpu, i, 0);
                list_add_tail(&reg->link,
                              &dev_priv->mm.fence_list);
        }
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
                        continue;
                list_del(pos);
                vgpu->fence.regs[i] = reg;
-               intel_vgpu_write_fence(vgpu, i, 0);
                if (++i == vgpu_fence_sz(vgpu))
                        break;
        }
        if (i != vgpu_fence_sz(vgpu))
                goto out_free_fence;
 
+       _clear_vgpu_fence(vgpu);
+
        mutex_unlock(&dev_priv->drm.struct_mutex);
        intel_runtime_pm_put(dev_priv);
        return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
 }
 
 /**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+       intel_runtime_pm_get(dev_priv);
+       _clear_vgpu_fence(vgpu);
+       intel_runtime_pm_put(dev_priv);
+}
+
+/**
  * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
  * @vgpu: vGPU
  * @param: vGPU creation params
index 711c31c..4a6a2ed 100644
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
        }
        return 0;
 }
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+                              bool primary)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+       u16 *gmch_ctl;
+       int i;
+
+       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+              info->cfg_space_size);
+
+       if (!primary) {
+               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
+       }
+
+       /* Show guest that there isn't any stolen memory.*/
+       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+                                gvt_aperture_pa_base(gvt), true);
+
+       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+                                            | PCI_COMMAND_MEMORY
+                                            | PCI_COMMAND_MASTER);
+       /*
+        * Clear the upper 32 bits of the BARs and let the guest assign new values
+        */
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+               vgpu->cfg_space.bar[i].size = pci_resource_len(
+                                             gvt->dev_priv->drm.pdev, i * 2);
+               vgpu->cfg_space.bar[i].tracked = false;
+       }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+       u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+       bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+                               INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+       if (cmd & PCI_COMMAND_MEMORY) {
+               trap_gttmmio(vgpu, false);
+               map_aperture(vgpu, false);
+       }
+
+       /**
+        * Currently we only do such a reset when the vGPU is not
+        * owned by any VM, so we simply restore the entire cfg
+        * space to its default values.
+        */
+       intel_vgpu_init_cfg_space(vgpu, primary);
+}
index d26a092..e456398 100644
@@ -481,7 +481,6 @@ struct parser_exec_state {
        (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
        struct intel_gvt *gvt = s->vgpu->gvt;
 
-       if (bypass_batch_buffer_scan)
-               return 0;
-
        if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
                /* BDW decides privilege based on address space */
                if (cmd_val(s, 0) & (1 << 8))
index f32bb6f..3408373 100644
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
        ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                            unsigned long add, int gmadr_bytes)
-{
-       if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-               return -1;
-
-       *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-               BATCH_BUFFER_ADDR_MASK;
-       if (gmadr_bytes == 8) {
-               *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-                       add & BATCH_BUFFER_ADDR_HIGH_MASK;
-       }
-
-       return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-       int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+       struct intel_shadow_bb_entry *entry_obj;
 
        /* pin the gem object to ggtt */
-       if (!list_empty(&workload->shadow_bb)) {
-               struct intel_shadow_bb_entry *entry_obj =
-                       list_first_entry(&workload->shadow_bb,
-                                        struct intel_shadow_bb_entry,
-                                        list);
-               struct intel_shadow_bb_entry *temp;
+       list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+               struct i915_vma *vma;
 
-               list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                               list) {
-                       struct i915_vma *vma;
-
-                       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                                      4, 0);
-                       if (IS_ERR(vma)) {
-                               gvt_err("Cannot pin\n");
-                               return;
-                       }
-
-                       /* FIXME: we are not tracking our pinned VMA leaving it
-                        * up to the core to fix up the stray pin_count upon
-                        * free.
-                        */
-
-                       /* update the relocate gma with shadow batch buffer*/
-                       set_gma_to_bb_cmd(entry_obj,
-                                         i915_ggtt_offset(vma),
-                                         gmadr_bytes);
+               vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+               if (IS_ERR(vma)) {
+                       gvt_err("Cannot pin\n");
+                       return;
                }
+
+               /* FIXME: we are not tracking our pinned VMA leaving it
+                * up to the core to fix up the stray pin_count upon
+                * free.
+                */
+
+       /* update the relocated gma with the shadow batch buffer */
+               entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+               if (gmadr_bytes == 8)
+                       entry_obj->bb_start_cmd_va[2] = 0;
        }
 }
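
A note on the relocation rewrite above: retyping bb_start_cmd_va from void * to u32 * (see the gvt.h hunk further below) is what lets the gma fixup become plain dword indexing instead of cast-and-offset arithmetic, and it is why the old BATCH_BUFFER_ADDR_*_MASK helpers could go. A rough sketch of the idea with a hypothetical helper (the patch itself stores 0 in the high dword because the GGTT offset fits in 32 bits):

#include <linux/kernel.h>

/* Hypothetical helper mirroring the dword-indexed relocation. */
static void demo_set_bb_start(u32 *cmd, u64 gma, int gmadr_bytes)
{
        cmd[1] = lower_32_bits(gma);            /* BB start address, low dword */
        if (gmadr_bytes == 8)
                cmd[2] = upper_32_bits(gma);    /* high dword for 64-bit gmadr */
}
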
 
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
                INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
        }
 
-       vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+       vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                        sizeof(struct intel_vgpu_workload), 0,
                        SLAB_HWCACHE_ALIGN,
                        NULL);
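
The one-character-class change above ("gvt-g vgpu workload" to "gvt-g_vgpu_workload") matters because slab cache names surface in /proc/slabinfo and as sysfs entries, where embedded spaces break tooling (that rationale is an inference, not stated in the diff). A minimal sketch of the create/destroy pairing, with hypothetical names:

#include <linux/slab.h>

struct demo_workload { int ring_id; };

static struct kmem_cache *demo_cache;

static int demo_cache_init(void)
{
        /* single-token name: becomes a /proc/slabinfo row */
        demo_cache = kmem_cache_create("demo_workload",
                                       sizeof(struct demo_workload),
                                       0, SLAB_HWCACHE_ALIGN, NULL);
        return demo_cache ? 0 : -ENOMEM;
}

static void demo_cache_exit(void)
{
        kmem_cache_destroy(demo_cache);
}
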
index 6c5fdf5..47dec4a 100644 (file)
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-       u64 pte;
 
-#ifdef readq
-       pte = readq(addr);
-#else
-       pte = ioread32(addr);
-       pte |= (u64)ioread32(addr + 4) << 32;
-#endif
-       return pte;
+       return readq(addr);
 }
 
 static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
 {
        void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
 
-#ifdef writeq
        writeq(pte, addr);
-#else
-       iowrite32((u32)pte, addr);
-       iowrite32(pte >> 32, addr + 4);
-#endif
+
        I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
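
Dropping the #ifdef readq fallback is safe here presumably because this code only builds on 64-bit x86, where readq()/writeq() always exist (an inference from the fallback's removal, not stated in the diff). A driver that genuinely needs to run where they may be missing would typically pull in one of the io-64-nonatomic headers rather than open-code the split access; a hedged sketch of that convention:

/* Assumption: the header below supplies readq()/writeq() as two 32-bit
 * accesses (low word first) on targets that lack native 64-bit MMIO. */
#include <linux/io-64-nonatomic-lo-hi.h>

static u64 demo_read_pte(void __iomem *addr)
{
        return readq(addr);     /* native or emulated, caller doesn't care */
}
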
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
                        info->gtt_entry_size;
                mem = kzalloc(mm->has_shadow_page_table ?
                        mm->page_table_entry_size * 2
-                               : mm->page_table_entry_size,
-                       GFP_ATOMIC);
+                               : mm->page_table_entry_size, GFP_KERNEL);
                if (!mem)
                        return -ENOMEM;
                mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
        struct intel_vgpu_mm *mm;
        int ret;
 
-       mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+       mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (!mm) {
                ret = -ENOMEM;
                goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
-       struct page *scratch_pt;
+       void *scratch_pt;
        unsigned long mfn;
        int i;
-       void *p;
 
        if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
                return -EINVAL;
 
-       scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+       scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
        if (!scratch_pt) {
                gvt_err("fail to allocate scratch page\n");
                return -ENOMEM;
        }
 
-       p = kmap_atomic(scratch_pt);
-       mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+       mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
        if (mfn == INTEL_GVT_INVALID_ADDR) {
-               gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
-               kunmap_atomic(p);
-               __free_page(scratch_pt);
+               gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+               free_page((unsigned long)scratch_pt);
                return -EFAULT;
        }
        gtt->scratch_pt[type].page_mfn = mfn;
-       gtt->scratch_pt[type].page = scratch_pt;
+       gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
        gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
                        vgpu->id, type, mfn);
 
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         * scratch_pt[type] indicate the scratch pt/scratch page used by the
         * 'type' pt.
         * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
-        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+        * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
         * is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
         */
        if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                        se.val64 |= PPAT_CACHED_INDEX;
 
                for (i = 0; i < page_entry_num; i++)
-                       ops->set_entry(p, &se, i, false, 0, vgpu);
+                       ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
        }
 
-       kunmap_atomic(p);
-
        return 0;
 }
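
The allocation rework above relies on scratch pages living in lowmem, where the kernel keeps a permanent mapping: get_zeroed_page() hands back a usable virtual address directly, virt_to_page() recovers the struct page for bookkeeping, and the kmap_atomic()/kunmap_atomic() pair disappears. It also drops the self-contradictory GFP_KERNEL | GFP_ATOMIC flag mix. The pattern, sketched with hypothetical helpers:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *demo_alloc_scratch(struct page **pg)
{
        void *va = (void *)get_zeroed_page(GFP_KERNEL);

        if (!va)
                return NULL;
        *pg = virt_to_page(va); /* struct page for later reference */
        return va;              /* directly usable, no kmap needed */
}

static void demo_free_scratch(void *va)
{
        free_page((unsigned long)va);
}
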
 
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
        int ret;
-       void *page_addr;
+       void *page;
 
        gvt_dbg_core("init gtt\n");
 
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                return -ENODEV;
        }
 
-       gvt->gtt.scratch_ggtt_page =
-               alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
-       if (!gvt->gtt.scratch_ggtt_page) {
+       page = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!page) {
                gvt_err("fail to allocate scratch ggtt page\n");
                return -ENOMEM;
        }
+       gvt->gtt.scratch_ggtt_page = virt_to_page(page);
 
-       page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
-       gvt->gtt.scratch_ggtt_mfn =
-               intel_gvt_hypervisor_virt_to_mfn(page_addr);
+       gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
        if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
                gvt_err("fail to translate scratch ggtt page\n");
                __free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
        for (offset = 0; offset < num_entries; offset++)
                ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
+
+/**
+ * intel_vgpu_reset_gtt - reset all GTT related state
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from the vfio core to reset all GTT
+ * related state, including GGTT, PPGTT and scratch pages.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+       int i;
+
+       ppgtt_free_all_shadow_page(vgpu);
+       if (!dmlr)
+               return;
+
+       intel_vgpu_reset_ggtt(vgpu);
+
+       /* clear scratch page for security */
+       for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+               if (vgpu->gtt.scratch_pt[i].page != NULL)
+                       memset(page_address(vgpu->gtt.scratch_pt[i].page),
+                               0, PAGE_SIZE);
+       }
+}
index b315ab3..f88eb5e 100644 (file)
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
index 398877c..e6bf5c5 100644 (file)
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_clean_vgpu_types(gvt);
 
+       idr_destroy(&gvt->vgpu_idr);
+
        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
 }
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        gvt_dbg_core("init gvt device\n");
 
+       idr_init(&gvt->vgpu_idr);
+
        mutex_init(&gvt->lock);
        gvt->dev_priv = dev_priv;
 
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 
        ret = intel_gvt_setup_mmio_info(gvt);
        if (ret)
-               return ret;
+               goto out_clean_idr;
 
        ret = intel_gvt_load_firmware(gvt);
        if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
        intel_gvt_free_firmware(gvt);
 out_clean_mmio_info:
        intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+       idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
        return ret;
 }
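
The hunks above pair idr_init() with idr_destroy() on both the normal teardown path and the new out_clean_idr error label; an IDR allocates internal nodes lazily, and idr_destroy() is what releases them. The usual init/error-path shape, with hypothetical names:

#include <linux/idr.h>

struct demo_dev { struct idr obj_idr; };

static int demo_setup_hw(struct demo_dev *d) { return 0; }      /* stand-in */

static int demo_init(struct demo_dev *d)
{
        int ret;

        idr_init(&d->obj_idr);

        ret = demo_setup_hw(d);
        if (ret)
                goto out_clean_idr;
        return 0;

out_clean_idr:
        idr_destroy(&d->obj_idr);
        return ret;
}
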
index 0af1701..e227caf 100644 (file)
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
 
 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
                              struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
        u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                                         struct intel_vgpu_type *type);
 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask);
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
 
 
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                             unsigned long *g_index);
 
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+               bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
 
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 struct intel_gvt_ops {
index 5228097..ab2ea15 100644 (file)
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
 static int new_mmio_info(struct intel_gvt *gvt,
                u32 offset, u32 flags, u32 size,
                u32 addr_mask, u32 ro_mask, u32 device,
-               void *read, void *write)
+               int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+               int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
 {
        struct intel_gvt_mmio_info *info, *p;
        u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
                default:
                        /*should not hit here*/
                        gvt_err("invalid forcewake offset 0x%x\n", offset);
-                       return 1;
+                       return -EINVAL;
                }
        } else {
                ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
        return 0;
 }
 
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes, unsigned long bitmap)
-{
-       struct intel_gvt_workload_scheduler *scheduler =
-               &vgpu->gvt->scheduler;
-
-       vgpu->resetting = true;
-
-       intel_vgpu_stop_schedule(vgpu);
-       /*
-        * The current_vgpu will set to NULL after stopping the
-        * scheduler when the reset is triggered by current vgpu.
-        */
-       if (scheduler->current_vgpu == NULL) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_gvt_wait_vgpu_idle(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-       }
-
-       intel_vgpu_reset_execlist(vgpu, bitmap);
-
-       /* full GPU reset */
-       if (bitmap == 0xff) {
-               mutex_unlock(&vgpu->gvt->lock);
-               intel_vgpu_clean_gtt(vgpu);
-               mutex_lock(&vgpu->gvt->lock);
-               setup_vgpu_mmio(vgpu);
-               populate_pvinfo_page(vgpu);
-               intel_vgpu_init_gtt(vgpu);
-       }
-
-       vgpu->resetting = false;
-
-       return 0;
-}
-
 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
-               void *p_data, unsigned int bytes)
+                           void *p_data, unsigned int bytes)
 {
+       unsigned int engine_mask = 0;
        u32 data;
-       u64 bitmap = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
        data = vgpu_vreg(vgpu, offset);
 
        if (data & GEN6_GRDOM_FULL) {
                gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
-               bitmap = 0xff;
-       }
-       if (data & GEN6_GRDOM_RENDER) {
-               gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
-               bitmap |= (1 << RCS);
-       }
-       if (data & GEN6_GRDOM_MEDIA) {
-               gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
-               bitmap |= (1 << VCS);
-       }
-       if (data & GEN6_GRDOM_BLT) {
-               gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
-               bitmap |= (1 << BCS);
-       }
-       if (data & GEN6_GRDOM_VECS) {
-               gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
-               bitmap |= (1 << VECS);
-       }
-       if (data & GEN8_GRDOM_MEDIA2) {
-               gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
-               if (HAS_BSD2(vgpu->gvt->dev_priv))
-                       bitmap |= (1 << VCS2);
+               engine_mask = ALL_ENGINES;
+       } else {
+               if (data & GEN6_GRDOM_RENDER) {
+                       gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+                       engine_mask |= (1 << RCS);
+               }
+               if (data & GEN6_GRDOM_MEDIA) {
+                       gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+                       engine_mask |= (1 << VCS);
+               }
+               if (data & GEN6_GRDOM_BLT) {
+                       gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+                       engine_mask |= (1 << BCS);
+               }
+               if (data & GEN6_GRDOM_VECS) {
+                       gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+                       engine_mask |= (1 << VECS);
+               }
+               if (data & GEN8_GRDOM_MEDIA2) {
+                       gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+                       if (HAS_BSD2(vgpu->gvt->dev_priv))
+                               engine_mask |= (1 << VCS2);
+               }
        }
-       return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+       intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+       return 0;
 }
 
 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
        return 0;
 }
 
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
 {
        u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                unsigned int offset, void *p_data, unsigned int bytes)
 {
-       int rc = 0;
        unsigned int id = 0;
 
        write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
                id = VECS;
                break;
        default:
-               rc = -EINVAL;
-               break;
+               return -EINVAL;
        }
        set_bit(id, (void *)vgpu->tlb_handle_pending);
 
-       return rc;
+       return 0;
 }
 
 static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
index faaae07..3f656e3 100644 (file)
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
        return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-               char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                       struct device *dev, char *buf)
 {
        struct intel_vgpu_type *type;
        unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                                type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-       &mdev_type_attr_available_instance.attr,
+       &mdev_type_attr_available_instances.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_description.attr,
        NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        struct intel_vgpu_type *type;
        struct device *pdev;
        void *gvt;
+       int ret;
 
        pdev = mdev_parent_dev(mdev);
        gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
        if (!type) {
                gvt_err("failed to find type %s to create\n",
                                                kobject_name(kobj));
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        vgpu = intel_gvt_ops->vgpu_create(gvt, type);
        if (IS_ERR_OR_NULL(vgpu)) {
-               gvt_err("create intel vgpu failed\n");
-               return -EINVAL;
+               ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+               gvt_err("failed to create intel vgpu: %d\n", ret);
+               goto out;
        }
 
        INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 
        gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
                     dev_name(mdev_dev(mdev)));
-       return 0;
+       ret = 0;
+
+out:
+       return ret;
 }
 
 static int intel_vgpu_remove(struct mdev_device *mdev)
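
The create path above now distinguishes a NULL return (mapped to a fallback errno) from an ERR_PTR that carries its own code, rather than collapsing both to -EINVAL; that is the standard IS_ERR_OR_NULL idiom. Roughly, with a hypothetical creator function:

#include <linux/err.h>

struct demo_obj;
struct demo_obj *demo_create(void);     /* hypothetical: ERR_PTR or NULL */

static int demo_open(struct demo_obj **out)
{
        struct demo_obj *obj = demo_create();

        if (IS_ERR_OR_NULL(obj))
                /* NULL gets a fallback errno; ERR_PTR keeps its own */
                return obj == NULL ? -EFAULT : PTR_ERR(obj);

        *out = obj;
        return 0;
}
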
index 09c9450..4df078b 100644 (file)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
        if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
                goto err;
 
-       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
-       if (!mmio && !vgpu->mmio.disable_warn_untrack) {
-               gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
-                               vgpu->id, offset, bytes, *(u32 *)p_data);
-
-               if (offset == 0x206c) {
-                       gvt_err("------------------------------------------\n");
-                       gvt_err("vgpu%d: likely triggers a gfx reset\n",
-                       vgpu->id);
-                       gvt_err("------------------------------------------\n");
-                       vgpu->mmio.disable_warn_untrack = true;
-               }
-       }
-
        if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
                if (WARN_ON(!IS_ALIGNED(offset, bytes)))
                        goto err;
        }
 
+       mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
        if (mmio) {
                if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
                        if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                                goto err;
                }
                ret = mmio->read(vgpu, offset, p_data, bytes);
-       } else
+       } else {
                ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 
+               if (!vgpu->mmio.disable_warn_untrack) {
+                       gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+                               vgpu->id, offset, bytes, *(u32 *)p_data);
+
+                       if (offset == 0x206c) {
+                               gvt_err("------------------------------------------\n");
+                               gvt_err("vgpu%d: likely triggers a gfx reset\n",
+                                       vgpu->id);
+                               gvt_err("------------------------------------------\n");
+                               vgpu->mmio.disable_warn_untrack = true;
+                       }
+               }
+       }
+
        if (ret)
                goto err;
 
@@ -302,3 +303,56 @@ err:
        mutex_unlock(&gvt->lock);
        return ret;
 }
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       const struct intel_gvt_device_info *info = &gvt->device_info;
+
+       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+       /* set bits 0:2 (Core C-State) to C0 */
+       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code on failure
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+       const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+       vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+       if (!vgpu->mmio.vreg)
+               return -ENOMEM;
+
+       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+       intel_vgpu_reset_mmio(vgpu);
+
+       return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+       vfree(vgpu->mmio.vreg);
+       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
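
intel_vgpu_init_mmio() above allocates vreg and sreg as one vzalloc of twice mmio_size, with sreg at a fixed offset, so the single vfree() in intel_vgpu_clean_mmio() releases both. A sketch of that paired-buffer layout (hypothetical names; arithmetic on void * is the usual kernel GCC extension):

#include <linux/vmalloc.h>

struct demo_mmio { void *vreg, *sreg; };

static int demo_mmio_init(struct demo_mmio *m, size_t mmio_size)
{
        m->vreg = vzalloc(mmio_size * 2);       /* one allocation, two views */
        if (!m->vreg)
                return -ENOMEM;
        m->sreg = m->vreg + mmio_size;          /* shadow regs follow vregs */
        return 0;
}

static void demo_mmio_fini(struct demo_mmio *m)
{
        vfree(m->vreg);                         /* frees both views */
        m->vreg = m->sreg = NULL;
}
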
index 87d5b5e..3bc620f 100644 (file)
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
        *offset; \
 })
 
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
 
 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
index 81cd921..d9fb41a 100644 (file)
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
                        vgpu->id))
                return -EINVAL;
 
-       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
-                       GFP_DMA32 | __GFP_ZERO,
-                       INTEL_GVT_OPREGION_PORDER);
+       vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+                       __GFP_ZERO,
+                       get_order(INTEL_GVT_OPREGION_SIZE));
 
        if (!vgpu_opregion(vgpu)->va)
                return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
                map_vgpu_opregion(vgpu, false);
                free_pages((unsigned long)vgpu_opregion(vgpu)->va,
-                               INTEL_GVT_OPREGION_PORDER);
+                               get_order(INTEL_GVT_OPREGION_SIZE));
 
                vgpu_opregion(vgpu)->va = NULL;
        }
index 0dfe789..fbd023a 100644 (file)
@@ -50,8 +50,7 @@
 #define INTEL_GVT_OPREGION_PARM                   0x204
 
 #define INTEL_GVT_OPREGION_PAGES       2
-#define INTEL_GVT_OPREGION_PORDER      1
-#define INTEL_GVT_OPREGION_SIZE                (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE                (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
 
 #define VGT_SPRSTRIDE(pipe)    _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
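
Deriving the allocation order from the size macro keeps the __get_free_pages() and free_pages() call sites in sync: get_order() rounds a byte count up to the smallest covering page order (get_order(2 * PAGE_SIZE) == 1), so dropping the hand-maintained PORDER constant removes one way for them to drift apart. Sketch (sizes illustrative):

#include <linux/gfp.h>
#include <linux/mm.h>

#define DEMO_REGION_PAGES 2
#define DEMO_REGION_SIZE  (DEMO_REGION_PAGES * PAGE_SIZE)

static void *demo_alloc_region(void)
{
        return (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                        get_order(DEMO_REGION_SIZE));
}

static void demo_free_region(void *va)
{
        free_pages((unsigned long)va, get_order(DEMO_REGION_SIZE));
}
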
 
index 4db2422..e91885d 100644 (file)
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload;
+       struct intel_vgpu *vgpu;
        int event;
 
        mutex_lock(&gvt->lock);
 
        workload = scheduler->current_workload[ring_id];
+       vgpu = workload->vgpu;
 
-       if (!workload->status && !workload->vgpu->resetting) {
+       if (!workload->status && !vgpu->resetting) {
                wait_event(workload->shadow_ctx_status_wq,
                           !atomic_read(&workload->shadow_ctx_active));
 
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
                for_each_set_bit(event, workload->pending_events,
                                 INTEL_GVT_EVENT_MAX)
-                       intel_vgpu_trigger_virtual_event(workload->vgpu,
-                                       event);
+                       intel_vgpu_trigger_virtual_event(vgpu, event);
        }
 
        gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
        scheduler->current_workload[ring_id] = NULL;
 
-       atomic_dec(&workload->vgpu->running_workload_num);
-
        list_del_init(&workload->list);
        workload->complete(workload);
 
+       atomic_dec(&vgpu->running_workload_num);
        wake_up(&scheduler->workload_complete_wq);
        mutex_unlock(&gvt->lock);
 }
@@ -459,11 +459,11 @@ complete:
                gvt_dbg_sched("will complete workload %p\n, status: %d\n",
                                workload, workload->status);
 
-               complete_current_workload(gvt, ring_id);
-
                if (workload->req)
                        i915_gem_request_put(fetch_and_zero(&workload->req));
 
+               complete_current_workload(gvt, ring_id);
+
                if (need_force_wake)
                        intel_uncore_forcewake_put(gvt->dev_priv,
                                        FORCEWAKE_ALL);
index 3b30c28..2833dfa 100644 (file)
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
        struct drm_i915_gem_object *obj;
        void *va;
        unsigned long len;
-       void *bb_start_cmd_va;
+       u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
index 536d2b9..7295bc8 100644 (file)
 #include "gvt.h"
 #include "i915_pvinfo.h"
 
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       vfree(vgpu->mmio.vreg);
-       vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-
-       if (vgpu->mmio.vreg)
-               memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
-       else {
-               vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
-               if (!vgpu->mmio.vreg)
-                       return -ENOMEM;
-       }
-
-       vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
-       memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
-       memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
-       vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
-       /* set the bit 0:2(Core C-State ) to C0 */
-       vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
-       return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
-       struct intel_vgpu_creation_params *param)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       const struct intel_gvt_device_info *info = &gvt->device_info;
-       u16 *gmch_ctl;
-       int i;
-
-       memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
-              info->cfg_space_size);
-
-       if (!param->primary) {
-               vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-               vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
-                       INTEL_GVT_PCI_CLASS_VGA_OTHER;
-       }
-
-       /* Show guest that there isn't any stolen memory.*/
-       gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
-       *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
-       intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
-                                gvt_aperture_pa_base(gvt), true);
-
-       vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
-                                            | PCI_COMMAND_MEMORY
-                                            | PCI_COMMAND_MASTER);
-       /*
-        * Clear the bar upper 32bit and let guest to assign the new value
-        */
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
-       memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
-       for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
-               vgpu->cfg_space.bar[i].size = pci_resource_len(
-                                             gvt->dev_priv->drm.pdev, i * 2);
-               vgpu->cfg_space.bar[i].tracked = false;
-       }
-}
-
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
        /* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
                if (low_avail / min_low == 0)
                        break;
                gvt->types[i].low_gm_size = min_low;
-               gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+               gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
                gvt->types[i].fence = 4;
                gvt->types[i].max_instance = low_avail / min_low;
                gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
         */
        low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
                gvt->gm.vgpu_allocated_low_gm_size;
-       high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+       high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
                gvt->gm.vgpu_allocated_high_gm_size;
        fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
                gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        intel_vgpu_clean_gtt(vgpu);
        intel_gvt_hypervisor_detach_vgpu(vgpu);
        intel_vgpu_free_resource(vgpu);
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
        vfree(vgpu);
 
        intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
 
-       setup_vgpu_cfg_space(vgpu, param);
+       intel_vgpu_init_cfg_space(vgpu, param->primary);
 
-       ret = setup_vgpu_mmio(vgpu);
+       ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
-               goto out_free_vgpu;
+               goto out_clean_idr;
 
        ret = intel_vgpu_alloc_resource(vgpu, param);
        if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
 out_clean_vgpu_resource:
        intel_vgpu_free_resource(vgpu);
 out_clean_vgpu_mmio:
-       clean_vgpu_mmio(vgpu);
+       intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
 out_free_vgpu:
        vfree(vgpu);
        mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 }
 
 /**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when the user wants to reset a virtual GPU through
+ * a device model reset or a GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates a PCI-level reset, bringing
+ * the whole vGPU back to the default state it had when created. This vGPU
+ * function is required for both functionality and security reasons. The
+ * ultimate goal of a vGPU FLR is to allow a vGPU instance to be reused by
+ * virtual machines; when we assign a vGPU to a virtual machine we must
+ * issue such a reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flows for the GPU
+ * engines (Render, Blitter, Video, Video Enhancement), as defined by the
+ * GPU spec. Unlike the FLR, a GT reset only resets the particular resources
+ * of a vGPU named in the reset request. The guest driver can issue a GT
+ * reset by programming the virtual GDRST register to reset a specific
+ * virtual GPU engine or all engines.
+ *
+ * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
+ * The parameter engine_mask specifies the engines that need to be reset.
+ * If ALL_ENGINES is given for engine_mask, the caller requests a full GT
+ * reset and we will reset all virtual GPU engines. For FLR, engine_mask
+ * is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+                                unsigned int engine_mask)
+{
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+       gvt_dbg_core("------------------------------------------\n");
+       gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+                    vgpu->id, dmlr, engine_mask);
+       vgpu->resetting = true;
+
+       intel_vgpu_stop_schedule(vgpu);
+       /*
+        * The current_vgpu will set to NULL after stopping the
+        * scheduler when the reset is triggered by current vgpu.
+        */
+       if (scheduler->current_vgpu == NULL) {
+               mutex_unlock(&gvt->lock);
+               intel_gvt_wait_vgpu_idle(vgpu);
+               mutex_lock(&gvt->lock);
+       }
+
+       intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+       /* full GPU reset or device model level reset */
+       if (engine_mask == ALL_ENGINES || dmlr) {
+               intel_vgpu_reset_gtt(vgpu, dmlr);
+               intel_vgpu_reset_resource(vgpu);
+               intel_vgpu_reset_mmio(vgpu);
+               populate_pvinfo_page(vgpu);
+
+               if (dmlr)
+                       intel_vgpu_reset_cfg_space(vgpu);
+       }
+
+       vgpu->resetting = false;
+       gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+       gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
  * @vgpu: virtual GPU
  *
  * This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
  */
 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
 {
+       mutex_lock(&vgpu->gvt->lock);
+       intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+       mutex_unlock(&vgpu->gvt->lock);
 }
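
Factoring the reset into intel_gvt_reset_vgpu_locked() plus the thin wrapper above lets the GDRST MMIO handler (already running under gvt->lock) and external callers share one implementation without double-acquiring a non-recursive mutex. The general shape, with hypothetical names:

#include <linux/mutex.h>

struct demo_dev { struct mutex lock; };

/* Callers must hold d->lock. */
static void demo_reset_locked(struct demo_dev *d)
{
        lockdep_assert_held(&d->lock);
        /* ... the actual reset work ... */
}

void demo_reset(struct demo_dev *d)
{
        mutex_lock(&d->lock);
        demo_reset_locked(d);
        mutex_unlock(&d->lock);
}
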
index 445fec9..b2c4a0b 100644 (file)
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
 
        assert_forcewakes_inactive(dev_priv);
 
-       if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+       if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
                intel_hpd_poll_init(dev_priv);
 
        DRM_DEBUG_KMS("Device suspended\n");
index 243224a..69bc3b0 100644 (file)
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
 
        struct i915_frontbuffer_tracking fb_tracking;
 
+       struct intel_atomic_helper {
+               struct llist_head free_list;
+               struct work_struct free_work;
+       } atomic_helper;
+
        u16 orig_clock;
 
        bool mchbar_need_disable;
index 3dd7fc6..4b23a78 100644 (file)
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file)
 {
-       struct drm_device *dev = obj->base.dev;
        void *vaddr = obj->phys_handle->vaddr + args->offset;
        char __user *user_data = u64_to_user_ptr(args->data_ptr);
-       int ret;
 
        /* We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       lockdep_assert_held(&obj->base.dev->struct_mutex);
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE |
-                                  I915_WAIT_LOCKED |
-                                  I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
-       if (ret)
-               return ret;
-
        intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-       if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
-               unsigned long unwritten;
-
-               /* The physical object once assigned is fixed for the lifetime
-                * of the obj, so we can safely drop the lock and continue
-                * to access vaddr.
-                */
-               mutex_unlock(&dev->struct_mutex);
-               unwritten = copy_from_user(vaddr, user_data, args->size);
-               mutex_lock(&dev->struct_mutex);
-               if (unwritten) {
-                       ret = -EFAULT;
-                       goto out;
-               }
-       }
+       if (copy_from_user(vaddr, user_data, args->size))
+               return -EFAULT;
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(to_i915(dev));
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
-out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
-       return ret;
+       return 0;
 }
 
 void *i915_gem_object_alloc(struct drm_device *dev)
index bd08814..d534a31 100644 (file)
@@ -199,6 +199,7 @@ found:
        }
 
        /* Unbinding will emit any required flushes */
+       ret = 0;
        while (!list_empty(&eviction_list)) {
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
index a792dcb..e924a95 100644 (file)
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
                        return ret;
        }
 
+       trace_i915_vma_bind(vma, bind_flags);
        ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
        if (ret)
                return ret;
index 86ecec5..588470e 100644 (file)
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
        struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
        struct edid *edid;
        struct i2c_adapter *i2c;
+       bool ret = false;
 
        BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
                 */
                if (!is_digital) {
                        DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-                       return true;
+                       ret = true;
+               } else {
+                       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
                }
-
-               DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
        } else {
                DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
        }
 
        kfree(edid);
 
-       return false;
+       return ret;
 }
 
 static enum drm_connector_status
index 3dc8724..77f7b1d 100644 (file)
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
                         * We only keep the x/y offsets, so push all of the
                         * gtt offset into the x/y offsets.
                         */
-                       _intel_adjust_tile_offset(&x, &y, tile_size,
-                                                 tile_width, tile_height, pitch_tiles,
+                       _intel_adjust_tile_offset(&x, &y,
+                                                 tile_width, tile_height,
+                                                 tile_size, pitch_tiles,
                                                  gtt_offset_rotated * tile_size, 0);
 
                        gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2967,6 +2968,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
        unsigned int rotation = plane_state->base.rotation;
        int ret;
 
+       if (!plane_state->base.visible)
+               return 0;
+
        /* Rotate src coordinates to match rotated GTT view */
        if (drm_rotation_90_or_270(rotation))
                drm_rect_rotate(&plane_state->base.src,
@@ -6846,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        }
 
        state = drm_atomic_state_alloc(crtc->dev);
+       if (!state) {
+               DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                             crtc->base.id, crtc->name);
+               return;
+       }
+
        state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
        /* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11253,7 @@ found:
        }
 
        old->restore_state = restore_state;
+       drm_atomic_state_put(state);
 
        /* let the connector get through one full cycle before testing */
        intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14512,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
                break;
 
        case FENCE_FREE:
-               drm_atomic_state_put(&state->base);
-               break;
+               {
+                       struct intel_atomic_helper *helper =
+                               &to_i915(state->base.dev)->atomic_helper;
+
+                       if (llist_add(&state->freed, &helper->free_list))
+                               schedule_work(&helper->free_work);
+                       break;
+               }
        }
 
        return NOTIFY_DONE;
@@ -16392,6 +16409,18 @@ fail:
        drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+       struct drm_i915_private *dev_priv =
+               container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+       struct intel_atomic_state *state, *next;
+       struct llist_node *freed;
+
+       freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+       llist_for_each_entry_safe(state, next, freed, freed)
+               drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
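
The FENCE_FREE change above defers the final drm_atomic_state_put() through a lockless llist because the fence callback can fire in a context where dropping the last reference (which may sleep) is unsafe. llist_add() returns true only when the list was previously empty, making it a natural one-shot trigger for the worker, which then drains everything via llist_del_all(). A stripped-down sketch of the pattern (hypothetical names):

#include <linux/llist.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_item { struct llist_node node; };

static LLIST_HEAD(demo_free_list);

static void demo_free_work_fn(struct work_struct *work)
{
        struct demo_item *it, *next;
        struct llist_node *freed = llist_del_all(&demo_free_list);

        llist_for_each_entry_safe(it, next, freed, node)
                kfree(it);
}

static DECLARE_WORK(demo_free_work, demo_free_work_fn);

static void demo_defer_free(struct demo_item *it)
{
        /* true only if the list was empty -> one schedule per batch */
        if (llist_add(&it->node, &demo_free_list))
                schedule_work(&demo_free_work);
}
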
@@ -16411,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
 
        dev->mode_config.funcs = &intel_mode_funcs;
 
+       INIT_WORK(&dev_priv->atomic_helper.free_work,
+                 intel_atomic_helper_free_state);
+
        intel_init_quirks(dev);
 
        intel_init_pm(dev_priv);
@@ -17024,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev)
 
        if (ret)
                DRM_ERROR("Restoring old state failed with %i\n", ret);
-       drm_atomic_state_put(state);
+       if (state)
+               drm_atomic_state_put(state);
 }
 
 void intel_modeset_gem_init(struct drm_device *dev)
@@ -17094,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = to_i915(dev);
 
+       flush_work(&dev_priv->atomic_helper.free_work);
+       WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
        intel_disable_gt_powersave(dev_priv);
 
        /*
index cd132c2..cd72ae1 100644 (file)
@@ -370,6 +370,8 @@ struct intel_atomic_state {
        struct skl_wm_values wm_results;
 
        struct i915_sw_fence commit_ready;
+
+       struct llist_node freed;
 };
 
 struct intel_plane_state {
index beb0898..8cf2d80 100644 (file)
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
        struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+       if (!ifbdev)
+               return;
+
        ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
 
index d4961fa..beabc17 100644 (file)
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
                                                uint32_t *batch,
                                                uint32_t index)
 {
-       struct drm_i915_private *dev_priv = engine->i915;
        uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
-       /*
-        * WaDisableLSQCROPERFforOCL:kbl
-        * This WA is implemented in skl_init_clock_gating() but since
-        * this batch updates GEN8_L3SQCREG4 with default value we need to
-        * set this bit here to retain the WA during flush.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
        wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
                                   MI_SRM_LRM_GLOBAL_GTT));
        wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
index aeb637d..91cb4c4 100644 (file)
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
                WA_SET_BIT_MASKED(HDC_CHICKEN0,
                                  HDC_FENCE_DEST_SLM_DISABLE);
 
-       /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-        * involving this register should also be added to WA batch as required.
-        */
-       if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
-               /* WaDisableLSQCROPERFforOCL:kbl */
-               I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-                          GEN8_LQSC_RO_PERF_DIS);
-
        /* WaToEnableHwFixForPushConstHWBug:kbl */
        if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
index 14ff876..686a580 100644 (file)
@@ -345,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 {
        struct adreno_platform_config *config = pdev->dev.platform_data;
        struct msm_gpu *gpu = &adreno_gpu->base;
-       struct msm_mmu *mmu;
        int ret;
 
        adreno_gpu->funcs = funcs;
@@ -385,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                return ret;
        }
 
-       mmu = gpu->aspace->mmu;
-       if (mmu) {
+       if (gpu->aspace && gpu->aspace->mmu) {
+               struct msm_mmu *mmu = gpu->aspace->mmu;
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
                if (ret)
index 5f6cd87..c396d45 100644 (file)
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
 
 static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
 {
-       int i;
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct drm_plane *plane;
-       struct drm_plane_state *plane_state;
-
-       for_each_plane_in_state(state, plane, plane_state, i)
-               mdp5_plane_complete_commit(plane, plane_state);
 
        if (mdp5_kms->smp)
                mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
index 17b0cc1..cdfc63d 100644 (file)
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
 
        /* assigned by crtc blender */
        enum mdp_mixer_stage_id stage;
-
-       bool pending : 1;
 };
 #define to_mdp5_plane_state(x) \
                container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
 void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
 
 uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state);
 enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
 
index c099da7..25d9d0a 100644 (file)
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
        drm_printf(p, "\tzpos=%u\n", pstate->zpos);
        drm_printf(p, "\talpha=%u\n", pstate->alpha);
        drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
-       drm_printf(p, "\tpending=%u\n", pstate->pending);
 }
 
 static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
        if (mdp5_state && mdp5_state->base.fb)
                drm_framebuffer_reference(mdp5_state->base.fb);
 
-       mdp5_state->pending = false;
-
        return &mdp5_state->base;
 }
 
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
        DBG("%s: check (%d -> %d)", plane->name,
                        plane_enabled(old_state), plane_enabled(state));
 
-       /* We don't allow faster-than-vblank updates.. if we did add this
-        * some day, we would need to disallow in cases where hwpipe
-        * changes
-        */
-       if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
-               return -EBUSY;
-
        max_width = config->hw->lm.max_width << 16;
        max_height = config->hw->lm.max_height << 16;
 
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
                                     struct drm_plane_state *old_state)
 {
        struct drm_plane_state *state = plane->state;
-       struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
 
        DBG("%s: update", plane->name);
 
-       mdp5_state->pending = true;
-
        if (plane_enabled(state)) {
                int ret;
 
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
        return pstate->hwpipe->flush_mask;
 }
 
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
-       struct drm_plane_state *state)
-{
-       struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
-       pstate->pending = false;
-}
-
 /* initialize plane */
 struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
 {
index d8bc59c..8098677 100644 (file)
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
        for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+               if (!priv->aspace[id])
+                       continue;
                msm_gem_unmap_vma(priv->aspace[id],
                                &msm_obj->domain[id], msm_obj->sgt);
        }
index cef08da..6a15776 100644 (file)
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
                return ret;
 
        /* enable polling for external displays */
-       drm_kms_helper_poll_enable(dev);
+       if (!dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(dev);
 
        /* enable hotplug interrupts */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
index 59348fc..bc85a45 100644 (file)
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
        pci_set_master(pdev);
 
        ret = nouveau_do_resume(drm_dev, true);
-       drm_kms_helper_poll_enable(drm_dev);
+
+       if (!drm_dev->mode_config.poll_enabled)
+               drm_kms_helper_poll_enable(drm_dev);
+
        /* do magic */
        nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
        vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
index 8d5ed5b..42c1fa5 100644 (file)
@@ -165,6 +165,8 @@ struct nouveau_drm {
        struct backlight_device *backlight;
        struct list_head bl_connectors;
        struct work_struct hpd_work;
+       struct work_struct fbcon_work;
+       int fbcon_new_state;
 #ifdef CONFIG_ACPI
        struct notifier_block acpi_nb;
 #endif
index 2f2a3dc..fa2d0a9 100644 (file)
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
        .fb_probe = nouveau_fbcon_create,
 };
 
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+       int state = READ_ONCE(drm->fbcon_new_state);
+
+       if (state == FBINFO_STATE_RUNNING)
+               pm_runtime_get_sync(drm->dev->dev);
+
+       console_lock();
+       if (state == FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_restore(drm->dev);
+       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+       if (state != FBINFO_STATE_RUNNING)
+               nouveau_fbcon_accel_save_disable(drm->dev);
+       console_unlock();
+
+       if (state == FBINFO_STATE_RUNNING) {
+               pm_runtime_mark_last_busy(drm->dev->dev);
+               pm_runtime_put_sync(drm->dev->dev);
+       }
+}
+
 void
 nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               console_lock();
-               if (state == FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_restore(dev);
-               drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-               if (state != FBINFO_STATE_RUNNING)
-                       nouveau_fbcon_accel_save_disable(dev);
-               console_unlock();
-       }
+
+       if (!drm->fbcon)
+               return;
+
+       drm->fbcon_new_state = state;
+       /* Since runtime resume can happen as a result of a sysfs operation,
+        * it's possible we already have the console locked. So handle fbcon
+        * init/deinit from a separate work thread.
+        */
+       schedule_work(&drm->fbcon_work);
 }
 
 int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
                return -ENOMEM;
 
        drm->fbcon = fbcon;
+       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
 
        drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
 
index 00ea000..e0c143b 100644 (file)
@@ -366,11 +366,10 @@ static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
        /* if we are running in a VM, make sure the device
-        * torn down properly on reboot/shutdown.
-        * unfortunately we can't detect certain
-        * hypervisors so just do this all the time.
+        * is torn down properly on reboot/shutdown
         */
-       radeon_pci_remove(pdev);
+       if (radeon_device_is_virtual())
+               radeon_pci_remove(pdev);
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index e8a38d2..4147768 100644 (file)
@@ -114,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
 MODULE_FIRMWARE("radeon/hainan_rlc.bin");
 MODULE_FIRMWARE("radeon/hainan_smc.bin");
 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
 
 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
 static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1650,6 +1653,8 @@ static int si_init_microcode(struct radeon_device *rdev)
        int err;
        int new_fw = 0;
        bool new_smc = false;
+       bool si58_fw = false;
+       bool banks2_fw = false;
 
        DRM_DEBUG("\n");
 
@@ -1727,10 +1732,11 @@ static int si_init_microcode(struct radeon_device *rdev)
                     ((rdev->pdev->device == 0x6660) ||
                      (rdev->pdev->device == 0x6663) ||
                      (rdev->pdev->device == 0x6665) ||
-                     (rdev->pdev->device == 0x6667))) ||
-                   ((rdev->pdev->revision == 0xc3) &&
-                    (rdev->pdev->device == 0x6665)))
+                     (rdev->pdev->device == 0x6667))))
                        new_smc = true;
+               else if ((rdev->pdev->revision == 0xc3) &&
+                        (rdev->pdev->device == 0x6665))
+                       banks2_fw = true;
                new_chip_name = "hainan";
                pfp_req_size = SI_PFP_UCODE_SIZE * 4;
                me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1742,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
        default: BUG();
        }
 
+       /* this memory configuration requires special firmware */
+       if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+               si58_fw = true;
+
        DRM_INFO("Loading %s Microcode\n", new_chip_name);
 
        snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1845,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+       if (si58_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
        err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
        if (err) {
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1876,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
                }
        }
 
-       if (new_smc)
+       if (banks2_fw)
+               snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+       else if (new_smc)
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
        else
                snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
index 13ba73f..2944916 100644
@@ -3008,17 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6817) ||
                    (rdev->pdev->device == 0x6806))
                        max_mclk = 120000;
-       } else if (rdev->family == CHIP_OLAND) {
-               if ((rdev->pdev->revision == 0xC7) ||
-                   (rdev->pdev->revision == 0x80) ||
-                   (rdev->pdev->revision == 0x81) ||
-                   (rdev->pdev->revision == 0x83) ||
-                   (rdev->pdev->revision == 0x87) ||
-                   (rdev->pdev->device == 0x6604) ||
-                   (rdev->pdev->device == 0x6605)) {
-                       max_sclk = 75000;
-                       max_mclk = 80000;
-               }
        } else if (rdev->family == CHIP_HAINAN) {
                if ((rdev->pdev->revision == 0x81) ||
                    (rdev->pdev->revision == 0x83) ||
@@ -3027,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                    (rdev->pdev->device == 0x6665) ||
                    (rdev->pdev->device == 0x6667)) {
                        max_sclk = 75000;
-                       max_mclk = 80000;
                }
        }
        /* Apply dpm quirks */
index a0fd3e6..7aadce1 100644
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
        }
 
-       __drm_atomic_helper_crtc_destroy_state(state);
+       drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
index db92077..ab30169 100644
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                          args->shader_rec_count);
        struct vc4_bo *bo;
 
-       if (uniforms_offset < shader_rec_offset ||
+       if (shader_rec_offset < args->bin_cl_size ||
+           uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            args->shader_rec_count >= (UINT_MAX /
                                          sizeof(struct vc4_shader_state)) ||
            temp_size < exec_size) {
                DRM_ERROR("overflow in exec arguments\n");
+               ret = -EINVAL;
                goto fail;
        }
 
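
The added bin_cl_size comparison completes a monotonic chain of checks: each
offset is derived from the previous one by an addition that can wrap, so
requiring bin_cl_size <= shader_rec_offset <= uniforms_offset <= exec_size
<= temp_size catches any unsigned overflow. The same idea in isolation,
illustrative only:

#include <linux/errno.h>
#include <linux/types.h>

static int demo_check_layout(u32 bin_cl_size, u32 shader_rec_offset,
                             u32 uniforms_offset, u32 exec_size,
                             u32 temp_size)
{
        /* any wrapped addition breaks the required ordering */
        if (shader_rec_offset < bin_cl_size ||
            uniforms_offset < shader_rec_offset ||
            exec_size < uniforms_offset ||
            temp_size < exec_size)
                return -EINVAL;         /* overflow in exec arguments */
        return 0;
}
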
index 08886a3..5cdd003 100644
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
                }
 
                ret = vc4_full_res_bounds_check(exec, *obj, surf);
-               if (!ret)
+               if (ret)
                        return ret;
 
                return 0;
index dd21f95..cde9f37 100644
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
        info->fbops = &virtio_gpufb_ops;
        info->pixmap.flags = FB_PIXMAP_SYSTEM;
 
-       info->screen_base = obj->vmap;
+       info->screen_buffer = obj->vmap;
        info->screen_size = obj->gem_base.size;
        drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
        drm_fb_helper_fill_var(info, &vfbdev->helper,
index 717704e..c0303f6 100644
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int brightness;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 5) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        brightness = data[4];
        if (brightness < 0 || brightness > 3) {
                dev_warn(dev,
                         "Read invalid backlight brightness: %02hhx.\n",
                         data[4]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
-       return brightness;
+       ret = brightness;
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        const char *macro_mode;
-       char data[8];
+       char *data;
+
+       data = kmalloc(2, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_GET_MODE,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 2,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 1) {
                dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
        switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
        default:
                dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
                         data[0]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+       ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
        struct usb_interface *usbif = to_usb_interface(dev->parent);
        struct usb_device *usbdev = interface_to_usbdev(usbif);
        int current_profile;
-       char data[8];
+       char *data;
+
+       data = kmalloc(8, GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
                              K90_REQUEST_STATUS,
                              USB_DIR_IN | USB_TYPE_VENDOR |
                              USB_RECIP_DEVICE, 0, 0, data, 8,
                              USB_CTRL_SET_TIMEOUT);
-       if (ret < 0) {
+       if (ret < 8) {
                dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
                         ret);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
        current_profile = data[7];
        if (current_profile < 1 || current_profile > 3) {
                dev_warn(dev, "Read invalid current profile: %02hhx.\n",
                         data[7]);
-               return -EIO;
+               ret = -EIO;
+               goto out;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+       ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+       kfree(data);
+
+       return ret;
 }
 
 static ssize_t k90_store_current_profile(struct device *dev,
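
All three k90 hunks above make the same two corrections: the transfer buffer
passed to usb_control_msg() must be DMA-capable, which rules out on-stack
arrays, and the return value is the number of bytes actually transferred, so
it must be checked against the index of the last byte consumed rather than
just against zero. A condensed sketch (the request value and byte offset are
illustrative):

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_read_status_byte(struct usb_device *usbdev, u8 request)
{
        char *data;
        int ret;

        data = kmalloc(8, GFP_KERNEL);  /* heap allocation is DMA-safe */
        if (!data)
                return -ENOMEM;

        ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), request,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              0, 0, data, 8, USB_CTRL_SET_TIMEOUT);
        if (ret < 5) {                  /* byte 4 must have arrived */
                ret = -EIO;
                goto out;
        }
        ret = data[4];
out:
        kfree(data);
        return ret;
}
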
index b9779bc..8aeca03 100644
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
                return retval;
        }
 
+       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+               wacom_wac->shared->touch = hdev;
+       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+               wacom_wac->shared->pen = hdev;
+
 out:
        mutex_unlock(&wacom_udev_list_lock);
        return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
        if (error)
                goto fail;
 
-       error = wacom_add_shared_data(hdev);
-       if (error)
-               goto fail;
-
        /*
         * Bamboo Pad has a generic hid handling for the Pen, and we switch it
         * into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
 
        wacom_update_name(wacom, wireless ? " (WL)" : "");
 
-       if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
-               wacom_wac->shared->touch = hdev;
-       else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
-               wacom_wac->shared->pen = hdev;
+       error = wacom_add_shared_data(hdev);
+       if (error)
+               goto fail;
 
        if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
             (features->quirks & WACOM_QUIRK_BATTERY)) {
index b1a9a3c..0884dc9 100644
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
 
        wacom_report_events(hdev, report);
 
+       /*
+        * Non-input reports may be sent prior to the device being
+        * completely initialized. Since only their events need
+        * to be processed, exit after 'wacom_report_events' has
+        * been called to prevent potential crashes in the report-
+        * processing functions.
+        */
+       if (report->type != HID_INPUT_REPORT)
+               return;
+
        if (WACOM_PAD_FIELD(field)) {
                wacom_wac_pad_battery_report(hdev, report);
                if (wacom->wacom_wac.pad_input)
index e7dcfac..3e70a9c 100644
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
        if (!src_addr || !src_addr->sa_family) {
                src_addr = (struct sockaddr *) &id->route.addr.src_addr;
                src_addr->sa_family = dst_addr->sa_family;
-               if (dst_addr->sa_family == AF_INET6) {
+               if (IS_ENABLED(CONFIG_IPV6) &&
+                   dst_addr->sa_family == AF_INET6) {
                        struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
                        struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
                        src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
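
The IS_ENABLED() guard above expands to a compile-time 0 or 1, so the branch
disappears entirely in CONFIG_IPV6=n builds while the code inside remains
type-checked. The shape of the construct, illustrative only:

#include <linux/in6.h>
#include <linux/kconfig.h>
#include <linux/socket.h>

static void demo_copy_scope_id(struct sockaddr *src, struct sockaddr *dst)
{
        if (IS_ENABLED(CONFIG_IPV6) && dst->sa_family == AF_INET6) {
                struct sockaddr_in6 *s6 = (struct sockaddr_in6 *)src;
                struct sockaddr_in6 *d6 = (struct sockaddr_in6 *)dst;

                s6->sin6_scope_id = d6->sin6_scope_id;
        }
}
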
index 1e62a5f..4609b92 100644
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
 
        if (access & IB_ACCESS_ON_DEMAND) {
+               put_pid(umem->pid);
                ret = ib_umem_odp_get(context, umem);
                if (ret) {
                        kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 
        page_list = (struct page **) __get_free_page(GFP_KERNEL);
        if (!page_list) {
+               put_pid(umem->pid);
                kfree(umem);
                return ERR_PTR(-ENOMEM);
        }
index 9d5fe18..6262dc0 100644
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
index f1510cc..9398143 100644
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        skb_trim(skb, dlen);
        mutex_lock(&ep->com.mutex);
 
-       /* update RX credits */
-       update_rx_credits(ep, dlen);
-
        switch (ep->com.state) {
        case MPA_REQ_SENT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
+               update_rx_credits(ep, dlen);
                ep->rcv_seq += dlen;
                disconnect = process_mpa_request(ep, skb);
                break;
        case FPDU_MODE: {
                struct c4iw_qp_attributes attrs;
+
+               update_rx_credits(ep, dlen);
                BUG_ON(!ep->com.qp);
                if (status)
                        pr_err("%s Unexpected streaming data." \
index 19c6477..bec82a6 100644
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
        }
 
        /*
+        * Special cqe for drain WR completions...
+        */
+       if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+               *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+               *cqe = *hw_cqe;
+               goto skip_cqe;
+       }
+
+       /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                                c4iw_invalidate_mr(qhp->rhp,
                                                   CQE_WRID_FR_STAG(&cqe));
                        break;
+               case C4IW_DRAIN_OPCODE:
+                       wc->opcode = IB_WC_SEND;
+                       break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
                }
        }
 out:
-       if (wq) {
-               if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
-                       if (t4_sq_empty(wq))
-                               complete(&qhp->sq_drained);
-                       if (t4_rq_empty(wq))
-                               complete(&qhp->rq_drained);
-               }
+       if (wq)
                spin_unlock(&qhp->lock);
-       }
        return ret;
 }
 
index 516b0ae..40c0e7b 100644
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                }
        }
 
+       rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+       if (!rdev->free_workq) {
+               err = -ENOMEM;
+               goto err_free_status_page;
+       }
+
        rdev->status_page->db_off = 0;
 
        return 0;
+err_free_status_page:
+       free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
        c4iw_ocqp_pool_destroy(rdev);
 destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       destroy_workqueue(rdev->free_workq);
        kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
index 4788e1a..8cd4d05 100644
@@ -45,6 +45,7 @@
 #include <linux/kref.h>
 #include <linux/timer.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 
 #include <asm/byteorder.h>
 
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
        struct list_head qpids;
        struct list_head cqids;
        struct mutex lock;
+       struct kref kref;
 };
 
 enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
        atomic_t wr_log_idx;
        struct wr_log_entry *wr_log;
        int wr_log_size;
+       struct workqueue_struct *free_workq;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
        wait_queue_head_t wait;
        struct timer_list timer;
        int sq_sig_all;
-       struct completion rq_drained;
-       struct completion sq_drained;
+       struct work_struct free_work;
+       struct c4iw_ucontext *ucontext;
 };
 
 static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
        u32 key;
        spinlock_t mmap_lock;
        struct list_head mmaps;
+       struct kref kref;
 };
 
 static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
        return container_of(c, struct c4iw_ucontext, ibucontext);
 }
 
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+       kref_get(&ucontext->kref);
+}
+
 struct c4iw_mm_entry {
        struct list_head entry;
        u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
        return IB_QPS_ERR;
 }
 
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
 static inline u32 c4iw_ib_to_tpt_access(int a)
 {
        return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
 void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
 
 #endif
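
The kref added to the ucontext lets queue pairs pin it: each QP takes a
reference at create time and drops it from its deferred free path, so the
context can no longer vanish while a QP still uses its resources. The
lifetime pattern in miniature, with illustrative names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_uctx {
        struct kref kref;
        /* ... mmap list, qpid/cqid lists ... */
};

static void demo_uctx_release(struct kref *kref)
{
        struct demo_uctx *uctx = container_of(kref, struct demo_uctx, kref);

        /* the last put, wherever it happens, frees the context */
        kfree(uctx);
}

static void demo_uctx_get(struct demo_uctx *uctx)
{
        kref_get(&uctx->kref);          /* e.g. at QP create */
}

static void demo_uctx_put(struct demo_uctx *uctx)
{
        kref_put(&uctx->kref, demo_uctx_release);
}
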
index 49b51b7..3345e1c 100644
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
        return -ENOSYS;
 }
 
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
 {
-       struct c4iw_dev *rhp = to_c4iw_dev(context->device);
-       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_dev *rhp;
        struct c4iw_mm_entry *mm, *tmp;
 
-       PDBG("%s context %p\n", __func__, context);
+       ucontext = container_of(kref, struct c4iw_ucontext, kref);
+       rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+       PDBG("%s ucontext %p\n", __func__, ucontext);
        list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
                kfree(mm);
        c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
        kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+       struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+       PDBG("%s context %p\n", __func__, context);
+       c4iw_put_ucontext(ucontext);
        return 0;
 }
 
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
        c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
        INIT_LIST_HEAD(&context->mmaps);
        spin_lock_init(&context->mmap_lock);
+       kref_init(&context->kref);
 
        if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
                if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        if (!netif_carrier_ok(netdev))
                props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
        dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
        dev->ibdev.get_port_immutable = c4iw_port_immutable;
        dev->ibdev.get_dev_fw_str = get_dev_fw_str;
-       dev->ibdev.drain_sq = c4iw_drain_sq;
-       dev->ibdev.drain_rq = c4iw_drain_rq;
 
        dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
        if (!dev->ibdev.iwcm)
index cda5542..04c1c38 100644
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
        return 0;
 }
 
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+       struct c4iw_ucontext *ucontext;
+       struct c4iw_qp *qhp;
+       struct c4iw_dev *rhp;
+
+       qhp = container_of(work, struct c4iw_qp, free_work);
+       ucontext = qhp->ucontext;
+       rhp = qhp->rhp;
+
+       PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+       destroy_qp(&rhp->rdev, &qhp->wq,
+                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+       if (ucontext)
+               c4iw_put_ucontext(ucontext);
+       kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
 {
        struct c4iw_qp *qhp;
 
        qhp = container_of(kref, struct c4iw_qp, kref);
        PDBG("%s qhp %p\n", __func__, qhp);
-       kfree(qhp);
+       queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
        PDBG("%s ib_qp %p\n", __func__, qp);
-       kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+       kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
 
 static void add_to_fc_list(struct list_head *head, struct list_head *entry)
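
queue_qp_free() above replaces an inline kfree() because a kref release
callback can run in atomic context; it therefore only queues work, and the
teardown that may sleep happens on the dedicated free_workq. The pattern,
sketched with illustrative names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_qp {
        struct kref kref;
        struct work_struct free_work;
        struct workqueue_struct *free_wq;
};

static void demo_free_qp_work(struct work_struct *work)
{
        struct demo_qp *qp = container_of(work, struct demo_qp, free_work);

        /* process context: teardown that may sleep goes here */
        kfree(qp);
}

/* may be invoked from atomic context, so only queue the work */
static void demo_queue_qp_free(struct kref *kref)
{
        struct demo_qp *qp = container_of(kref, struct demo_qp, kref);

        queue_work(qp->free_wq, &qp->free_work);
}

static void demo_qp_put(struct demo_qp *qp)
{
        kref_put(&qp->kref, demo_queue_qp_free);
}
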
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
        return 0;
 }
 
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *schp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       schp = to_c4iw_cq(qhp->ibqp.send_cq);
+       cq = &schp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(1) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&schp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&schp->lock, flag);
+
+       spin_lock_irqsave(&schp->comp_handler_lock, flag);
+       (*schp->ibcq.comp_handler)(&schp->ibcq,
+                                  schp->ibcq.cq_context);
+       spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+       struct t4_cqe cqe = {};
+       struct c4iw_cq *rchp;
+       unsigned long flag;
+       struct t4_cq *cq;
+
+       rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+       cq = &rchp->cq;
+
+       cqe.u.drain_cookie = wr->wr_id;
+       cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+                                CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+                                CQE_TYPE_V(0) |
+                                CQE_SWCQE_V(1) |
+                                CQE_QPID_V(qhp->wq.sq.qid));
+
+       spin_lock_irqsave(&rchp->lock, flag);
+       cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+       cq->sw_queue[cq->sw_pidx] = cqe;
+       t4_swcq_produce(cq);
+       spin_unlock_irqrestore(&rchp->lock, flag);
+
+       spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+       (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+                                  rchp->ibcq.cq_context);
+       spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                   struct ib_send_wr **bad_wr)
 {
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_sq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_sq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
        spin_lock_irqsave(&qhp->lock, flag);
        if (t4_wq_in_error(&qhp->wq)) {
                spin_unlock_irqrestore(&qhp->lock, flag);
-               *bad_wr = wr;
-               return -EINVAL;
+               complete_rq_drain_wr(qhp, wr);
+               return err;
        }
        num_wrs = t4_rq_avail(&qhp->wq);
        if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                }
                break;
        case C4IW_QP_STATE_CLOSING:
-               if (!internal) {
+
+               /*
+                * Allow kernel users to move to ERROR for qp draining.
+                */
+               if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+                                 C4IW_QP_STATE_ERROR)) {
                        ret = -EINVAL;
                        goto out;
                }
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        struct c4iw_qp_attributes attrs;
-       struct c4iw_ucontext *ucontext;
 
        qhp = to_c4iw_qp(ib_qp);
        rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        spin_unlock_irq(&rhp->lock);
        free_ird(rhp, qhp->attr.max_ird);
 
-       ucontext = ib_qp->uobject ?
-                  to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
-       destroy_qp(&rhp->rdev, &qhp->wq,
-                  ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
        c4iw_qp_rem_ref(ib_qp);
 
        PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
-       init_completion(&qhp->sq_drained);
-       init_completion(&qhp->rq_drained);
        mutex_init(&qhp->mutex);
        init_waitqueue_head(&qhp->wait);
        kref_init(&qhp->kref);
+       INIT_WORK(&qhp->free_work, free_qp_work);
 
        ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
        if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                        ma_sync_key_mm->len = PAGE_SIZE;
                        insert_mmap(ucontext, ma_sync_key_mm);
                }
+
+               c4iw_get_ucontext(ucontext);
+               qhp->ucontext = ucontext;
        }
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
-       struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
-       (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_sq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
-       struct c4iw_qp *qp = to_c4iw_qp(ibqp);
-       unsigned long flag;
-       bool need_to_wait;
-
-       move_qp_to_err(qp);
-       spin_lock_irqsave(&qp->lock, flag);
-       need_to_wait = !t4_rq_empty(&qp->wq);
-       spin_unlock_irqrestore(&qp->lock, flag);
-
-       if (need_to_wait)
-               wait_for_completion(&qp->rq_drained);
-}
index 862381a..640d221 100644
@@ -179,6 +179,7 @@ struct t4_cqe {
                        __be32 wrid_hi;
                        __be32 wrid_low;
                } gen;
+               u64 drain_cookie;
        } u;
        __be64 reserved;
        __be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
 /* generic accessor macros */
 #define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
 #define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x)    ((x)->u.drain_cookie)
 
 /* macros for flit 3 of the cqe */
 #define CQE_GENBIT_S   63
index 29e97df..4c000d6 100644
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-       if (netdev->mtu >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        if (netif_carrier_ok(iwdev->netdev))
index aff9fb1..5a31f3c 100644
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
        memset(props, 0, sizeof(*props));
 
        props->max_mtu = IB_MTU_4096;
-
-       if (netdev->mtu  >= 4096)
-               props->active_mtu = IB_MTU_4096;
-       else if (netdev->mtu  >= 2048)
-               props->active_mtu = IB_MTU_2048;
-       else if (netdev->mtu  >= 1024)
-               props->active_mtu = IB_MTU_1024;
-       else if (netdev->mtu  >= 512)
-               props->active_mtu = IB_MTU_512;
-       else
-               props->active_mtu = IB_MTU_256;
+       props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
        props->lid = 1;
        props->lmc = 0;
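
The same open-coded MTU ladder is deleted from iwch, c4iw, i40iw and nes
above in favour of ib_mtu_int_to_enum(). Its effect should match the removed
code: return the largest IB MTU enum value not exceeding the netdev MTU. A
sketch of that mapping:

#include <rdma/ib_verbs.h>

static enum ib_mtu demo_mtu_int_to_enum(int mtu)
{
        if (mtu >= 4096)
                return IB_MTU_4096;
        if (mtu >= 2048)
                return IB_MTU_2048;
        if (mtu >= 1024)
                return IB_MTU_1024;
        if (mtu >= 512)
                return IB_MTU_512;
        return IB_MTU_256;
}
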
index 7b74d09..3ac8aa5 100644
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
        return 0;
 }
 
-void qedr_unaffiliated_event(void *context,
-                            u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
 {
        pr_err("unaffiliated event not implemented yet\n");
 }
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;
 
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;
 
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
        ib_dealloc_device(&dev->ibdev);
 }
 
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
 {
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
-       return 0;
+       if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
 }
 
 static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
        qedr_remove(dev);
 }
 
+static void qedr_open(struct qedr_dev *dev)
+{
+       if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+               qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
 static void qedr_mac_address_change(struct qedr_dev *dev)
 {
        union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
 
        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
 
-       qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+       qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
 
        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
 {
        switch (event) {
        case QEDE_UP:
-               qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+               qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
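
The new enet_state bit makes the port-event dispatches idempotent:
test_and_set_bit() and test_and_clear_bit() are atomic read-modify-write
operations, so each link transition raises exactly one ACTIVE or ERR event
even if qedr_open() and qedr_close() race. The bit dance in isolation,
illustrative only:

#include <linux/bitops.h>
#include <linux/types.h>

#define DEMO_LINK_UP_BIT        0

/* true only for the caller that flips the bit 0 -> 1 */
static bool demo_link_went_up(unsigned long *state)
{
        return !test_and_set_bit(DEMO_LINK_UP_BIT, state);
}

/* true only for the caller that flips the bit 1 -> 0 */
static bool demo_link_went_down(unsigned long *state)
{
        return test_and_clear_bit(DEMO_LINK_UP_BIT, state);
}
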
index 620badd..bb32e47 100644
@@ -113,6 +113,8 @@ struct qedr_device_attr {
        struct qed_rdma_events events;
 };
 
+#define QEDR_ENET_STATE_BIT    (0)
+
 struct qedr_dev {
        struct ib_device        ibdev;
        struct qed_dev          *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
        struct qedr_cq          *gsi_sqcq;
        struct qedr_cq          *gsi_rqcq;
        struct qedr_qp          *gsi_qp;
+
+       unsigned long enet_state;
 };
 
 #define QEDR_MAX_SQ_PBL                        (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
 #define QEDR_ROCE_MAX_CNQ_SIZE         (0x4000)
 
 #define QEDR_MAX_PORT                  (1)
+#define QEDR_PORT                      (1)
 
 #define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
@@ -251,9 +256,6 @@ struct qedr_cq {
 
        u16 icid;
 
-       /* Lock to protect completion handler */
-       spinlock_t comp_handler_lock;
-
        /* Lock to protect multiple CQs */
        spinlock_t cq_lock;
        u8 arm_flags;
index 63890eb..a9a8d87 100644
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
        qedr_inc_sw_gsi_cons(&qp->sq);
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
 
        spin_unlock_irqrestore(&qp->q_lock, flags);
 
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
+       if (cq->ibcq.comp_handler)
                (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
-       }
 }
 
 static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
        }
 
        if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
-               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
-       else
                packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+       else
+               packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
 
        packet->roce_mode = roce_mode;
        memcpy(packet->header.vaddr, ud_header_buffer, header_size);
index 57c8de2..c7d6c9a 100644
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
                            struct ib_ucontext *context, struct ib_udata *udata)
 {
        struct qedr_dev *dev = get_qedr_dev(ibdev);
-       struct qedr_ucontext *uctx = NULL;
-       struct qedr_alloc_pd_uresp uresp;
        struct qedr_pd *pd;
        u16 pd_id;
        int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+       if (rc)
+               goto err;
 
-       uresp.pd_id = pd_id;
        pd->pd_id = pd_id;
 
        if (udata && context) {
+               struct qedr_alloc_pd_uresp uresp;
+
+               uresp.pd_id = pd_id;
+
                rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
-               if (rc)
+               if (rc) {
                        DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
-               uctx = get_qedr_ucontext(context);
-               uctx->pd = pd;
-               pd->uctx = uctx;
+                       dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+                       goto err;
+               }
+
+               pd->uctx = get_qedr_ucontext(context);
+               pd->uctx->pd = pd;
        }
 
        return &pd->ibpd;
+
+err:
+       kfree(pd);
+       return ERR_PTR(rc);
 }
 
 int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
        return ERR_PTR(-EFAULT);
 }
 
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
 {
        switch (qp_state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
        return IB_QPS_ERR;
 }
 
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+                                       enum ib_qp_state qp_state)
 {
        switch (qp_state) {
        case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
        int status = 0;
 
        if (new_state == qp->state)
-               return 1;
+               return 0;
 
        switch (qp->state) {
        case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
                /* ERR->XXX */
                switch (new_state) {
                case QED_ROCE_QP_STATE_RESET:
+                       if ((qp->rq.prod != qp->rq.cons) ||
+                           (qp->sq.prod != qp->sq.cons)) {
+                               DP_NOTICE(dev,
+                                         "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+                                         qp->rq.prod, qp->rq.cons, qp->sq.prod,
+                                         qp->sq.cons);
+                               status = -EINVAL;
+                       }
                        break;
                default:
                        status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                         qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
                DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
                         qp_params.remote_mac_addr);
-;
 
                qp_params.mtu = qp->mtu;
                qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
 
        qp_attr->qp_state = qedr_get_ibqp_state(params.state);
        qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
-       qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+       qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
        qp_attr->path_mig_state = IB_MIG_MIGRATED;
        qp_attr->rq_psn = params.rq_psn;
        qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
        qp_attr->cap.max_recv_wr = qp->rq.max_wr;
        qp_attr->cap.max_send_sge = qp->sq.max_sges;
        qp_attr->cap.max_recv_sge = qp->rq.max_sges;
-       qp_attr->cap.max_inline_data = qp->max_inline_data;
+       qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
        qp_init_attr->cap = qp_attr->cap;
 
        memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
        return rc;
 }
 
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+                                      int max_page_list_len)
 {
        struct qedr_pd *pd = get_qedr_pd(ibpd);
        struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
        return 0;
 }
 
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
 {
        switch (opcode) {
        case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
        }
 }
 
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
 {
        int wq_is_full, err_wr, pbl_is_full;
        struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
        return true;
 }
 
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                     struct ib_send_wr **bad_wr)
 {
        struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
                                  IB_WC_SUCCESS, 0);
                break;
        case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
-               DP_ERR(dev,
-                      "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
-                      cq->icid, qp->icid);
+               if (qp->state != QED_ROCE_QP_STATE_ERR)
+                       DP_ERR(dev,
+                              "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+                              cq->icid, qp->icid);
                cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
                                  IB_WC_WR_FLUSH_ERR, 1);
                break;
index 231a1ce..bd8fbd3 100644
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
        if (ret) {
                dev_err(&pdev->dev, "failed to allocate interrupts\n");
                ret = -ENOMEM;
-               goto err_netdevice;
+               goto err_free_cq_ring;
        }
 
        /* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
 err_free_intrs:
        pvrdma_free_irq(dev);
        pvrdma_disable_msi_all(dev);
-err_netdevice:
-       unregister_netdevice_notifier(&dev->nb_netdev);
 err_free_cq_ring:
        pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
 err_free_async_ring:
index 5489137..c2aa526 100644
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
        union pvrdma_cmd_resp rsp;
        struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
        struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
-       struct pvrdma_alloc_ucontext_resp uresp;
+       struct pvrdma_alloc_ucontext_resp uresp = {0};
        int ret;
        void *ptr;
 
index 342e781..4abdeb3 100644
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
        }
 
        spin_lock_bh(&dev_list_lock);
-       list_add_tail(&rxe_dev_list, &rxe->list);
+       list_add_tail(&rxe->list, &rxe_dev_list);
        spin_unlock_bh(&dev_list_lock);
        return rxe;
 }
index 486d576..44b2108 100644
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
        del_timer_sync(&qp->rnr_nak_timer);
 
        rxe_cleanup_task(&qp->req.task);
-       if (qp_type(qp) == IB_QPT_RC)
-               rxe_cleanup_task(&qp->comp.task);
+       rxe_cleanup_task(&qp->comp.task);
 
        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
index 9104e6b..e71af71 100644
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                                                   SHOST_DIX_GUARD_CRC);
                }
 
-               /*
-                * Limit the sg_tablesize and max_sectors based on the device
-                * max fastreg page list length.
-                */
-               shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
-                       ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
                        mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
        shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
 
+       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+                iser_conn, shost->sg_tablesize,
+                shost->max_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
index 0be6a7c..9d0b22a 100644
@@ -496,7 +496,6 @@ struct ib_conn {
  * @rx_descs:         rx buffers array (cyclic buffer)
  * @num_rx_descs:     number of rx descriptors
  * @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
  */
 struct iser_conn {
        struct ib_conn               ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
        struct iser_rx_desc          *rx_descs;
        u32                          num_rx_descs;
        unsigned short               scsi_sg_tablesize;
-       unsigned int                 scsi_max_sectors;
        bool                         snd_w_inv;
 };
 
index 8ae7a3b..6a9d1cb 100644
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
        sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
                                 device->ib_device->attrs.max_fast_reg_page_list_len);
 
-       if (sg_tablesize > sup_sg_tablesize) {
-               sg_tablesize = sup_sg_tablesize;
-               iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
-       } else {
-               iser_conn->scsi_max_sectors = max_sectors;
-       }
-
-       iser_conn->scsi_sg_tablesize = sg_tablesize;
-
-       iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
-                iser_conn, iser_conn->scsi_sg_tablesize,
-                iser_conn->scsi_max_sectors);
+       iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
 }
 
 /**
index 8ddc071..79bf484 100644
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        struct srp_fr_desc *d;
        struct ib_mr *mr;
        int i, ret = -EINVAL;
+       enum ib_mr_type mr_type;
 
        if (pool_size <= 0)
                goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
        spin_lock_init(&pool->lock);
        INIT_LIST_HEAD(&pool->free_list);
 
+       if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+               mr_type = IB_MR_TYPE_SG_GAPS;
+       else
+               mr_type = IB_MR_TYPE_MEM_REG;
+
        for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
-               mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
-                                max_page_list_len);
+               mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
                if (IS_ERR(mr)) {
                        ret = PTR_ERR(mr);
                        if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
                indirect_sg_entries = cmd_sg_entries;
        }
 
+       if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+               pr_warn("Clamping indirect_sg_entries to %u\n",
+                       SG_MAX_SEGMENTS);
+               indirect_sg_entries = SG_MAX_SEGMENTS;
+       }
+
        srp_remove_wq = create_workqueue("srp_remove");
        if (!srp_remove_wq) {
                ret = -ENOMEM;
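
The fr-pool hunk above derives the MR type from the device capability flags,
and the module-init hunk clamps a user-supplied count to SG_MAX_SEGMENTS. A
sketch of the capability probe, assuming only the standard ib_verbs
definitions:

#include <rdma/ib_verbs.h>

static enum ib_mr_type demo_pick_mr_type(struct ib_device *dev)
{
        /* SG_GAPS MRs tolerate arbitrary scatterlists, so prefer them
         * whenever the HCA advertises the capability */
        if (dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                return IB_MR_TYPE_SG_GAPS;
        return IB_MR_TYPE_MEM_REG;
}
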
index 0ea4efb..ebb5e39 100644
@@ -30,8 +30,9 @@
 
 #include "cec-priv.h"
 
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx);
 
 /*
  * 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
 
        /* Mark it as an error */
        data->msg.tx_ts = ktime_get_ns();
-       data->msg.tx_status = CEC_TX_STATUS_ERROR |
-                             CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+                              CEC_TX_STATUS_MAX_RETRIES;
+       data->msg.tx_error_cnt++;
        data->attempts = 0;
-       data->msg.tx_error_cnt = 1;
        /* Queue transmitted message for monitoring purposes */
        cec_queue_msg_monitor(data->adap, &data->msg, 1);
 
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
        [CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
        [CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
        [CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
-       [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+       [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
        [CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
 };
 
@@ -1250,30 +1251,49 @@ configured:
                for (i = 1; i < las->num_log_addrs; i++)
                        las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        }
+       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
        adap->is_configured = true;
        adap->is_configuring = false;
        cec_post_state_event(adap);
-       mutex_unlock(&adap->lock);
 
+       /*
+        * Now post the Report Features and Report Physical Address broadcast
+        * messages. Note that these are non-blocking transmits, meaning that
+        * they are just queued up and once adap->lock is unlocked the main
+        * thread will kick in and start transmitting these.
+        *
+        * If after this function is done (but before one or more of these
+        * messages are actually transmitted) the CEC adapter is unconfigured,
+        * then any remaining messages will be dropped by the main thread.
+        */
        for (i = 0; i < las->num_log_addrs; i++) {
+               struct cec_msg msg = {};
+
                if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
                    (las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
                        continue;
 
-               /*
-                * Report Features must come first according
-                * to CEC 2.0
-                */
-               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
-                       cec_report_features(adap, i);
-               cec_report_phys_addr(adap, i);
+               msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+               /* Report Features must come first according to CEC 2.0 */
+               if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+                   adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+                       cec_fill_msg_report_features(adap, &msg, i);
+                       cec_transmit_msg_fh(adap, &msg, NULL, false);
+               }
+
+               /* Report Physical Address */
+               cec_msg_report_physical_addr(&msg, adap->phys_addr,
+                                            las->primary_device_type[i]);
+               dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+                       las->log_addr[i],
+                       cec_phys_addr_exp(adap->phys_addr));
+               cec_transmit_msg_fh(adap, &msg, NULL, false);
        }
-       for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
-               las->log_addr[i] = CEC_LOG_ADDR_INVALID;
-       mutex_lock(&adap->lock);
        adap->kthread_config = NULL;
-       mutex_unlock(&adap->lock);
        complete(&adap->config_completion);
+       mutex_unlock(&adap->lock);
        return 0;
 
 unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
 
 /* High-level core CEC message handling */
 
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+                                        struct cec_msg *msg,
+                                        unsigned int la_idx)
 {
-       struct cec_msg msg = { };
        const struct cec_log_addrs *las = &adap->log_addrs;
        const u8 *features = las->features[la_idx];
        bool op_is_dev_features = false;
        unsigned int idx;
 
-       /* This is 2.0 and up only */
-       if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
-               return 0;
-
        /* Report Features */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       msg.len = 4;
-       msg.msg[1] = CEC_MSG_REPORT_FEATURES;
-       msg.msg[2] = adap->log_addrs.cec_version;
-       msg.msg[3] = las->all_device_types[la_idx];
+       msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+       msg->len = 4;
+       msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+       msg->msg[2] = adap->log_addrs.cec_version;
+       msg->msg[3] = las->all_device_types[la_idx];
 
        /* Write RC Profiles first, then Device Features */
        for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
-               msg.msg[msg.len++] = features[idx];
+               msg->msg[msg->len++] = features[idx];
                if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
                        if (op_is_dev_features)
                                break;
                        op_is_dev_features = true;
                }
        }
-       return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
-       const struct cec_log_addrs *las = &adap->log_addrs;
-       struct cec_msg msg = { };
-
-       /* Report Physical Address */
-       msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
-       cec_msg_report_physical_addr(&msg, adap->phys_addr,
-                                    las->primary_device_type[la_idx]);
-       dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
-               las->log_addr[la_idx],
-                       cec_phys_addr_exp(adap->phys_addr));
-       return cec_transmit_msg(adap, &msg, false);
 }
 
 /* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
        }
 
        case CEC_MSG_GIVE_FEATURES:
-               if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
-                       return cec_report_features(adap, la_idx);
-               return 0;
+               if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+                       return cec_feature_abort(adap, msg);
+               cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+               return cec_transmit_msg(adap, &tx_cec_msg, false);
 
        default:
                /*
index bc5e8cf..8f11d7e 100644 (file)
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
                skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
                                          ETH_ALEN);
                skb_pull(h->priv->ule_skb, ETH_ALEN);
+       } else {
+               /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+               eth_zero_addr(dest_addr);
        }
 
        /* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
        if (!h->priv->ule_bridged) {
                skb_push(h->priv->ule_skb, ETH_HLEN);
                h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
-               if (!h->priv->ule_dbit) {
-                       /*
-                        * dest_addr buffer is only valid if
-                        * h->priv->ule_dbit == 0
-                        */
-                       memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
-                       eth_zero_addr(h->ethh->h_source);
-               } else /* zeroize source and dest */
-                       memset(h->ethh, 0, ETH_ALEN * 2);
-
+               memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+               eth_zero_addr(h->ethh->h_source);
                h->ethh->h_proto = htons(h->priv->ule_sndu_type);
        }
        /* else:  skb is in correct state; nothing to do. */
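
Condensed, the shape of this fix (a sketch with abbreviated names, not the driver code): normalize dest_addr at the point where its validity is known, so the Ethernet-header path can copy it unconditionally later.

        u8 dest_addr[ETH_ALEN];

        if (!dbit)      /* D-bit clear: a destination MAC is present on the wire */
                skb_copy_from_linear_data(ule_skb, dest_addr, ETH_ALEN);
        else            /* D-bit set: nothing valid to read, so zero it up front */
                eth_zero_addr(dest_addr);

        /* ...later, with no D-bit check needed... */
        memcpy(ethh->h_dest, dest_addr, ETH_ALEN);
        eth_zero_addr(ethh->h_source);
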
index b31fa6f..b979ea1 100644 (file)
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
 config VIDEO_S5K4ECGX
         tristate "Samsung S5K4ECGX sensor support"
         depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       select CRC32
         ---help---
           This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
           camera sensor with an embedded SoC image signal processor.
index 59872b3..f4e92bd 100644 (file)
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
  * I2C Driver
  */
 
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
        return 0;
 }
 
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
        return rval;
 }
 
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume  NULL
-
-#endif /* CONFIG_PM */
-
 static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
 {
        struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (IS_ERR(sensor->xshutdown))
                return PTR_ERR(sensor->xshutdown);
 
-       pm_runtime_enable(&client->dev);
-
-       rval = pm_runtime_get_sync(&client->dev);
-       if (rval < 0) {
-               rval = -ENODEV;
-               goto out_power_off;
-       }
+       rval = smiapp_power_on(&client->dev);
+       if (rval < 0)
+               return rval;
 
        rval = smiapp_identify_module(sensor);
        if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
        if (rval < 0)
                goto out_media_entity_cleanup;
 
+       pm_runtime_set_active(&client->dev);
+       pm_runtime_get_noresume(&client->dev);
+       pm_runtime_enable(&client->dev);
        pm_runtime_set_autosuspend_delay(&client->dev, 1000);
        pm_runtime_use_autosuspend(&client->dev);
        pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
        smiapp_cleanup(sensor);
 
 out_power_off:
-       pm_runtime_put(&client->dev);
-       pm_runtime_disable(&client->dev);
+       smiapp_power_off(&client->dev);
 
        return rval;
 }
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(subdev);
 
-       pm_runtime_suspend(&client->dev);
        pm_runtime_disable(&client->dev);
+       if (!pm_runtime_status_suspended(&client->dev))
+               smiapp_power_off(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
 
        for (i = 0; i < sensor->ssds_used; i++) {
                v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
index 3a0fe8c..48646a7 100644 (file)
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
        tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
 
-       /* Svideo should enable YCrCb output and disable GPCL output
-        * For Composite and TV, it should be the reverse
+       /*
+        * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+        * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+        * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+        * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+        * INTREQ/GPCL/VBLK to logic 1.
         */
        val = tvp5150_read(sd, TVP5150_MISC_CTL);
        if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
        }
 
        if (decoder->input == TVP5150_SVIDEO)
-               val = (val & ~0x40) | 0x10;
+               val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
        else
-               val = (val & ~0x10) | 0x40;
+               val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
        tvp5150_write(sd, TVP5150_MISC_CTL, val);
 };
 
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
        },{     /* Automatic offset and AGC enabled */
                TVP5150_ANAL_CHL_CTL, 0x15
        },{     /* Activate YCrCb output 0x9 or 0xd ? */
-               TVP5150_MISC_CTL, 0x6f
+               TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+                                 TVP5150_MISC_CTL_INTREQ_OE |
+                                 TVP5150_MISC_CTL_YCBCR_OE |
+                                 TVP5150_MISC_CTL_SYNC_OE |
+                                 TVP5150_MISC_CTL_VBLANK |
+                                 TVP5150_MISC_CTL_CLOCK_OE,
        },{     /* Activates video std autodetection for all standards */
                TVP5150_AUTOSW_MSK, 0x0
        },{     /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
 
        f = &format->format;
 
-       tvp5150_reset(sd, 0);
-
        f->width = decoder->rect.width;
        f->height = decoder->rect.height / 2;
 
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
 static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct tvp5150 *decoder = to_tvp5150(sd);
-       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
-       int val = 0x09;
-
-       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
-       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
-               val = 0x0d;
+       int val;
 
-       /* Initializes TVP5150 to its default values */
-       /* # set PCLK (27MHz) */
-       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+       /* Enable or disable the video output signals. */
+       val = tvp5150_read(sd, TVP5150_MISC_CTL);
+       if (val < 0)
+               return val;
+
+       val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+                TVP5150_MISC_CTL_CLOCK_OE);
+
+       if (enable) {
+               /*
+                * Enable the YCbCr and clock outputs. In discrete sync mode
+                * (non-BT.656) additionally enable the sync outputs.
+                */
+               val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+               if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+                       val |= TVP5150_MISC_CTL_SYNC_OE;
+       }
 
-       if (enable)
-               tvp5150_write(sd, TVP5150_MISC_CTL, val);
-       else
-               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+       tvp5150_write(sd, TVP5150_MISC_CTL, val);
 
        return 0;
 }
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
                res = core->hdl.error;
                goto err;
        }
-       v4l2_ctrl_handler_setup(&core->hdl);
 
        /* Default is no cropping */
        core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
        core->rect.left = 0;
        core->rect.width = TVP5150_H_MAX;
 
+       tvp5150_reset(sd, 0);   /* Calls v4l2_ctrl_handler_setup() */
+
        res = v4l2_async_register_subdev(sd);
        if (res < 0)
                goto err;
index 25a9949..30a48c2 100644 (file)
@@ -9,6 +9,15 @@
 #define TVP5150_ANAL_CHL_CTL         0x01 /* Analog channel controls */
 #define TVP5150_OP_MODE_CTL          0x02 /* Operation mode controls */
 #define TVP5150_MISC_CTL             0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL     BIT(7)
+#define TVP5150_MISC_CTL_GPCL          BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE     BIT(5)
+#define TVP5150_MISC_CTL_HVLK          BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE      BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE       BIT(2)
+#define TVP5150_MISC_CTL_VBLANK                BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE      BIT(0)
+
 #define TVP5150_AUTOSW_MSK           0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
 
 /* Reserved 05h */
index 9796340..d5c911c 100644 (file)
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
 static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
 {
        free_irq(pci_dev->irq, (void *)cobalt);
-
-       if (cobalt->msi_enabled)
-               pci_disable_msi(pci_dev);
+       pci_free_irq_vectors(pci_dev);
 }
 
 static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
           from being generated. */
        cobalt_set_interrupt(cobalt, false);
 
-       if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+       if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
                cobalt_err("Could not enable MSI\n");
-               cobalt->msi_enabled = false;
                ret = -EIO;
                goto err_release;
        }
        msi_config_show(cobalt, pci_dev);
-       cobalt->msi_enabled = true;
 
        /* Register IRQ */
        if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
index ed00dc9..00f773e 100644 (file)
@@ -287,8 +287,6 @@ struct cobalt {
        u32 irq_none;
        u32 irq_full_fifo;
 
-       bool msi_enabled;
-
        /* omnitek dma */
        int dma_channels;
        int first_fifo_channel;
index 07fa08b..d54ebe7 100644 (file)
@@ -97,14 +97,13 @@ struct pctv452e_state {
        u8 c;      /* transaction counter, wraps around...  */
        u8 initialized; /* set to 1 if 0x15 has been sent */
        u16 last_rc_key;
-
-       unsigned char data[80];
 };
 
 static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                         unsigned int write_len, unsigned int read_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        unsigned int rlen;
        int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
                return -EIO;
        }
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = cmd;
-       state->data[3] = write_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = cmd;
+       buf[3] = write_len;
 
-       memcpy(state->data + 4, data, write_len);
+       memcpy(buf + 4, data, write_len);
 
        rlen = (read_len > 0) ? 64 : 0;
-       ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
-                                 state->data, rlen, /* delay_ms */ 0);
+       ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+                                 buf, rlen, /* delay_ms */ 0);
        if (0 != ret)
                goto failed;
 
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
-       memcpy(data, state->data + 4, read_len);
+       memcpy(data, buf + 4, read_len);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return 0;
 
 failed:
        err("CI error %d; %02X %02X %02X -> %*ph.",
-            ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+            ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
                                u8 *rcv_buf, u8 rcv_len)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *buf;
        u8 id;
        int ret;
 
-       mutex_lock(&state->ca_mutex);
+       buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
        id = state->c++;
 
        ret = -EINVAL;
        if (snd_len > 64 - 7 || rcv_len > 64 - 7)
                goto failed;
 
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_I2C;
-       state->data[3] = snd_len + 3;
-       state->data[4] = addr << 1;
-       state->data[5] = snd_len;
-       state->data[6] = rcv_len;
+       buf[0] = SYNC_BYTE_OUT;
+       buf[1] = id;
+       buf[2] = PCTV_CMD_I2C;
+       buf[3] = snd_len + 3;
+       buf[4] = addr << 1;
+       buf[5] = snd_len;
+       buf[6] = rcv_len;
 
-       memcpy(state->data + 7, snd_buf, snd_len);
+       memcpy(buf + 7, snd_buf, snd_len);
 
-       ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
-                                 state->data, /* rcv_len */ 64,
+       ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+                                 buf, /* rcv_len */ 64,
                                  /* delay_ms */ 0);
        if (ret < 0)
                goto failed;
 
        /* TT USB protocol error. */
        ret = -EIO;
-       if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+       if (SYNC_BYTE_IN != buf[0] || id != buf[1])
                goto failed;
 
        /* I2C device didn't respond as expected. */
        ret = -EREMOTEIO;
-       if (state->data[5] < snd_len || state->data[6] < rcv_len)
+       if (buf[5] < snd_len || buf[6] < rcv_len)
                goto failed;
 
-       memcpy(rcv_buf, state->data + 7, rcv_len);
-       mutex_unlock(&state->ca_mutex);
+       memcpy(rcv_buf, buf + 7, rcv_len);
 
+       kfree(buf);
        return rcv_len;
 
 failed:
        err("I2C error %d; %02X %02X  %02X %02X %02X -> %*ph",
             ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
-            7, state->data);
+            7, buf);
 
-       mutex_unlock(&state->ca_mutex);
+       kfree(buf);
        return ret;
 }
 
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
 static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
-       u8 *rx;
+       u8 *b0, *rx;
        int ret;
 
        info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
        if (state->initialized)
                return 0;
 
-       rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
-       if (!rx)
+       b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b0)
                return -ENOMEM;
 
-       mutex_lock(&state->ca_mutex);
+       rx = b0 + 5;
+
        /* hmm, where should this go? */
        ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
        if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
                        __func__, ret);
 
        /* this is a one-time initialization, don't know where to put it */
-       state->data[0] = 0xaa;
-       state->data[1] = state->c++;
-       state->data[2] = PCTV_CMD_RESET;
-       state->data[3] = 1;
-       state->data[4] = 0;
+       b0[0] = 0xaa;
+       b0[1] = state->c++;
+       b0[2] = PCTV_CMD_RESET;
+       b0[3] = 1;
+       b0[4] = 0;
        /* reset board */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
-       state->data[1] = state->c++;
-       state->data[4] = 1;
+       b0[1] = state->c++;
+       b0[4] = 1;
        /* reset board (again?) */
-       ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
        if (ret)
                goto ret;
 
        state->initialized = 1;
 
 ret:
-       mutex_unlock(&state->ca_mutex);
-       kfree(rx);
+       kfree(b0);
        return ret;
 }
 
 static int pctv452e_rc_query(struct dvb_usb_device *d)
 {
        struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+       u8 *b, *rx;
        int ret, i;
        u8 id;
 
-       mutex_lock(&state->ca_mutex);
+       b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+       if (!b)
+               return -ENOMEM;
+
+       rx = b + CMD_BUFFER_SIZE;
+
        id = state->c++;
 
        /* prepare command header  */
-       state->data[0] = SYNC_BYTE_OUT;
-       state->data[1] = id;
-       state->data[2] = PCTV_CMD_IR;
-       state->data[3] = 0;
+       b[0] = SYNC_BYTE_OUT;
+       b[1] = id;
+       b[2] = PCTV_CMD_IR;
+       b[3] = 0;
 
        /* send ir request */
-       ret = dvb_usb_generic_rw(d, state->data, 4,
-                                state->data, PCTV_ANSWER_LEN, 0);
+       ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
        if (ret != 0)
                goto ret;
 
        if (debug > 3) {
-               info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
-               for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
-                       info(" %02x", state->data[i + 3]);
+               info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+               for (i = 0; (i < rx[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
+                       info(" %02x", rx[i + 3]);
 
                info("\n");
        }
 
-       if ((state->data[3] == 9) &&  (state->data[12] & 0x01)) {
+       if ((rx[3] == 9) &&  (rx[12] & 0x01)) {
                /* got a "press" event */
-               state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+               state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
                if (debug > 2)
                        info("%s: cmd=0x%02x sys=0x%02x\n",
-                               __func__, state->data[6], state->data[7]);
+                               __func__, rx[6], rx[7]);
 
                rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
        } else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
                state->last_rc_key = 0;
        }
 ret:
-       mutex_unlock(&state->ca_mutex);
+       kfree(b);
        return ret;
 }
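
Taken together, the pctv452e hunks converge on one allocation pattern: replace the long-lived state->data buffer, serialized by ca_mutex, with a fresh heap buffer per call. A minimal sketch of the pattern (function name and sizes hypothetical; dvb_usb_generic_rw() is the real helper used above):

        static int xfer_cmd(struct dvb_usb_device *d, const u8 *snd, u8 snd_len)
        {
                u8 *buf;
                int ret;

                /* kmalloc'd memory, unlike stack or shared state, is safe
                 * to hand to the USB core as a transfer buffer */
                buf = kmalloc(64, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                memcpy(buf, snd, snd_len);
                ret = dvb_usb_generic_rw(d, buf, snd_len, buf, 64, /* delay_ms */ 0);

                kfree(buf);
                return ret;
        }

Each caller now owns its buffer, so the ca_mutex serialization around the shared array can be dropped.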
 
index a0547db..76382c8 100644 (file)
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
        struct ms_id_register id_reg;
 
        if (!(*mrq)) {
-               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+               memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
                                  sizeof(struct ms_id_register));
                *mrq = &card->current_mrq;
                return 0;
index b44306b..73db085 100644 (file)
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
 
                if (!slot)
                        continue;
-               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+               if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
                        dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
-                       dw_mci_setup_bus(slot, true);
-               }
+
+               /* Force setup bus to guarantee available clock output */
+               dw_mci_setup_bus(slot, true);
        }
 
        /* Now that slots are all set up, we can enable card detect */
index 6307088..a518cb1 100644 (file)
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 {
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
+       struct nd_namespace_common *ndns = to_ndns(dev);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
        u8 *uuid = NULL;
        int rc, i;
 
-       if (dev->driver || to_ndns(dev)->claim)
+       if (dev->driver || ndns->claim)
                return -EBUSY;
 
        if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
 
                nd_namespace_pmem_set_resource(nd_region, nspm,
                                val * nd_region->ndr_mappings);
-       } else if (is_namespace_blk(dev)) {
-               struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
-               /*
-                * Try to delete the namespace if we deleted all of its
-                * allocation, this is not the seed device for the
-                * region, and it is not actively claimed by a btt
-                * instance.
-                */
-               if (val == 0 && nd_region->ns_seed != dev
-                               && !nsblk->common.claim)
-                       nd_device_unregister(dev, ND_ASYNC);
        }
 
+       /*
+        * Try to delete the namespace if we deleted all of its
+        * allocation, this is not the seed device for the region, and
+        * it is not actively claimed by a btt instance.
+        */
+       if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+               nd_device_unregister(dev, ND_ASYNC);
+
        return rc;
 }
 
index 7282d74..5b536be 100644 (file)
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
 
        rc = memcpy_from_pmem(mem + off, pmem_addr, len);
        kunmap_atomic(mem);
-       return rc;
+       if (rc)
+               return -EIO;
+       return 0;
 }
 
 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
index fcc9dcf..e65041c 100644 (file)
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
                return 0;
 
        freq->sg_table.sgl = freq->first_sgl;
-       ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-                       freq->sg_table.sgl);
+       ret = sg_alloc_table_chained(&freq->sg_table,
+                       blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
        if (ret)
                return -ENOMEM;
 
        op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-       WARN_ON(op->nents > rq->nr_phys_segments);
+       WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
        dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
                                op->nents, dir);
index 6f50741..be8c800 100644 (file)
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
        struct nvmet_subsys *subsys = to_subsys(item);
 
+       nvmet_subsys_del_ctrls(subsys);
        nvmet_subsys_put(subsys);
 }
 
index b1d66ed..fc5ba2f 100644 (file)
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
                ctrl->cntlid, ctrl->kato);
 
-       ctrl->ops->delete_ctrl(ctrl);
+       nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
        list_del(&ctrl->subsys_entry);
        mutex_unlock(&subsys->lock);
 
+       flush_work(&ctrl->async_event_work);
+       cancel_work_sync(&ctrl->fatal_err_work);
+
        ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
        nvmet_subsys_put(subsys);
 
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
        kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+       struct nvmet_ctrl *ctrl;
+
+       mutex_lock(&subsys->lock);
+       list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+               ctrl->ops->delete_ctrl(ctrl);
+       mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
        kref_put(&subsys->ref, nvmet_subsys_free);
index 173e842..ba57f98 100644 (file)
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
        struct fcnvme_ls_disconnect_acc *acc =
                        (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-       struct nvmet_fc_tgt_queue *queue;
+       struct nvmet_fc_tgt_queue *queue = NULL;
        struct nvmet_fc_tgt_assoc *assoc;
        int ret = 0;
        bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                assoc = nvmet_fc_find_target_assoc(tgtport,
                                be64_to_cpu(rqst->associd.association_id));
                iod->assoc = assoc;
-               if (!assoc)
+               if (assoc) {
+                       if (rqst->discon_cmd.scope ==
+                                       FCNVME_DISCONN_CONNECTION) {
+                               queue = nvmet_fc_find_target_queue(tgtport,
+                                               be64_to_cpu(
+                                                       rqst->discon_cmd.id));
+                               if (!queue) {
+                                       nvmet_fc_tgt_a_put(assoc);
+                                       ret = VERR_NO_CONN;
+                               }
+                       }
+               } else
                        ret = VERR_NO_ASSOC;
        }
 
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
                        FCNVME_LS_DISCONNECT);
 
 
-       if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-               queue = nvmet_fc_find_target_queue(tgtport,
-                                       be64_to_cpu(rqst->discon_cmd.id));
-               if (queue) {
-                       int qid = queue->qid;
+       /* are we to delete a Connection ID (queue)? */
+       if (queue) {
+               int qid = queue->qid;
 
-                       nvmet_fc_delete_target_queue(queue);
+               nvmet_fc_delete_target_queue(queue);
 
-                       /* release the get taken by find_target_queue */
-                       nvmet_fc_tgt_q_put(queue);
+               /* release the get taken by find_target_queue */
+               nvmet_fc_tgt_q_put(queue);
 
-                       /* tear association down if io queue terminated */
-                       if (!qid)
-                               del_assoc = true;
-               }
+               /* tear association down if io queue terminated */
+               if (!qid)
+                       del_assoc = true;
        }
 
        /* release get taken in nvmet_fc_find_target_assoc */
index 23d5eb1..cc7ad06 100644 (file)
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
                enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
index 8c3760a..6099022 100644 (file)
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
        struct ib_recv_wr *bad_wr;
 
+       ib_dma_sync_single_for_device(ndev->device,
+               cmd->sge[0].addr, cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+
        if (ndev->srq)
                return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
        return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
                first_wr = &rsp->send_wr;
 
        nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+       ib_dma_sync_single_for_device(rsp->queue->dev->device,
+               rsp->send_sge.addr, rsp->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
                pr_err("sending cmd response failed\n");
                nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
        cmd->n_rdma = 0;
        cmd->req.port = queue->port;
 
+
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+               DMA_FROM_DEVICE);
+       ib_dma_sync_single_for_cpu(queue->dev->device,
+               cmd->send_sge.addr, cmd->send_sge.length,
+               DMA_TO_DEVICE);
+
        if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
                        &queue->nvme_sq, &nvmet_rdma_ops))
                return;
index 1f38d08..f1b633b 100644 (file)
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
 
        rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
                               xgene_msi_hwirq_alloc, NULL);
-       if (rc)
+       if (rc < 0)
                goto err_cpuhp;
        pci_xgene_online = rc;
        rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
index bed1999..af8f6e9 100644 (file)
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
 {
        u32 val;
 
-       /* get iATU unroll support */
-       pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
-       dev_dbg(pp->dev, "iATU unroll: %s\n",
-               pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
        /* set the number of lanes */
        val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
        val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
         * we should not program the ATU here.
         */
        if (!pp->ops->rd_other_conf) {
+               /* get iATU unroll support */
+               pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+               dev_dbg(pp->dev, "iATU unroll: %s\n",
+                       pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
                dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
                                          pp->mem_bus_addr, pp->mem_size);
index e164b5c..204960e 100644 (file)
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        if (!pos)
                return;
+
        pdev->pcie_cap = pos;
        pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
        pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
        pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
 
        /*
-        * A Root Port is always the upstream end of a Link.  No PCIe
-        * component has two Links.  Two Links are connected by a Switch
-        * that has a Port on each Link and internal logic to connect the
-        * two Ports.
+        * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+        * of a Link.  No PCIe component has two Links.  Two Links are
+        * connected by a Switch that has a Port on each Link and internal
+        * logic to connect the two Ports.
         */
        type = pci_pcie_type(pdev);
-       if (type == PCI_EXP_TYPE_ROOT_PORT)
+       if (type == PCI_EXP_TYPE_ROOT_PORT ||
+           type == PCI_EXP_TYPE_PCIE_BRIDGE)
                pdev->has_secondary_link = 1;
        else if (type == PCI_EXP_TYPE_UPSTREAM ||
                 type == PCI_EXP_TYPE_DOWNSTREAM) {
index 3730063..c123488 100644 (file)
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
        enum pin_config_param param = pinconf_to_config_param(*config);
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, pull, val, debounce;
        u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
                        return -EINVAL;
 
                raw_spin_lock_irqsave(&vg->lock, flags);
-               debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+               debounce = readl(db_reg);
                raw_spin_unlock_irqrestore(&vg->lock, flags);
 
                switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
        unsigned int param, arg;
        void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
        void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+       void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
        unsigned long flags;
        u32 conf, val, debounce;
        int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
 
                        break;
                case PIN_CONFIG_INPUT_DEBOUNCE:
-                       debounce = readl(byt_gpio_reg(vg, offset,
-                                                     BYT_DEBOUNCE_REG));
-                       conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+                       debounce = readl(db_reg);
+                       debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
 
                        switch (arg) {
+                       case 0:
+                               conf &= ~BYT_DEBOUNCE_EN;
+                               break;
                        case 375:
-                               conf |= BYT_DEBOUNCE_PULSE_375US;
+                               debounce |= BYT_DEBOUNCE_PULSE_375US;
                                break;
                        case 750:
-                               conf |= BYT_DEBOUNCE_PULSE_750US;
+                               debounce |= BYT_DEBOUNCE_PULSE_750US;
                                break;
                        case 1500:
-                               conf |= BYT_DEBOUNCE_PULSE_1500US;
+                               debounce |= BYT_DEBOUNCE_PULSE_1500US;
                                break;
                        case 3000:
-                               conf |= BYT_DEBOUNCE_PULSE_3MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_3MS;
                                break;
                        case 6000:
-                               conf |= BYT_DEBOUNCE_PULSE_6MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_6MS;
                                break;
                        case 12000:
-                               conf |= BYT_DEBOUNCE_PULSE_12MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_12MS;
                                break;
                        case 24000:
-                               conf |= BYT_DEBOUNCE_PULSE_24MS;
+                               debounce |= BYT_DEBOUNCE_PULSE_24MS;
                                break;
                        default:
                                ret = -EINVAL;
                        }
 
+                       if (!ret)
+                               writel(debounce, db_reg);
                        break;
                default:
                        ret = -ENOTSUPP;
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
 
 static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
 {
+       struct gpio_chip *gc = &vg->chip;
+       struct device *dev = &vg->pdev->dev;
        void __iomem *reg;
        u32 base, value;
        int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
                }
 
                value = readl(reg);
-               if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
-                   !(value & BYT_DIRECT_IRQ_EN)) {
+               if (value & BYT_DIRECT_IRQ_EN) {
+                       clear_bit(i, gc->irq_valid_mask);
+                       dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+               } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
                        byt_gpio_clear_triggering(vg, i);
-                       dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+                       dev_dbg(dev, "disabling GPIO %d\n", i);
                }
        }
 
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
        gc->can_sleep   = false;
        gc->parent      = &vg->pdev->dev;
        gc->ngpio       = vg->soc_data->npins;
+       gc->irq_need_valid_mask = true;
 
 #ifdef CONFIG_PM_SLEEP
        vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
index 59cb7a6..901b356 100644 (file)
@@ -19,7 +19,7 @@
 
 #define BXT_PAD_OWN    0x020
 #define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
 #define BXT_GPI_IE     0x110
 
 #define BXT_COMMUNITY(s, e)                            \
index 1e13967..6df35dc 100644 (file)
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
        return 0;
 }
 
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+       u32 value;
+
+       value = readl(padcfg0);
+       if (input) {
+               value &= ~PADCFG0_GPIORXDIS;
+               value |= PADCFG0_GPIOTXDIS;
+       } else {
+               value &= ~PADCFG0_GPIOTXDIS;
+               value |= PADCFG0_GPIORXDIS;
+       }
+       writel(value, padcfg0);
+}
+
 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
                                     struct pinctrl_gpio_range *range,
                                     unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
        /* Disable SCI/SMI/NMI generation */
        value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
        value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
-       /* Disable TX buffer and enable RX (this will be input) */
-       value &= ~PADCFG0_GPIORXDIS;
-       value |= PADCFG0_GPIOTXDIS;
        writel(value, padcfg0);
 
+       /* Disable TX buffer and enable RX (this will be input) */
+       __intel_gpio_set_direction(padcfg0, true);
+
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
        return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
        struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
        void __iomem *padcfg0;
        unsigned long flags;
-       u32 value;
 
        raw_spin_lock_irqsave(&pctrl->lock, flags);
 
        padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
-       value = readl(padcfg0);
-       if (input)
-               value |= PADCFG0_GPIOTXDIS;
-       else
-               value &= ~PADCFG0_GPIOTXDIS;
-       writel(value, padcfg0);
+       __intel_gpio_set_direction(padcfg0, input);
 
        raw_spin_unlock_irqrestore(&pctrl->lock, flags);
 
index c3928aa..e0bca4d 100644 (file)
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_13, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index 25694f7..b69743b 100644 (file)
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[]       = { PIN(GPIOAO_0, 0) };
 static const unsigned int uart_rx_ao_a_pins[]  = { PIN(GPIOAO_1, 0) };
 static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_1, 0),
-                                                   PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[]  = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[]  = { PIN(GPIOAO_5, 0) };
 static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
 static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
 
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
        GPIO_GROUP(GPIOAO_9, 0),
 
        /* bank AO */
-       GROUP(uart_tx_ao_b,     0,      26),
+       GROUP(uart_tx_ao_b,     0,      24),
        GROUP(uart_rx_ao_b,     0,      25),
        GROUP(uart_tx_ao_a,     0,      12),
        GROUP(uart_rx_ao_a,     0,      11),
index c9a1469..537b520 100644 (file)
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
                        i = 128;
                        pin_num = AMD_GPIO_PINS_BANK2 + i;
                        break;
+               default:
+                       return;
                }
 
                for (; i < pin_num; i++) {
index aa8bd97..9668633 100644 (file)
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                                          0, 0, 0, 0};
 static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
                                           41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
 static const unsigned i2c0_pins[] = {63, 64};
 static const int i2c0_muxvals[] = {0, 0};
 static const unsigned i2c1_pins[] = {65, 66};
index 410741a..f46ece2 100644 (file)
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                        case 8:
                        case 7:
                        case 6:
+                       case 1:
                                ideapad_input_report(priv, vpc_bit);
                                break;
                        case 5:
index 1fc0de8..3617705 100644 (file)
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
 
        input_set_capability(input, EV_KEY, KEY_POWER);
 
-       error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+       error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
                                     DRIVER_NAME, input);
        if (error) {
                dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
index 97b4c3a..25f15df 100644 (file)
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
        return 0;
 
 fail_platform_mux_register:
-       for (i--; i > 0 ; i--)
+       while (--i >= 0)
                platform_device_unregister(priv->pdev_mux[i]);
        platform_device_unregister(priv->pdev_i2c);
 fail_alloc:
index cbf4d83..25b1769 100644 (file)
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
 
 static int s3_wmi_check_platform_device(struct device *dev, void *data)
 {
-       struct acpi_device *adev, *ts_adev;
+       struct acpi_device *adev, *ts_adev = NULL;
        acpi_handle handle;
        acpi_status status;
 
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
 {
        s3_wmi_send_lid_state();
        return 0;
 }
-#endif
 static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
 
 static struct platform_driver s3_wmi_driver = {
index 639ed4e..070c4da 100644 (file)
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define CCW_CMD_WRITE_CONF 0x21
 #define CCW_CMD_WRITE_STATUS 0x31
 #define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
 #define CCW_CMD_SET_IND_ADAPTER 0x73
 #define CCW_CMD_SET_VIRTIO_REV 0x83
 
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
 #define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
 #define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
 #define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
 #define VIRTIO_CCW_INTPARM_MASK 0xffff0000
 
 static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
-               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+               dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
                         ret, index);
 
        vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
 static u8 virtio_ccw_get_status(struct virtio_device *vdev)
 {
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       u8 old_status = *vcdev->status;
+       struct ccw1 *ccw;
+
+       if (vcdev->revision < 1)
+               return *vcdev->status;
+
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+               return old_status;
+
+       ccw->cmd_code = CCW_CMD_READ_STATUS;
+       ccw->flags = 0;
+       ccw->count = sizeof(*vcdev->status);
+       ccw->cda = (__u32)(unsigned long)vcdev->status;
+       ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+       /*
+        * If the channel program failed (should only happen if the device
+        * was hotunplugged, and then we clean up via the machine check
+        * handler anyway), vcdev->status was not overwritten and we just
+        * return the old status, which is fine.
+        */
+       kfree(ccw);
 
        return *vcdev->status;
 }
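
In sketch form, the new read path is a plain channel-program round trip (fields as in the hunk above; ccw_io_helper() is the driver's existing synchronous helper):

        ccw->cmd_code = CCW_CMD_READ_STATUS;    /* 0x72, revision 1+ devices only */
        ccw->flags    = 0;
        ccw->count    = sizeof(*vcdev->status);
        ccw->cda      = (__u32)(unsigned long)vcdev->status;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);

If the channel program fails, vcdev->status is left untouched, so returning it unchanged is the correct fallback.
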
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
        kfree(ccw);
 }
 
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
+               case VIRTIO_CCW_DOING_READ_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
index a9a0016..b2e8c0d 100644 (file)
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
        struct bfad_fcxp    *drv_fcxp;
        struct bfa_fcs_lport_s *fcs_port;
        struct bfa_fcs_rport_s *fcs_rport;
-       struct fc_bsg_request *bsg_request = bsg_request;
+       struct fc_bsg_request *bsg_request = job->request;
        struct fc_bsg_reply *bsg_reply = job->reply;
        uint32_t command_type = bsg_request->msgcode;
        unsigned long flags;
index 8fb5c54..99b747c 100644 (file)
@@ -46,6 +46,7 @@
 
 #define        INITIAL_SRP_LIMIT       800
 #define        DEFAULT_MAX_SECTORS     256
+#define MAX_TXU                        (1024 * 1024)
 
 static uint max_vdma_size = MAX_H_COPY_RDMA;
 
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        }
 
        info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
-                                 GFP_KERNEL);
+                                 GFP_ATOMIC);
        if (!info) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
        info->mad_version = cpu_to_be32(MAD_VERSION_1);
        info->os_type = cpu_to_be32(LINUX);
        memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
-       info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+       info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
 
        dma_wmb();
        rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
        }
 
        cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
-                                GFP_KERNEL);
+                                GFP_ATOMIC);
        if (!cap) {
                dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
                        iue->target);
index 236e4e5..7b6bd8e 100644 (file)
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
                } else {
                        buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
                        lpfc_els_free_data(phba, buf_ptr1);
+                       elsiocb->context2 = NULL;
                }
        }
 
        if (elsiocb->context3) {
                buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
                lpfc_els_free_bpl(phba, buf_ptr);
+               elsiocb->context3 = NULL;
        }
        lpfc_sli_release_iocbq(phba, elsiocb);
        return 0;
index 4faa767..a78a3df 100644 (file)
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
 
  free_vfi_bmask:
        kfree(phba->sli4_hba.vfi_bmask);
+       phba->sli4_hba.vfi_bmask = NULL;
  free_xri_ids:
        kfree(phba->sli4_hba.xri_ids);
+       phba->sli4_hba.xri_ids = NULL;
  free_xri_bmask:
        kfree(phba->sli4_hba.xri_bmask);
+       phba->sli4_hba.xri_bmask = NULL;
  free_vpi_ids:
        kfree(phba->vpi_ids);
+       phba->vpi_ids = NULL;
  free_vpi_bmask:
        kfree(phba->vpi_bmask);
+       phba->vpi_bmask = NULL;
  free_rpi_ids:
        kfree(phba->sli4_hba.rpi_ids);
+       phba->sli4_hba.rpi_ids = NULL;
  free_rpi_bmask:
        kfree(phba->sli4_hba.rpi_bmask);
+       phba->sli4_hba.rpi_bmask = NULL;
  err_exit:
        return rc;
 }
index 394fe13..dcb33f4 100644 (file)
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
  * @eedp_enable: eedp support enable bit
  * @eedp_type: 0(type_1), 1(type_2), 2(type_3)
  * @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
  */
 struct MPT3SAS_DEVICE {
        struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
        u8      ignore_delay_remove;
        /* Iopriority Command Handling */
        u8      ncq_prio_enable;
+       /*
+        * Bug workaround for SATL handling: while a SATL passthrough
+        * is in operation, the mpt2/3sas firmware doesn't return BUSY
+        * or TASK_SET_FULL for subsequent commands as the spec
+        * requires; it simply does nothing with them until the
+        * passthrough completes, which can make them time out if the
+        * passthrough is a long-running command (like format or
+        * secure erase).  This variable allows us to do the right
+        * thing while a SATL command is pending.
+        */
+       unsigned long ata_command_pending;
 
 };
 
index b5c966e..75f3fce 100644 (file)
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
        }
 }
 
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
 {
-       return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+       struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+       if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+               return 0;
+
+       if (pending)
+               return test_and_set_bit(0, &priv->ata_command_pending);
+
+       clear_bit(0, &priv->ata_command_pending);
+       return 0;
 }
 
 /**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
                if (!scmd)
                        continue;
                count++;
-               if (ata_12_16_cmd(scmd))
-                       scsi_internal_device_unblock(scmd->device,
-                                                       SDEV_RUNNING);
+               _scsih_set_satl_pending(scmd, false);
                mpt3sas_base_free_smid(ioc, smid);
                scsi_dma_unmap(scmd);
                if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
        if (ioc->logging_level & MPT_DEBUG_SCSI)
                scsi_print_command(scmd);
 
-       /*
-        * Lock the device for any subsequent command until command is
-        * done.
-        */
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_block(scmd->device);
-
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
                return 0;
        }
 
+       /*
+        * Bug workaround for firmware SATL handling.  The loop is
+        * based on atomic operations and ensures consistency since
+        * we're lockless at this point.
+        */
+       do {
+               if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+                       scmd->result = SAM_STAT_BUSY;
+                       scmd->scsi_done(scmd);
+                       return 0;
+               }
+       } while (_scsih_set_satl_pending(scmd, true));
+
        sas_target_priv_data = sas_device_priv_data->sas_target;
 
        /* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
        if (scmd == NULL)
                return 1;
 
-       if (ata_12_16_cmd(scmd))
-               scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+       _scsih_set_satl_pending(scmd, false);
 
        mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 
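Restated with comments, the lockless gate these hunks implement (same names as above; a condensed sketch of the logic, not a verbatim extract):

        /* Submit path: every command checks the per-device flag first. */
        do {
                if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
                        scmd->result = SAM_STAT_BUSY;   /* SATL passthrough in flight */
                        scmd->scsi_done(scmd);
                        return 0;
                }
                /*
                 * _scsih_set_satl_pending() returns 0 for anything other
                 * than ATA_12/ATA_16, so ordinary commands fall straight
                 * through; an ATA passthrough loops only if it lost the
                 * race for the bit.
                 */
        } while (_scsih_set_satl_pending(scmd, true));

The bit is cleared in _scsih_io_done() and _scsih_flush_running_cmds(), so the gate reopens on every completion path.
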
index 47eb4d5..f201f40 100644 (file)
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        ssize_t rval = 0;
 
+       mutex_lock(&ha->optrom_mutex);
+
        if (ha->optrom_state != QLA_SREADING)
-               return 0;
+               goto out;
 
-       mutex_lock(&ha->optrom_mutex);
        rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
            ha->optrom_region_size);
+
+out:
        mutex_unlock(&ha->optrom_mutex);
 
        return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
            struct device, kobj)));
        struct qla_hw_data *ha = vha->hw;
 
-       if (ha->optrom_state != QLA_SWRITING)
+       mutex_lock(&ha->optrom_mutex);
+
+       if (ha->optrom_state != QLA_SWRITING) {
+               mutex_unlock(&ha->optrom_mutex);
                return -EINVAL;
-       if (off > ha->optrom_region_size)
+       }
+       if (off > ha->optrom_region_size) {
+               mutex_unlock(&ha->optrom_mutex);
                return -ERANGE;
+       }
        if (off + count > ha->optrom_region_size)
                count = ha->optrom_region_size - off;
 
-       mutex_lock(&ha->optrom_mutex);
        memcpy(&ha->optrom_buffer[off], buf, count);
        mutex_unlock(&ha->optrom_mutex);
 
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
        struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
            struct device, kobj)));
        int type;
-       int rval = 0;
        port_id_t did;
 
        type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
 
        ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
 
-       rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+       qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
        return count;
 }
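
The pattern being fixed in the two sysfs hunks above is a classic check-then-act race: optrom_state was tested before optrom_mutex was taken, so the state (and the buffer behind it) could change between the test and the memcpy. A userspace sketch of the corrected ordering, with illustrative names:

    /* Sketch only: the state check and the buffer access must sit under
     * the same mutex, otherwise the buffer can be torn down in between. */
    #include <pthread.h>
    #include <string.h>
    #include <sys/types.h>      /* ssize_t */

    enum optrom_state { QLA_SWAITING, QLA_SREADING, QLA_SWRITING };

    struct hw {
        pthread_mutex_t optrom_mutex;
        enum optrom_state optrom_state;
        char *optrom_buffer;
        size_t optrom_region_size;
    };

    static ssize_t optrom_read(struct hw *ha, char *buf, size_t count)
    {
        ssize_t rval = 0;

        pthread_mutex_lock(&ha->optrom_mutex);  /* take the lock first... */
        if (ha->optrom_state != QLA_SREADING)   /* ...then check the state */
            goto out;
        if (count > ha->optrom_region_size)
            count = ha->optrom_region_size;
        memcpy(buf, ha->optrom_buffer, count);
        rval = (ssize_t)count;
    out:
        pthread_mutex_unlock(&ha->optrom_mutex);
        return rval;
    }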
 
index f7df01b..5b1287a 100644 (file)
@@ -1556,7 +1556,8 @@ typedef struct {
 struct atio {
        uint8_t         entry_type;             /* Entry type. */
        uint8_t         entry_count;            /* Entry count. */
-       uint8_t         data[58];
+       __le16          attr_n_length;
+       uint8_t         data[56];
        uint32_t        signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
 };
@@ -2732,7 +2733,7 @@ struct isp_operations {
 #define QLA_MSIX_FW_MODE(m)    (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
 #define QLA_MSIX_FW_MODE_1(m)  (QLA_MSIX_FW_MODE(m) == 1)
 
-#define QLA_MSIX_DEFAULT               0x00
+#define QLA_BASE_VECTORS       2 /* default + RSP */
 #define QLA_MSIX_RSP_Q                 0x01
 #define QLA_ATIO_VECTOR                0x02
 #define QLA_MSIX_QPAIR_MULTIQ_RSP_Q    0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
        uint16_t entry;
        char name[30];
        void *handle;
-       struct irq_affinity_notify irq_notify;
        int cpuid;
 };
 
index 632d5f3..7b6317c 100644 (file)
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
 
        /* Wait for soft-reset to complete. */
        RD_REG_DWORD(&reg->ctrl_status);
-       for (cnt = 0; cnt < 6000000; cnt++) {
+       for (cnt = 0; cnt < 60; cnt++) {
                barrier();
                if ((RD_REG_DWORD(&reg->ctrl_status) &
                    CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
        RD_REG_DWORD(&reg->hccr);
 
        RD_REG_WORD(&reg->mailbox0);
-       for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+       for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
            rval == QLA_SUCCESS; cnt--) {
                barrier();
                if (cnt)
index 5093ca9..dc88a09 100644 (file)
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
 static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
-    const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
        if (pkt->entry_status & RF_BUSY)
                res = DID_BUS_BUSY << 16;
 
+       if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+           pkt->handle == QLA_TGT_SKIP_HANDLE)
+               return;
+
        sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
        if (sp) {
                sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
        if (!vha->flags.online)
                return;
 
-       if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
-               /* if kernel does not notify qla of IRQ's CPU change,
-                * then set it here.
-                */
-               rsp->msix->cpuid = smp_processor_id();
-               ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
-       }
-
        while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
                pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
 
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
 static int
 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
 {
-#define MIN_MSIX_COUNT 2
        int i, ret;
        struct qla_msix_entry *qentry;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+       struct irq_affinity desc = {
+               .pre_vectors = QLA_BASE_VECTORS,
+       };
+
+       if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+               desc.pre_vectors++;
+
+       ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+                       ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+                       &desc);
 
-       ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
-                                   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
        if (ret < 0) {
                ql_log(ql_log_fatal, vha, 0x00c7,
                    "MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                qentry->have_irq = 0;
                qentry->in_use = 0;
                qentry->handle = NULL;
-               qentry->irq_notify.notify  = qla_irq_affinity_notify;
-               qentry->irq_notify.release = qla_irq_affinity_release;
-               qentry->cpuid = -1;
        }
 
        /* Enable MSI-X vectors for the base queue */
-       for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+       for (i = 0; i < QLA_BASE_VECTORS; i++) {
                qentry = &ha->msix_entries[i];
                qentry->handle = rsp;
                rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
                        goto msix_register_fail;
                qentry->have_irq = 1;
                qentry->in_use = 1;
-
-               /* Register for CPU affinity notification. */
-               irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
-               /* Schedule work (ie. trigger a notification) to read cpu
-                * mask for this specific irq.
-                * kref_get is required because
-               * irq_affinity_notify() will do
-               * kref_put().
-               */
-               kref_get(&qentry->irq_notify.kref);
-               schedule_work(&qentry->irq_notify.work);
        }
 
        /*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
        msix->handle = qpair;
        return ret;
 }
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
-       const cpumask_t *mask)
-{
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct qla_hw_data *ha;
-       struct scsi_qla_host *base_vha;
-       struct rsp_que *rsp = e->handle;
-
-       /* user is recommended to set mask to just 1 cpu */
-       e->cpuid = cpumask_first(mask);
-
-       ha = rsp->hw;
-       base_vha = pci_get_drvdata(ha->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-           "%s: host %ld : vector %d cpu %d \n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-
-       if (e->have_irq) {
-               if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
-                   (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
-                       ha->tgt.rspq_vector_cpuid = e->cpuid;
-                       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-                           "%s: host%ld: rspq vector %d cpu %d  runtime change\n",
-                           __func__, base_vha->host_no, e->vector, e->cpuid);
-               }
-       }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
-       struct irq_affinity_notify *notify =
-               container_of(ref, struct irq_affinity_notify, kref);
-       struct qla_msix_entry *e =
-               container_of(notify, struct qla_msix_entry, irq_notify);
-       struct rsp_que *rsp = e->handle;
-       struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
-       ql_dbg(ql_dbg_init, base_vha, 0xffff,
-               "%s: host%ld: vector %d cpu %d\n", __func__,
-           base_vha->host_no, e->vector, e->cpuid);
-}
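
All of the hand-rolled affinity-notifier plumbing deleted above becomes unnecessary once the IRQ core spreads the vectors itself; the only thing the driver still states is how many leading vectors must be left alone. Recapping the new allocation as a commented kernel-style fragment (a sketch, not a complete function):

    struct irq_affinity desc = {
        .pre_vectors = QLA_BASE_VECTORS,    /* default + RSP keep their affinity */
    };
    int nvec;

    if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
        desc.pre_vectors++;                 /* ATIO vector is excluded as well */

    /* every vector past pre_vectors is spread across CPUs by the core */
    nvec = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
                ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
    if (nvec < 0)
        return nvec;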
index 2819ceb..67f64db 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/delay.h>
 #include <linux/gfp.h>
 
-struct rom_cmd {
+static struct rom_cmd {
        uint16_t cmd;
 } rom_cmds[] = {
        { MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                return QLA_FUNCTION_TIMEOUT;
        }
 
-        /* if PCI error, then avoid mbx processing.*/
-        if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+       /* if PCI error, then avoid mbx processing.*/
+       if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
                ql_log(ql_log_warn, vha, 0x1191,
                    "PCI error, exiting.\n");
                return QLA_FUNCTION_TIMEOUT;
-        }
+       }
 
        reg = ha->iobase;
        io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
                }
        } else {
 
-               uint16_t mb0;
-               uint32_t ictrl;
+               uint16_t mb[8];
+               uint32_t ictrl, host_status, hccr;
                uint16_t        w;
 
                if (IS_FWI2_CAPABLE(ha)) {
-                       mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+                       mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+                       mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+                       mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+                       mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
                        ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+                       host_status = RD_REG_DWORD(&reg->isp24.host_status);
+                       hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+                       ql_log(ql_log_warn, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+                           command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+                           mb[7], host_status, hccr);
+
                } else {
-                       mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+                       mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
                        ictrl = RD_REG_WORD(&reg->isp.ictrl);
+                       ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+                           "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+                           "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
                }
-               ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
-                   "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
-                   "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
                ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
 
                /* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
        struct qla_hw_data *ha = vha->hw;
-       int configured_count;
 
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
            "Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
                /*EMPTY*/
                ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
        } else {
-               configured_count = mcp->mb[11];
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
                    "Done %s.\n", __func__);
        }
index 54380b4..0a1723c 100644 (file)
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
        (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
        QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
 
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+       0x410000A8, 0x410000AC,
+       0x410000B8, 0x410000BC
+};
+
 static void qla82xx_crb_addr_transform_setup(void)
 {
        qla82xx_crb_addr_transform(XDMA);
index 6201dce..77624ea 100644 (file)
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
 #define MD_MIU_TEST_AGT_ADDR_LO                0x41000094
 #define MD_MIU_TEST_AGT_ADDR_HI                0x41000098
 
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
-       0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
 
 #define CRB_NIU_XG_PAUSE_CTL_P0        0x1
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
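
This pair of hunks (here and in the qla_nx.c change just above) is the standard fix for a static const array defined in a header: every translation unit that includes the header gets its own private copy, wasting space and triggering unused-variable warnings in units that never touch it. The shape of the fix, in miniature (file names illustrative):

    /* regs.h - declaration only; states type and size, allocates nothing */
    extern const int MD_MIU_TEST_AGT_RDDATA[4];

    /* regs.c - exactly one definition for the whole program */
    const int MD_MIU_TEST_AGT_RDDATA[4] = {
        0x410000A8, 0x410000AC,
        0x410000B8, 0x410000BC,
    };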
index 007192d..dc1ec9b 100644 (file)
 
 #define TIMEOUT_100_MS 100
 
+static const uint32_t qla8044_reg_tbl[] = {
+       QLA8044_PEG_HALT_STATUS1,
+       QLA8044_PEG_HALT_STATUS2,
+       QLA8044_PEG_ALIVE_COUNTER,
+       QLA8044_CRB_DRV_ACTIVE,
+       QLA8044_CRB_DEV_STATE,
+       QLA8044_CRB_DRV_STATE,
+       QLA8044_CRB_DRV_SCRATCH,
+       QLA8044_CRB_DEV_PART_INFO1,
+       QLA8044_CRB_IDC_VER_MAJOR,
+       QLA8044_FW_VER_MAJOR,
+       QLA8044_FW_VER_MINOR,
+       QLA8044_FW_VER_SUB,
+       QLA8044_CMDPEG_STATE,
+       QLA8044_ASIC_TEMP,
+};
+
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
index 02fe3c4..83c1b7e 100644 (file)
@@ -535,23 +535,6 @@ enum qla_regs {
 #define CRB_CMDPEG_CHECK_RETRY_COUNT    60
 #define CRB_CMDPEG_CHECK_DELAY          500
 
-static const uint32_t qla8044_reg_tbl[] = {
-       QLA8044_PEG_HALT_STATUS1,
-       QLA8044_PEG_HALT_STATUS2,
-       QLA8044_PEG_ALIVE_COUNTER,
-       QLA8044_CRB_DRV_ACTIVE,
-       QLA8044_CRB_DEV_STATE,
-       QLA8044_CRB_DRV_STATE,
-       QLA8044_CRB_DRV_SCRATCH,
-       QLA8044_CRB_DEV_PART_INFO1,
-       QLA8044_CRB_IDC_VER_MAJOR,
-       QLA8044_FW_VER_MAJOR,
-       QLA8044_FW_VER_MINOR,
-       QLA8044_FW_VER_SUB,
-       QLA8044_CMDPEG_STATE,
-       QLA8044_ASIC_TEMP,
-};
-
 /* MiniDump Structures */
 
 /* Driver_code is for driver to write some info about the entry
index 8521cfe..0a000ec 100644 (file)
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
                        continue;
 
                rsp = ha->rsp_q_map[cnt];
-               clear_bit(cnt, ha->req_qid_map);
+               clear_bit(cnt, ha->rsp_qid_map);
                ha->rsp_q_map[cnt] =  NULL;
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
                qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
                                sizeof(struct ct6_dsd), 0,
                                SLAB_HWCACHE_ALIGN, NULL);
                        if (!ctx_cachep)
-                               goto fail_free_gid_list;
+                               goto fail_free_srb_mempool;
                }
                ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
                        ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
        ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
            GFP_KERNEL);
        if (!ha->loop_id_map)
-               goto fail_async_pd;
+               goto fail_loop_id_map;
        else {
                qla2x00_set_reserved_loop_ids(ha);
                ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
 
        return 0;
 
+fail_loop_id_map:
+       dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
 fail_async_pd:
        dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
 fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
        dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
        ha->ms_iocb = NULL;
        ha->ms_iocb_dma = 0;
+
+       if (ha->sns_cmd)
+               dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+                   ha->sns_cmd, ha->sns_cmd_dma);
 fail_dma_pool:
        if (IS_QLA82XX(ha) || ql2xenabledif) {
                dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
        kfree(ha->nvram);
        ha->nvram = NULL;
 fail_free_ctx_mempool:
-       mempool_destroy(ha->ctx_mempool);
+       if (ha->ctx_mempool)
+               mempool_destroy(ha->ctx_mempool);
        ha->ctx_mempool = NULL;
 fail_free_srb_mempool:
-       mempool_destroy(ha->srb_mempool);
+       if (ha->srb_mempool)
+               mempool_destroy(ha->srb_mempool);
        ha->srb_mempool = NULL;
 fail_free_gid_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
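
The fixes in this hunk are all about keeping the goto unwind ladder in sync with the allocations: jumping to the wrong label either leaks everything allocated in between (the ctx_cachep case) or skips a free entirely (the loop_id_map case). The invariant, modeled in plain C:

    /* Userspace model of the unwind-ladder pattern: every allocation gets
     * a label, and a failure jumps to the label that undoes exactly what
     * has been set up so far, in reverse order. */
    #include <stdlib.h>

    struct ctx { void *a, *b, *c; };

    static int ctx_init(struct ctx *x)
    {
        x->a = malloc(16);
        if (!x->a)
            goto fail;
        x->b = malloc(16);
        if (!x->b)
            goto fail_free_a;   /* only 'a' exists at this point */
        x->c = malloc(16);
        if (!x->c)
            goto fail_free_b;
        return 0;               /* on success the caller owns a, b, c */

    fail_free_b:
        free(x->b);
    fail_free_a:
        free(x->a);
    fail:
        return -1;
    }

    int main(void)
    {
        struct ctx x;
        return ctx_init(&x);
    }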
index bff9689..e4fda84 100644 (file)
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 {
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
-       uint32_t unpacked_lun, lun = 0;
        uint16_t loop_id;
        int res = 0;
        struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
-       struct atio_from_isp *a = (struct atio_from_isp *)iocb;
        unsigned long flags;
 
        loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
            "loop_id %d)\n", vha->host_no, sess, sess->port_name,
            mcmd, loop_id);
 
-       lun = a->u.isp24.fcp_cmnd.lun;
-       unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
-       return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
-           iocb, QLA24XX_MGMT_SEND_NACK);
+       return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 
        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;
-       pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->handle = QLA_TGT_SKIP_HANDLE;
 
        nack = (struct nack_to_isp *)pkt;
        nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
 #if 0  /* Todo  */
                if (rc == -ENOMEM)
                        qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+               if (rc) {
+               }
 #endif
                goto done;
        }
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
        if (!vha->flags.online)
                return;
 
-       while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+       while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+           fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
                pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
                cnt = pkt->u.raw.entry_count;
 
-               qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
-                   ha_locked);
+               if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+                       /*
+                        * This packet is corrupted. The header + payload
+                        * can not be trusted. There is no point in passing
+                        * it further up.
+                        */
+                       ql_log(ql_log_warn, vha, 0xffff,
+                           "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+                           pkt->u.isp24.fcp_hdr.s_id,
+                           be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+                           le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+                       adjust_corrupted_atio(pkt);
+                       qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+               } else {
+                       qlt_24xx_atio_pkt_all_vps(vha,
+                           (struct atio_from_isp *)pkt, ha_locked);
+               }
 
                for (i = 0; i < cnt; i++) {
                        ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
 
                /* Disable Full Login after LIP */
                nv->host_p &= cpu_to_le32(~BIT_10);
+
+               /*
+                * clear BIT 15 explicitly as we have seen at least
+                * a couple of instances where this was set and this
+                * was causing the firmware to not be initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                /* Enable target PRLI control */
                nv->firmware_options_2 |= cpu_to_le32(BIT_14);
        } else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                /* Disable ini mode, if requested */
                if (!qla_ini_mode_enabled(vha))
                        nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
                /* Disable Full Login after LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
                /* Enable initial LIP */
                nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+               /*
+                * clear BIT 15 explicitly as we have seen at
+                * least a couple of instances where this was set
+                * and this was causing the firmware to not be
+                * initialized.
+                */
+               nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
                if (ql2xtgt_tape_enable)
                        /* Enable FC tape support */
                        nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
                return;
        }
 
-       /* out-of-order frames reassembly */
-       nv->firmware_options_3 |= BIT_6|BIT_9;
-
        if (ha->tgt.enable_class_2) {
                if (vha->flags.init_done)
                        fc_host_supported_classes(vha->host) =
index f26c5f6..0824a81 100644 (file)
@@ -427,13 +427,33 @@ struct atio_from_isp {
                struct {
                        uint8_t  entry_type;    /* Entry type. */
                        uint8_t  entry_count;   /* Entry count. */
-                       uint8_t  data[58];
+                       __le16   attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN  0x38
+                       uint8_t  data[56];
                        uint32_t signature;
 #define ATIO_PROCESSED 0xDEADDEAD              /* Signature */
                } raw;
        } u;
 } __packed;
 
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+       if (atio->entry_type == ATIO_TYPE7 &&
+           (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
+           FCP_CMD_LENGTH_MIN))
+               return 1;
+       else
+               return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+       atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+       atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
 
 /*
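
The reworked struct exposes the two bytes previously buried in data[58]: the low 12 bits of attr_n_length carry the received frame length, and a frame shorter than 0x38 bytes cannot hold a complete FCP_CMND, so the ATIO is terminated instead of parsed. A userspace model of the check (it byte-swaps before masking, which is the endian-safe order; names mirror the header above):

    #include <stdint.h>
    #include <stdio.h>

    #define FCP_CMD_LENGTH_MASK 0x0fff
    #define FCP_CMD_LENGTH_MIN  0x38

    static int frame_too_short(uint16_t attr_n_length /* host order */)
    {
        return (attr_n_length & FCP_CMD_LENGTH_MASK) < FCP_CMD_LENGTH_MIN;
    }

    int main(void)
    {
        printf("%d\n", frame_too_short(0x2000 | 0x20)); /* 1: 0x20 < 0x38 */
        printf("%d\n", frame_too_short(0x2000 | 0x40)); /* 0: long enough */
        return 0;
    }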
index 36935c9..8a58ef3 100644 (file)
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring;
+
+               if (atr || !buf) {
+                       length = ha->tgt.atio_q_length;
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(length, buf, len);
+                       qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd026,
                    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
                                count++;
                        }
                }
+       } else if (QLA_TGT_MODE_ENABLED() &&
+           ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+               struct qla_hw_data *ha = vha->hw;
+               struct atio *atr = ha->tgt.atio_ring_ptr;
+
+               if (atr || !buf) {
+                       qla27xx_insert16(0, buf, len);
+                       qla27xx_insert16(1, buf, len);
+                       qla27xx_insert32(ha->tgt.atio_q_in ?
+                           readl(ha->tgt.atio_q_in) : 0, buf, len);
+                       count++;
+               }
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd02f,
                    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
index 6643f6f..d925910 100644 (file)
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
 {
        return sprintf(page,
            "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 }
 
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
        int ret;
 
        pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
-           UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+           UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
            utsname()->machine);
 
        ret = target_register_template(&tcm_qla2xxx_ops);
index 37e026a..cf8430b 100644 (file)
@@ -1,7 +1,6 @@
 #include <target/target_core_base.h>
 #include <linux/btree.h>
 
-#define TCM_QLA2XXX_VERSION    "v0.1"
 /* length of ASCII WWPNs including pad */
 #define TCM_QLA2XXX_NAMELEN    32
 /*
index 1fbb1ec..1f5d92a 100644 (file)
@@ -836,6 +836,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
        struct bio *bio = rq->bio;
        sector_t sector = blk_rq_pos(rq);
        unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
        int ret;
 
        if (sdkp->device->no_write_same)
@@ -868,7 +869,21 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
 
        cmd->transfersize = sdp->sector_size;
        cmd->allowed = SD_MAX_RETRIES;
-       return scsi_init_io(cmd);
+
+       /*
+        * For WRITE SAME the data transferred via the DATA OUT buffer is
+        * different from the amount of data actually written to the target.
+        *
+        * We set up __data_len to the amount of data transferred via the
+        * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
+        * to transfer a single sector of data first, but then reset it to
+        * the amount of data to be written right after so that the I/O path
+        * knows how much to actually write.
+        */
+       rq->__data_len = sdp->sector_size;
+       ret = scsi_init_io(cmd);
+       rq->__data_len = nr_bytes;
+       return ret;
 }
 
 static int sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
@@ -2585,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                if (sdp->broken_fua) {
                        sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
                        sdkp->DPOFUA = 0;
-               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+                          !sdkp->device->use_16_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
@@ -2768,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
        }
 
-       sdkp->zoned = (buffer[8] >> 4) & 3;
-       if (sdkp->zoned == 1)
-               q->limits.zoned = BLK_ZONED_HA;
-       else if (sdkp->device->type == TYPE_ZBC)
+       if (sdkp->device->type == TYPE_ZBC) {
+               /* Host-managed */
                q->limits.zoned = BLK_ZONED_HM;
-       else
-               q->limits.zoned = BLK_ZONED_NONE;
+       } else {
+               sdkp->zoned = (buffer[8] >> 4) & 3;
+               if (sdkp->zoned == 1)
+                       /* Host-aware */
+                       q->limits.zoned = BLK_ZONED_HA;
+               else
+                       /*
+                        * Treat drive-managed devices as
+                        * regular block devices.
+                        */
+                       q->limits.zoned = BLK_ZONED_NONE;
+       }
        if (blk_queue_is_zoned(q) && sdkp->first_scan)
                sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
                      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
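
The reordered classification makes the precedence explicit: the ZBC device type alone decides host-managed, and only for non-ZBC devices does the VPD zoned field distinguish host-aware from drive-managed. As a small decision function (illustrative types, not the sd.c originals):

    #include <stdio.h>

    enum zoned_model { BLK_ZONED_NONE, BLK_ZONED_HA, BLK_ZONED_HM };

    static enum zoned_model classify(int is_zbc, unsigned char vpd_byte8)
    {
        if (is_zbc)
            return BLK_ZONED_HM;        /* host-managed wins outright */
        switch ((vpd_byte8 >> 4) & 3) {
        case 1:
            return BLK_ZONED_HA;        /* host-aware */
        default:
            return BLK_ZONED_NONE;      /* drive-managed: plain block dev */
        }
    }

    int main(void)
    {
        /* 2 1 0: host-managed, host-aware, none */
        printf("%d %d %d\n", classify(1, 0x00), classify(0, 0x10),
               classify(0, 0x00));
        return 0;
    }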
index 8c9a35c..50adabb 100644 (file)
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
 
        ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
 
-       if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+       if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
                efd.addr = sas_get_address(sdev);
 
        if (efd.addr) {
index 8823cc8..5bb3760 100644 (file)
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
 
        if (IS_ERR(task)) {
                dev_err(dev, "can't create rproc_boot thread\n");
+               ret = PTR_ERR(task);
                goto err_put_rproc;
        }
 
index ec4aa25..2922a99 100644 (file)
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
        select REGMAP_MMIO
+       depends on HAS_DMA
        depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
        help
          This enables support for the Freescale DSPI controller in master
index e89da0a..0314c6b 100644 (file)
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        struct spi_master *master;
        struct a3700_spi *spi;
        u32 num_cs = 0;
-       int ret = 0;
+       int irq, ret = 0;
 
        master = spi_alloc_master(dev, sizeof(*spi));
        if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
        master->unprepare_message = a3700_spi_unprepare_message;
        master->set_cs = a3700_spi_set_cs;
        master->flags = SPI_MASTER_HALF_DUPLEX;
-       master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+       master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
                              SPI_RX_QUAD | SPI_TX_QUAD);
 
        platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
                goto error;
        }
 
-       spi->irq = platform_get_irq(pdev, 0);
-       if (spi->irq < 0) {
-               dev_err(dev, "could not get irq: %d\n", spi->irq);
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "could not get irq: %d\n", irq);
                ret = -ENXIO;
                goto error;
        }
+       spi->irq = irq;
 
        init_completion(&spi->done);
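
The irq change above is the usual signedness fix: platform_get_irq() returns a negative errno, but spi->irq is unsigned, so testing spi->irq < 0 after the assignment can never fire. Checking in a signed local first, modeled in userspace:

    #include <stdio.h>

    struct spi { unsigned int irq; };

    /* hypothetical stand-in for platform_get_irq() */
    static int fake_get_irq(int fail) { return fail ? -6 /* -ENXIO */ : 42; }

    static int probe(struct spi *s, int fail)
    {
        int irq = fake_get_irq(fail);   /* signed local keeps the errno */

        if (irq < 0)
            return irq;                 /* would be missed via s->irq < 0 */
        s->irq = (unsigned int)irq;
        return 0;
    }

    int main(void)
    {
        struct spi s;
        printf("%d %d\n", probe(&s, 0), probe(&s, 1));  /* 0 -6 */
        return 0;
    }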
 
index 319225d..6ab4c77 100644 (file)
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
                        SPI_ENGINE_VERSION_MAJOR(version),
                        SPI_ENGINE_VERSION_MINOR(version),
                        SPI_ENGINE_VERSION_PATCH(version));
-               return -ENODEV;
+               ret = -ENODEV;
+               goto err_put_master;
        }
 
        spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
index d36c11b..02fb967 100644 (file)
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = t->rx_buf;
                t->rx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_FROM_DEVICE);
-               if (!t->rx_dma) {
+               if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                        ret = -EFAULT;
                        goto err_rx_map;
                }
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
                        buf = (void *)t->tx_buf;
                t->tx_dma = dma_map_single(&spi->dev, buf,
                                t->len, DMA_TO_DEVICE);
-               if (!t->tx_dma) {
+               if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                        ret = -EFAULT;
                        goto err_tx_map;
                }
index e31971f..837cb8d 100644 (file)
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
 static void mid_spi_dma_stop(struct dw_spi *dws)
 {
        if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->txchan);
+               dmaengine_terminate_sync(dws->txchan);
                clear_bit(TX_BUSY, &dws->dma_chan_busy);
        }
        if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
-               dmaengine_terminate_all(dws->rxchan);
+               dmaengine_terminate_sync(dws->rxchan);
                clear_bit(RX_BUSY, &dws->dma_chan_busy);
        }
 }
index b715a26..054012f 100644 (file)
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
 
 static int dw_spi_debugfs_init(struct dw_spi *dws)
 {
-       dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+       char name[128];
+
+       snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+       dws->debugfs = debugfs_create_dir(name, NULL);
        if (!dws->debugfs)
                return -ENOMEM;
 
index dd7b5b4..d6239fa 100644 (file)
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
                pxa2xx_spi_write(drv_data, SSCR1, tmp);
                tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
                pxa2xx_spi_write(drv_data, SSCR0, tmp);
+               break;
        default:
                tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
                      SSCR1_TxTresh(TX_THRESH_DFLT);
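
The one-line fix above adds a missing break: without it the quirk case fell straight through into default, and the register values it had just computed were overwritten again. A minimal illustration:

    #include <stdio.h>

    static int pick(int quirk)
    {
        int tmp = 0;

        switch (quirk) {
        case 1:
            tmp = 0x11;
            break;          /* the added break: stop here */
        default:
            tmp = 0x22;     /* reached only for the default case now */
        }
        return tmp;
    }

    int main(void)
    {
        printf("%x %x\n", pick(1), pick(0));    /* 11 22 */
        return 0;
    }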
index 0012ad0..1f00eeb 100644 (file)
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
 };
 
 static const struct of_device_id sh_msiof_match[] = {
-       { .compatible = "renesas,sh-msiof",        .data = &sh_data },
        { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
        { .compatible = "renesas,msiof-r8a7790",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7791",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7792",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7793",   .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7794",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
        { .compatible = "renesas,msiof-r8a7796",   .data = &r8a779x_data },
+       { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+       { .compatible = "renesas,sh-msiof",        .data = &sh_data }, /* Deprecated */
        {},
 };
 MODULE_DEVICE_TABLE(of, sh_msiof_match);
index b811b0f..4c77965 100644 (file)
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
        void (*control)(void __iomem *reg, bool on);
 
        /* Per-sensor methods */
-       int (*get_temp)(struct chip_tsadc_table table,
+       int (*get_temp)(const struct chip_tsadc_table *table,
                        int chn, void __iomem *reg, int *temp);
-       void (*set_alarm_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
-       void (*set_tshut_temp)(struct chip_tsadc_table table,
-                              int chn, void __iomem *reg, int temp);
+       int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
+       int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+                             int chn, void __iomem *reg, int temp);
        void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
 
        /* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
        {3452, 115000},
        {3437, 120000},
        {3421, 125000},
+       {0, 125000},
 };
 
 static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
        {TSADCV3_DATA_MASK, 125000},
 };
 
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
                                   int temp)
 {
        int high, low, mid;
-       u32 error = 0;
+       unsigned long num;
+       unsigned int denom;
+       u32 error = table->data_mask;
 
        low = 0;
-       high = table.length - 1;
+       high = (table->length - 1) - 1; /* ignore the last check for table */
        mid = (high + low) / 2;
 
        /* Return mask code data when the temp is over table range */
-       if (temp < table.id[low].temp || temp > table.id[high].temp) {
-               error = table.data_mask;
+       if (temp < table->id[low].temp || temp > table->id[high].temp)
                goto exit;
-       }
 
        while (low <= high) {
-               if (temp == table.id[mid].temp)
-                       return table.id[mid].code;
-               else if (temp < table.id[mid].temp)
+               if (temp == table->id[mid].temp)
+                       return table->id[mid].code;
+               else if (temp < table->id[mid].temp)
                        high = mid - 1;
                else
                        low = mid + 1;
                mid = (low + high) / 2;
        }
 
+       /*
+        * The conversion code granularity provided by the table. Let's
+        * assume that the relationship between temperature and
+        * analog value between 2 table entries is linear and interpolate
+        * to produce less granular result.
+        */
+       num = abs(table->id[mid + 1].code - table->id[mid].code);
+       num *= temp - table->id[mid].temp;
+       denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+       switch (table->mode) {
+       case ADC_DECREMENT:
+               return table->id[mid].code - (num / denom);
+       case ADC_INCREMENT:
+               return table->id[mid].code + (num / denom);
+       default:
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return error;
+       }
+
 exit:
-       pr_err("Invalid the conversion, error=%d\n", error);
+       pr_err("%s: invalid temperature, temp=%d error=%d\n",
+              __func__, temp, error);
        return error;
 }
 
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
-                                  int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+                                  u32 code, int *temp)
 {
        unsigned int low = 1;
-       unsigned int high = table.length - 1;
+       unsigned int high = table->length - 1;
        unsigned int mid = (low + high) / 2;
        unsigned int num;
        unsigned long denom;
 
-       WARN_ON(table.length < 2);
+       WARN_ON(table->length < 2);
 
-       switch (table.mode) {
+       switch (table->mode) {
        case ADC_DECREMENT:
-               code &= table.data_mask;
-               if (code < table.id[high].code)
+               code &= table->data_mask;
+               if (code <= table->id[high].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code >= table.id[mid].code &&
-                           code < table.id[mid - 1].code)
+                       if (code >= table->id[mid].code &&
+                           code < table->id[mid - 1].code)
                                break;
-                       else if (code < table.id[mid].code)
+                       else if (code < table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        case ADC_INCREMENT:
-               code &= table.data_mask;
-               if (code < table.id[low].code)
+               code &= table->data_mask;
+               if (code < table->id[low].code)
                        return -EAGAIN;         /* Incorrect reading */
 
                while (low <= high) {
-                       if (code <= table.id[mid].code &&
-                           code > table.id[mid - 1].code)
+                       if (code <= table->id[mid].code &&
+                           code > table->id[mid - 1].code)
                                break;
-                       else if (code > table.id[mid].code)
+                       else if (code > table->id[mid].code)
                                low = mid + 1;
                        else
                                high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
                }
                break;
        default:
-               pr_err("Invalid the conversion table\n");
+               pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+               return -EINVAL;
        }
 
        /*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
         * temperature between 2 table entries is linear and interpolate
         * to produce less granular result.
         */
-       num = table.id[mid].temp - table.id[mid - 1].temp;
-       num *= abs(table.id[mid - 1].code - code);
-       denom = abs(table.id[mid - 1].code - table.id[mid].code);
-       *temp = table.id[mid - 1].temp + (num / denom);
+       num = table->id[mid].temp - table->id[mid - 1].temp;
+       num *= abs(table->id[mid - 1].code - code);
+       denom = abs(table->id[mid - 1].code - table->id[mid].code);
+       *temp = table->id[mid - 1].temp + (num / denom);
 
        return 0;
 }
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
        writel_relaxed(val, regs + TSADCV2_AUTO_CON);
 }
 
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
                               int chn, void __iomem *regs, int *temp)
 {
        u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
        return rk_tsadcv2_code_to_temp(table, val, temp);
 }
 
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
-       u32 alarm_value, int_en;
+       u32 alarm_value;
+       u32 int_en, int_clr;
+
+       /*
+        * In some cases, some sensors didn't need the trip points, the
+        * set_trips will pass {-INT_MAX, INT_MAX} to trigger tsadc alarm
+        * in the end, ignore this case and disable the high temperature
+        * interrupt.
+        */
+       if (temp == INT_MAX) {
+               int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+               int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+               writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+               return 0;
+       }
 
        /* Make sure the value is valid */
        alarm_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (alarm_value == table.data_mask)
-               return;
+       if (alarm_value == table->data_mask)
+               return -ERANGE;
 
-       writel_relaxed(alarm_value & table.data_mask,
+       writel_relaxed(alarm_value & table->data_mask,
                       regs + TSADCV2_COMP_INT(chn));
 
        int_en = readl_relaxed(regs + TSADCV2_INT_EN);
        int_en |= TSADCV2_INT_SRC_EN(chn);
        writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+       return 0;
 }
 
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
-                                 int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+                                int chn, void __iomem *regs, int temp)
 {
        u32 tshut_value, val;
 
        /* Make sure the value is valid */
        tshut_value = rk_tsadcv2_temp_to_code(table, temp);
-       if (tshut_value == table.data_mask)
-               return;
+       if (tshut_value == table->data_mask)
+               return -ERANGE;
 
        writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
 
        /* TSHUT will be valid */
        val = readl_relaxed(regs + TSADCV2_AUTO_CON);
        writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+       return 0;
 }
 
 static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
        dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
                __func__, sensor->id, low, high);
 
-       tsadc->set_alarm_temp(tsadc->table,
-                             sensor->id, thermal->regs, high);
-
-       return 0;
+       return tsadc->set_alarm_temp(&tsadc->table,
+                                    sensor->id, thermal->regs, high);
 }
 
 static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
        const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
        int retval;
 
-       retval = tsadc->get_temp(tsadc->table,
+       retval = tsadc->get_temp(&tsadc->table,
                                 sensor->id, thermal->regs, out_temp);
        dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
                sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
        int error;
 
        tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
-       tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+       error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
                              thermal->tshut_temp);
+       if (error)
+               dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                       __func__, thermal->tshut_temp, error);
 
        sensor->thermal = thermal;
        sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
 
                thermal->chip->set_tshut_mode(id, thermal->regs,
                                              thermal->tshut_mode);
-               thermal->chip->set_tshut_temp(thermal->chip->table,
+
+               error = thermal->chip->set_tshut_temp(&thermal->chip->table,
                                              id, thermal->regs,
                                              thermal->tshut_temp);
+               if (error)
+                       dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+                               __func__, thermal->tshut_temp, error);
        }
 
        thermal->chip->control(thermal->regs, true);
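
Besides the struct-by-pointer conversion, this rework teaches temp_to_code() the same linear interpolation that code_to_temp() already used, so a requested trip temperature no longer snaps to the nearest table entry. The arithmetic, reduced to two bracketing entries of an ADC_DECREMENT table (values taken from the rk3288 table above):

    #include <stdio.h>

    struct entry { unsigned int code; int temp; /* millicelsius */ };

    /* ADC_DECREMENT style: the code falls as the temperature rises */
    static const struct entry tbl[] = {
        { 3437, 120000 },
        { 3421, 125000 },
    };

    static unsigned int temp_to_code(int temp)
    {
        unsigned long num = (unsigned long)(tbl[0].code - tbl[1].code)
                                * (temp - tbl[0].temp);
        unsigned int denom = tbl[1].temp - tbl[0].temp;

        return tbl[0].code - (unsigned int)(num / denom);
    }

    int main(void)
    {
        /* halfway between 120C and 125C lands halfway between the codes */
        printf("%u\n", temp_to_code(122500));   /* 3429 */
        return 0;
    }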
index 641faab..6555913 100644 (file)
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
        if (!strncmp(dev_name(dev), "thermal_zone",
                     sizeof("thermal_zone") - 1)) {
                tz = to_thermal_zone(dev);
+               kfree(tz->trip_type_attrs);
+               kfree(tz->trip_temp_attrs);
+               kfree(tz->trip_hyst_attrs);
+               kfree(tz->trips_attribute_group.attrs);
+               kfree(tz->device.groups);
                kfree(tz);
        } else if (!strncmp(dev_name(dev), "cooling_device",
                            sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
 
        thermal_zone_device_set_polling(tz, 0);
 
-       kfree(tz->trip_type_attrs);
-       kfree(tz->trip_temp_attrs);
-       kfree(tz->trip_hyst_attrs);
-       kfree(tz->trips_attribute_group.attrs);
        thermal_set_governor(tz, NULL);
 
        thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
        idr_destroy(&tz->idr);
        mutex_destroy(&tz->lock);
        device_unregister(&tz->device);
-       kfree(tz->device.groups);
 }
 EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
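
Moving the kfree() calls from unregister into thermal_release() follows the device-model ownership rule: memory that sysfs readers can still reach must live until the last reference to the device is dropped, and release() is the only callback guaranteed to run after that point. The rule, sketched with an explicit refcount standing in for get_device()/put_device():

    #include <stdlib.h>

    struct obj {
        int refs;
        char *attrs;            /* stands in for the trip_*_attrs arrays */
    };

    static void obj_release(struct obj *o)
    {
        free(o->attrs);         /* safe: no reader can reach o anymore */
        free(o);
    }

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0)     /* last reference gone -> release */
            obj_release(o);
    }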
 
index 9548d3e..302b8f5 100644 (file)
@@ -513,8 +513,8 @@ struct dwc2_core_params {
        /* Gadget parameters */
        bool g_dma;
        bool g_dma_desc;
-       u16 g_rx_fifo_size;
-       u16 g_np_tx_fifo_size;
+       u32 g_rx_fifo_size;
+       u32 g_np_tx_fifo_size;
        u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
 };
 
index c55db4a..77c5fcf 100644 (file)
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
            (hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
                __func__, epctrl, epctrl_reg);
 
        /* Allocate DMA descriptor chain for non-ctrl endpoints */
-       if (using_desc_dma(hsotg)) {
-               hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+       if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+               hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
                        MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        &hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
 
 error2:
        if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+               dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
                        sizeof(struct dwc2_dma_desc),
                        hs_ep->desc_list, hs_ep->desc_list_dma);
                hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
                return -EINVAL;
        }
 
-       /* Remove DMA memory allocated for non-control Endpoints */
-       if (using_desc_dma(hsotg)) {
-               dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
-                                 sizeof(struct dwc2_dma_desc),
-                                 hs_ep->desc_list, hs_ep->desc_list_dma);
-               hs_ep->desc_list = NULL;
-       }
-
        epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
 
        spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
        /* keep other bits untouched (so e.g. forced modes are not lost) */
        usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
        usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
-               GUSBCFG_HNPCAP);
+               GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
 
        /* set the PLL on, remove the HNP/SRP and set the PHY */
        trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
index 911c3b3..46d0ad5 100644 (file)
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
        if (!HCD_HW_ACCESSIBLE(hcd))
                goto unlock;
 
+       if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+               goto unlock;
+
        if (!hsotg->params.hibernation)
                goto skip_power_saving;
 
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
 {
 #ifdef VERBOSE_DEBUG
        struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
-       char *pipetype;
-       char *speed;
+       char *pipetype = NULL;
+       char *speed = NULL;
 
        dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
        dev_vdbg(hsotg->dev, "  Device address: %d\n",
index 11fe68a..bcd1e19 100644 (file)
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
 }
 
 /**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
  *
  * See dwc2_set_param().
  */
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
                               bool lookup, char *property, u16 legacy,
                               u16 def, u16 min, u16 max)
 {
        dwc2_set_param(hsotg, param, lookup, property,
-                      legacy, def, min, max, 2);
+                      legacy, def, min, max, 4);
 }
 
 /**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
                 * auto-detect if the hardware does not support the
                 * default.
                 */
-               dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
                                   true, "g-rx-fifo-size", 2048,
                                   hw->rx_fifo_size,
                                   16, hw->rx_fifo_size);
 
-               dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+               dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
                                   true, "g-np-tx-fifo-size", 1024,
                                   hw->dev_nperio_tx_fifo_size,
                                   16, hw->dev_nperio_tx_fifo_size);
index e27899b..e956306 100644 (file)
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
                if (IS_ERR(exynos->axius_clk)) {
                        dev_err(dev, "no AXI UpScaler clk specified\n");
-                       return -ENODEV;
+                       ret = -ENODEV;
+                       goto axius_clk_err;
                }
                clk_prepare_enable(exynos->axius_clk);
        } else {
@@ -196,6 +197,7 @@ err3:
        regulator_disable(exynos->vdd33);
 err2:
        clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
        clk_disable_unprepare(exynos->susp_clk);
        clk_disable_unprepare(exynos->clk);
        return ret;
index 002822d..49d685a 100644 (file)
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
        cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
        if (!cdev->os_desc_req->buf) {
                ret = -ENOMEM;
-               kfree(cdev->os_desc_req);
+               usb_ep_free_request(ep0, cdev->os_desc_req);
                goto end;
        }
        cdev->os_desc_req->context = cdev;
index 5e746ad..5490fc5 100644 (file)
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
        unsigned long flags;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                /* pending requests get nuked */
                if (likely(ep->ep))
                        usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
                        __ffs_epfile_read_buffer_free(epfile);
                        ++epfile;
                }
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 }
 
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
        int ret = 0;
 
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                struct usb_endpoint_descriptor *ds;
                int desc_idx;
 
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
 
                ++ep;
                ++epfile;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
        return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
 
        /* cleanup after autoconfig */
        spin_lock_irqsave(&func->ffs->eps_lock, flags);
-       do {
+       while (count--) {
                if (ep->ep && ep->req)
                        usb_ep_free_request(ep->ep, ep->req);
                ep->req = NULL;
                ++ep;
-       } while (--count);
+       }
        spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
        kfree(func->eps);
        func->eps = NULL;
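
The three loop conversions in f_fs.c above all fix the same pattern: `do { ... } while (--count);` executes the body once and then wraps the counter when count starts at zero, whereas `while (count--)` degrades gracefully. A standalone sketch of the difference (names are illustrative, not from the driver):

    #include <stdio.h>

    static unsigned int bodies_run(unsigned int count, int use_do_while)
    {
            unsigned int ran = 0;

            if (use_do_while) {
                    do {                    /* old pattern: runs once even for 0, */
                            ran++;          /* then --count wraps to UINT_MAX     */
                    } while (--count && ran < 5);   /* capped here for the demo */
            } else {
                    while (count--)         /* fixed pattern: safe for count == 0 */
                            ran++;
            }
            return ran;
    }

    int main(void)
    {
            printf("do/while with 0: %u iterations (runaway)\n", bodies_run(0, 1));
            printf("while with 0:    %u iterations\n", bodies_run(0, 0));
            return 0;
    }
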
index f3212db..12c7687 100644 (file)
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
                        dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
                        goto err;
                }
-               ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+               sprintf(ep->name, "ep%d", ep->index);
+               ep->ep.name = ep->name;
 
                ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
                ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
index 3e1c9d5..b03b2eb 100644 (file)
@@ -280,6 +280,7 @@ struct usba_ep {
        void __iomem                            *ep_regs;
        void __iomem                            *dma_regs;
        void __iomem                            *fifo;
+       char                                    name[8];
        struct usb_ep                           ep;
        struct usba_udc                         *udc;
 
index ddfab30..e5834dd 100644 (file)
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
                return -ENODEV;
 
        /* Try to set 64-bit DMA first */
-       if (WARN_ON(!pdev->dev.dma_mask))
+       if (!pdev->dev.dma_mask)
                /* Platform did not initialize dma_mask */
                ret = dma_coerce_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(64));
index c882357..128d102 100644 (file)
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
        /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
                        iommu_group_id(iommu_group), iommu_group); */
        table_group = iommu_group_get_iommudata(iommu_group);
+       if (!table_group) {
+               ret = -ENODEV;
+               goto unlock_exit;
+       }
 
        if (tce_groups_attached(container) && (!table_group->ops ||
                        !table_group->ops->take_ownership ||
index 253310c..fd6c8b6 100644 (file)
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
-       unsigned out, in;
+       unsigned int out = 0, in = 0;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
        NULL,
 };
 
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
        .module                         = THIS_MODULE,
        .name                           = "vhost",
        .get_fabric_name                = vhost_scsi_get_fabric_name,
index bbbf588..ce5e63d 100644 (file)
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
 
 static int vhost_vsock_start(struct vhost_vsock *vsock)
 {
+       struct vhost_virtqueue *vq;
        size_t i;
        int ret;
 
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
                goto err;
 
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
 
                if (!vhost_vq_access_ok(vq)) {
                        ret = -EFAULT;
-                       mutex_unlock(&vq->mutex);
                        goto err_vq;
                }
 
                if (!vq->private_data) {
                        vq->private_data = vsock;
-                       vhost_vq_init_access(vq);
+                       ret = vhost_vq_init_access(vq);
+                       if (ret)
+                               goto err_vq;
                }
 
                mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
        return 0;
 
 err_vq:
+       vq->private_data = NULL;
+       mutex_unlock(&vq->mutex);
+
        for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
-               struct vhost_virtqueue *vq = &vsock->vqs[i];
+               vq = &vsock->vqs[i];
 
                mutex_lock(&vq->mutex);
                vq->private_data = NULL;
index f89245b..68a1135 100644 (file)
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
 
 int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
 
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
 
 int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 {
-       int tooff = 0, fromoff = 0;
-       int size;
+       unsigned int tooff = 0, fromoff = 0;
+       size_t size;
 
        if (to->start > from->start)
                fromoff = to->start - from->start;
        else
                tooff = from->start - to->start;
-       size = to->len - tooff;
-       if (size > (int) (from->len - fromoff))
-               size = from->len - fromoff;
-       if (size <= 0)
+       if (fromoff >= from->len || tooff >= to->len)
+               return -EINVAL;
+
+       size = min_t(size_t, to->len - tooff, from->len - fromoff);
+       if (size == 0)
                return -EINVAL;
        size *= sizeof(u16);
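
Both cmap copy fixes above replace signed length arithmetic with unsigned offsets plus explicit range checks: with `int`, `to->len - tooff` can go negative (or, with crafted lengths, wrap) before the old `size <= 0` test catches it. A small sketch of the clamped computation, using plain C types in place of the fb_cmap fields:

    #include <stddef.h>
    #include <stdio.h>

    /* Returns the number of bytes to copy, or 0 where the kernel code
     * would bail out with -EINVAL. Entries are u16, as in fb_cmap. */
    static size_t cmap_copy_bytes(unsigned int to_len, unsigned int tooff,
                                  unsigned int from_len, unsigned int fromoff)
    {
            size_t size;

            if (fromoff >= from_len || tooff >= to_len)
                    return 0;                       /* -EINVAL in the driver */

            size = to_len - tooff;                  /* cannot underflow now */
            if (size > from_len - fromoff)
                    size = from_len - fromoff;
            return size * sizeof(unsigned short);
    }

    int main(void)
    {
            printf("%zu\n", cmap_copy_bytes(16, 4, 256, 0));   /* 24 bytes */
            printf("%zu\n", cmap_copy_bytes(16, 20, 256, 0));  /* 0: rejected */
            return 0;
    }
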
 
index d47a2fc..c71fde5 100644 (file)
@@ -59,6 +59,7 @@
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
 #include <linux/acpi.h>
+#include <linux/dma-mapping.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        struct virtio_mmio_device *vm_dev;
        struct resource *mem;
        unsigned long magic;
+       int rc;
 
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
        }
        vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
 
-       if (vm_dev->version == 1)
+       if (vm_dev->version == 1) {
                writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
 
+               rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+               /*
+                * In the legacy case, ensure our coherently-allocated virtio
+                * ring will be at an address expressible as a 32-bit PFN.
+                */
+               if (!rc)
+                       dma_set_coherent_mask(&pdev->dev,
+                                             DMA_BIT_MASK(32 + PAGE_SHIFT));
+       } else {
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       }
+       if (rc)
+               rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+       if (rc)
+               dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");
+
        platform_set_drvdata(pdev, vm_dev);
 
        return register_virtio_device(&vm_dev->vdev);
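
The branch added above distinguishes the two device generations: per the in-line comment, a legacy (version 1) device passes the ring base to the host as a 32-bit page frame number, so coherent allocations must land below 2^(32 + PAGE_SHIFT), while modern devices take full 64-bit addresses. A quick worked computation of that legacy limit, assuming 4 KiB pages:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

    int main(void)
    {
            /* DMA_BIT_MASK(32 + PAGE_SHIFT): highest address a 32-bit PFN
             * can describe once the device multiplies it by the page size. */
            uint64_t mask = (1ULL << (32 + PAGE_SHIFT)) - 1;

            printf("legacy coherent mask: %#llx\n", (unsigned long long)mask);
            printf("reachable memory:     %llu TiB\n",
                   (unsigned long long)((mask + 1) >> 40));
            return 0;
    }
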
index 409aeaa..7e38ed7 100644 (file)
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
        if (xen_domain())
                return true;
 
+       /*
+        * On ARM-based machines, the DMA ops will do the right thing,
+        * so always use them with legacy devices.
+        */
+       if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+               return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
        return false;
 }
 
index 112ce42..2a165cc 100644 (file)
@@ -42,6 +42,7 @@
 static unsigned long platform_mmio;
 static unsigned long platform_mmio_alloc;
 static unsigned long platform_mmiolen;
+static uint64_t callback_via;
 
 static unsigned long alloc_xen_mmio(unsigned long len)
 {
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
        return addr;
 }
 
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+       u8 pin;
+       int irq;
+
+       irq = pdev->irq;
+       if (irq < 16)
+               return irq; /* ISA IRQ */
+
+       pin = pdev->pin;
+
+       /* We don't know the GSI. Specify the PCI INTx line instead. */
+       return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+               ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+               ((uint64_t)pdev->bus->number << 16) |
+               ((uint64_t)(pdev->devfn & 0xff) << 8) |
+               ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+       xen_hvm_evtchn_do_upcall();
+       return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+       return request_irq(pdev->irq, do_hvm_evtchn_intr,
+                       IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+                       "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+       int err;
+       if (!xen_pv_domain())
+               return 0;
+       err = xen_set_callback_via(callback_via);
+       if (err) {
+               dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+               return err;
+       }
+       return 0;
+}
+
 static int platform_pci_probe(struct pci_dev *pdev,
                              const struct pci_device_id *ent)
 {
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
        platform_mmio = mmio_addr;
        platform_mmiolen = mmio_len;
 
+       /*
+        * Xen HVM guests always use the vector callback mechanism.
+        * L1 Dom0 in a nested Xen environment is a PV guest inside an
+        * HVM environment. It needs the platform-pci driver to get
+        * notifications from L0 Xen, but it cannot use the vector callback
+        * as it is not exported by L1 Xen.
+        */
+       if (xen_pv_domain()) {
+               ret = xen_allocate_irq(pdev);
+               if (ret) {
+                       dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+                       goto out;
+               }
+               callback_via = get_callback_via(pdev);
+               ret = xen_set_callback_via(callback_via);
+               if (ret) {
+                       dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+                                        "err=%d\n", ret);
+                       goto out;
+               }
+       }
+
        max_nr_gframes = gnttab_max_grant_frames();
        grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
        ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
        .name =           DRV_NAME,
        .probe =          platform_pci_probe,
        .id_table =       platform_pci_tbl,
+#ifdef CONFIG_PM
+       .resume_early =   platform_pci_resume,
+#endif
 };
 
 builtin_pci_driver(platform_driver);
index f905d6e..f8afc6d 100644 (file)
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;
 
+       dev_addr = xen_phys_to_bus(map);
        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                                        dev_addr, map & ~PAGE_MASK, size, dir, attrs);
-       dev_addr = xen_phys_to_bus(map);
 
        /*
         * Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
+                       dev_addr = xen_phys_to_bus(map);
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                                dev_addr,
                                                map & ~PAGE_MASK,
                                                sg->length,
                                                dir,
                                                attrs);
-                       sg->dma_address = xen_phys_to_bus(map);
+                       sg->dma_address = dev_addr;
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache flushes executed
index c2a377c..83eab52 100644 (file)
@@ -38,6 +38,7 @@ config FS_DAX
        bool "Direct Access (DAX) support"
        depends on MMU
        depends on !(ARM || MIPS || SPARC)
+       select FS_IOMAP
        help
          Direct Access (DAX) can be used on memory-backed block devices.
          If the block device supports DAX and the filesystem supports DAX,
index 5db5d13..3c47614 100644 (file)
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
-       bool is_read = (iov_iter_rw(iter) == READ);
+       bool is_read = (iov_iter_rw(iter) == READ), is_sync;
        loff_t pos = iocb->ki_pos;
        blk_qc_t qc = BLK_QC_T_NONE;
        int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        bio_get(bio); /* extra ref for the completion handler */
 
        dio = container_of(bio, struct blkdev_dio, bio);
-       dio->is_sync = is_sync_kiocb(iocb);
+       dio->is_sync = is_sync = is_sync_kiocb(iocb);
        if (dio->is_sync)
                dio->waiter = current;
        else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        }
        blk_finish_plug(&plug);
 
-       if (!dio->is_sync)
+       if (!is_sync)
                return -EIOCBQUEUED;
 
        for (;;) {
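
Caching is_sync in a local above likely closes a use-after-free window: for AIO, the completion side can free the dio as soon as the last bio is submitted, so re-reading dio->is_sync after blk_finish_plug() may touch freed memory. A userspace sketch of the pattern (the struct and submit() are illustrative stand-ins):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct dio { bool is_sync; };

    /* Stand-in for bio submission whose completion side frees the dio. */
    static void submit(struct dio *d)
    {
            free(d);
    }

    int main(void)
    {
            struct dio *dio = malloc(sizeof(*dio));

            if (!dio)
                    return 1;
            dio->is_sync = false;                   /* async request */
            bool is_sync = dio->is_sync;            /* cache before handing off */

            submit(dio);                            /* dio may be freed from here */

            if (!is_sync)                           /* dio->is_sync here would be UAF */
                    puts("-EIOCBQUEUED");
            return 0;
    }
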
index 4e02426..1e861a0 100644 (file)
@@ -3835,10 +3835,7 @@ cache_acl:
                break;
        case S_IFDIR:
                inode->i_fop = &btrfs_dir_file_operations;
-               if (root == fs_info->tree_root)
-                       inode->i_op = &btrfs_dir_ro_inode_operations;
-               else
-                       inode->i_op = &btrfs_dir_inode_operations;
+               inode->i_op = &btrfs_dir_inode_operations;
                break;
        case S_IFLNK:
                inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
                if (found_type > min_type) {
                        del_item = 1;
                } else {
-                       if (item_end < new_size)
+                       if (item_end < new_size) {
+                               /*
+                                * With NO_HOLES mode, for the following mapping
+                                *
+                                * [0-4k][hole][8k-12k]
+                                *
+                                * truncating isize down to 6k ends up with
+                                * isize being 8k.
+                                */
+                               if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+                                       last_size = new_size;
                                break;
+                       }
                        if (found_key.offset >= new_size)
                                del_item = 1;
                        else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
 
        inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
        inode->i_op = &btrfs_dir_ro_inode_operations;
+       inode->i_opflags &= ~IOP_XATTR;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
        inode->i_mtime = current_time(inode);
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
        struct extent_map *em = NULL;
        int ret;
 
-       down_read(&BTRFS_I(inode)->dio_sem);
        if (type != BTRFS_ORDERED_NOCOW) {
                em = create_pinned_em(inode, start, len, orig_start,
                                      block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
                em = ERR_PTR(ret);
        }
  out:
-       up_read(&BTRFS_I(inode)->dio_sem);
 
        return em;
 }
@@ -8692,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                dio_data.unsubmitted_oe_range_start = (u64)offset;
                dio_data.unsubmitted_oe_range_end = (u64)offset;
                current->journal_info = &dio_data;
+               down_read(&BTRFS_I(inode)->dio_sem);
        } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
                                     &BTRFS_I(inode)->runtime_flags)) {
                inode_dio_end(inode);
@@ -8704,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                                   iter, btrfs_get_blocks_direct, NULL,
                                   btrfs_submit_direct, flags);
        if (iov_iter_rw(iter) == WRITE) {
+               up_read(&BTRFS_I(inode)->dio_sem);
                current->journal_info = NULL;
                if (ret < 0 && ret != -EIOCBQUEUED) {
                        if (dio_data.reserve)
@@ -9212,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
                        break;
                }
 
+               btrfs_block_rsv_release(fs_info, rsv, -1);
                ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
                                              rsv, min_size, 0);
                BUG_ON(ret);    /* shouldn't happen */
@@ -10579,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
 static const struct inode_operations btrfs_dir_ro_inode_operations = {
        .lookup         = btrfs_lookup,
        .permission     = btrfs_permission,
-       .get_acl        = btrfs_get_acl,
-       .set_acl        = btrfs_set_acl,
        .update_time    = btrfs_update_time,
 };
 
index baea866..94fd76d 100644 (file)
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                        add_wait_queue(&ci->i_cap_wq, &wait);
 
                        while (!try_get_cap_refs(ci, need, want, endoff,
-                                                true, &_got, &err))
+                                                true, &_got, &err)) {
+                               if (signal_pending(current)) {
+                                       ret = -ERESTARTSYS;
+                                       break;
+                               }
                                wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+                       }
 
                        remove_wait_queue(&ci->i_cap_wq, &wait);
 
index d7a9369..8ab1fdf 100644 (file)
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                struct ceph_mds_client *mdsc =
                        ceph_sb_to_client(dir->i_sb)->mdsc;
                struct ceph_mds_request *req;
-               int op, mask, err;
+               int op, err;
+               u32 mask;
 
                if (flags & LOOKUP_RCU)
                        return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                        mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
                        if (ceph_security_xattr_wanted(dir))
                                mask |= CEPH_CAP_XATTR_SHARED;
-                       req->r_args.getattr.mask = mask;
+                       req->r_args.getattr.mask = cpu_to_le32(mask);
 
                        err = ceph_mdsc_do_request(mdsc, NULL, req);
                        switch (err) {
index 398e532..5e659d0 100644 (file)
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
 {
        struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
        struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
-       return ceph_frag_compare(ls->frag, rs->frag);
+       return ceph_frag_compare(le32_to_cpu(ls->frag),
+                                le32_to_cpu(rs->frag));
 }
 
 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
index ec6b35e..c9d2e55 100644 (file)
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
                                  struct ceph_mds_reply_info_parsed *info,
                                  u64 features)
 {
-       if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+       u32 op = le32_to_cpu(info->head->op);
+
+       if (op == CEPH_MDS_OP_GETFILELOCK)
                return parse_reply_info_filelock(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_READDIR ||
-                info->head->op == CEPH_MDS_OP_LSSNAP)
+       else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
                return parse_reply_info_dir(p, end, info, features);
-       else if (info->head->op == CEPH_MDS_OP_CREATE)
+       else if (op == CEPH_MDS_OP_CREATE)
                return parse_reply_info_create(p, end, info, features);
        else
                return -EIO;
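
The three ceph hunks above share one theme: fields like op and mask travel on the wire in little-endian byte order, so they must pass through le32_to_cpu()/cpu_to_le32() at the boundary; comparing or storing them raw happens to work on x86 and silently breaks on big-endian hosts. A portable sketch of the decode step, where le32_decode() stands in for le32_to_cpu() applied to a byte buffer:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a little-endian u32 regardless of host byte order. */
    static uint32_t le32_decode(const uint8_t *p)
    {
            return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
                   (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
            uint8_t wire[4] = { 0x2c, 0x01, 0x00, 0x00 };   /* illustrative opcode */
            uint32_t op = le32_decode(wire);

            /* Compare the host-order value, never the raw in-memory word. */
            printf("op = %#x\n", op);       /* 0x12c on every architecture */
            return 0;
    }
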
index ddcddfe..3af2da5 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -990,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
 
-#ifdef CONFIG_FS_IOMAP
 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
 {
        return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1428,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
 #endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
index 36bea5a..c634874 100644 (file)
@@ -1,6 +1,5 @@
 config EXT2_FS
        tristate "Second extended fs support"
-       select FS_IOMAP if FS_DAX
        help
          Ext2 is a standard Linux file system for hard disks.
 
index 7b90691..e38039f 100644 (file)
@@ -37,7 +37,6 @@ config EXT4_FS
        select CRC16
        select CRYPTO
        select CRYPTO_CRC32C
-       select FS_IOMAP if FS_DAX
        help
          This is the next generation of the ext3 filesystem.
 
index 70ea57c..4e06a27 100644 (file)
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
-               clear_bit(FR_PENDING, &req->flags);
                clear_bit(FR_SENT, &req->flags);
                list_del_init(&req->list);
                request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
                spin_lock(&fiq->waitq.lock);
                fiq->connected = 0;
                list_splice_init(&fiq->pending, &to_end2);
+               list_for_each_entry(req, &to_end2, list)
+                       clear_bit(FR_PENDING, &req->flags);
                while (forget_pending(fiq))
                        kfree(dequeue_forget(fiq, 1, NULL));
                wake_up_all_locked(&fiq->waitq);
index 1f7c732..811fd89 100644 (file)
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
        if (sec || nsec) {
                struct timespec64 ts = {
                        sec,
-                       max_t(u32, nsec, NSEC_PER_SEC - 1)
+                       min_t(u32, nsec, NSEC_PER_SEC - 1)
                };
 
                return get_jiffies_64() + timespec64_to_jiffies(&ts);
index 9ad48d9..023bb0b 100644 (file)
@@ -154,29 +154,38 @@ out_err:
 static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
                            struct dentry **ret)
 {
-       const char *s = d->name.name;
+       /* Counting down from the end, since the prefix can change */
+       size_t rem = d->name.len - 1;
        struct dentry *dentry = NULL;
        int err;
 
-       if (*s != '/')
+       if (d->name.name[0] != '/')
                return ovl_lookup_single(base, d, d->name.name, d->name.len,
                                         0, "", ret);
 
-       while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+       while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+               const char *s = d->name.name + d->name.len - rem;
                const char *next = strchrnul(s, '/');
-               size_t slen = strlen(s);
+               size_t thislen = next - s;
+               bool end = !next[0];
 
-               if (WARN_ON(slen > d->name.len) ||
-                   WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+               /* Verify we did not go off the rails */
+               if (WARN_ON(s[-1] != '/'))
                        return -EIO;
 
-               err = ovl_lookup_single(base, d, s, next - s,
-                                       d->name.len - slen, next, &base);
+               err = ovl_lookup_single(base, d, s, thislen,
+                                       d->name.len - rem, next, &base);
                dput(dentry);
                if (err)
                        return err;
                dentry = base;
-               s = next;
+               if (end)
+                       break;
+
+               rem -= thislen + 1;
+
+               if (WARN_ON(rem >= d->name.len))
+                       return -EIO;
        }
        *ret = dentry;
        return 0;
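
The rewritten walk above tracks rem, the number of bytes still unprocessed at the tail of the name, instead of a pointer into the front of the string; as the added comment notes, the prefix can change while the suffix being consumed stays stable. A standalone sketch of the suffix-based split (strchr stands in for the kernel's strchrnul):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char name[] = "/upper/dir/file";
            size_t len = strlen(name);
            size_t rem = len - 1;           /* everything after the leading '/' */

            while (rem) {
                    const char *s = name + len - rem;       /* recompute each pass */
                    const char *next = strchr(s, '/');
                    size_t thislen = next ? (size_t)(next - s) : rem;

                    printf("component: %.*s\n", (int)thislen, s);
                    if (!next)
                            break;
                    rem -= thislen + 1;     /* skip the component and its '/' */
            }
            return 0;
    }
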
index 8e7e61b..87c9a9a 100644 (file)
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
             iter.tgid += 1, iter = next_tgid(ns, iter)) {
                char name[PROC_NUMBUF];
                int len;
+
+               cond_resched();
                if (!has_pid_permissions(ns, iter.task, 2))
                        continue;
 
index d0f8a38..0186fe6 100644 (file)
@@ -74,6 +74,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/uaccess.h>
+#include <linux/major.h>
 #include "internal.h"
 
 static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
 static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct super_block *sb = dentry->d_sb;
-       u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+       u64 id = 0;
+
+       /* When calling huge_encode_dev():
+        *   - use sb->s_bdev->bd_dev when CONFIG_ROMFS_ON_BLOCK is defined
+        *   - use sb->s_dev when CONFIG_ROMFS_ON_BLOCK is undefined and
+        *     CONFIG_ROMFS_ON_MTD is defined
+        *   - leave id as 0 when neither is defined
+        */
+       if (sb->s_bdev)
+               id = huge_encode_dev(sb->s_bdev->bd_dev);
+       else if (sb->s_dev)
+               id = huge_encode_dev(sb->s_dev);
 
        buf->f_type = ROMFS_MAGIC;
        buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_flags |= MS_RDONLY | MS_NOATIME;
        sb->s_op = &romfs_super_ops;
 
+#ifdef CONFIG_ROMFS_ON_MTD
+       /* Use same dev ID from the underlying mtdblock device */
+       if (sb->s_mtd)
+               sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
        /* read the image superblock and check it */
        rsb = kmalloc(512, GFP_KERNEL);
        if (!rsb)
index 0a908ae..b0d0623 100644 (file)
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
 
 config UBIFS_FS_ENCRYPTION
        bool "UBIFS Encryption"
-       depends on UBIFS_FS
+       depends on UBIFS_FS && BLOCK
        select FS_ENCRYPTION
        default n
        help
index 1c5331a..528369f 100644 (file)
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
        dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
                dentry, mode, dir->i_ino);
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       return -EPERM;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        ubifs_assert(inode_is_locked(dir));
        ubifs_assert(inode_is_locked(inode));
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               if (!fscrypt_has_permitted_context(dir, inode))
-                       return -EPERM;
-
-               err = fscrypt_get_encryption_info(inode);
-               if (err)
-                       return err;
-
-               if (!fscrypt_has_encryption_key(inode))
-                       return -EPERM;
-       }
+       if (ubifs_crypt_is_encrypted(dir) &&
+           !fscrypt_has_permitted_context(dir, inode))
+               return -EPERM;
 
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
        if (err)
                return err;
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
                return err;
        }
 
-       if (ubifs_crypt_is_encrypted(dir)) {
-               err = fscrypt_get_encryption_info(dir);
-               if (err)
-                       goto out_budg;
-
-               if (!fscrypt_has_encryption_key(dir)) {
-                       err = -EPERM;
-                       goto out_budg;
-               }
-       }
-
        err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
        if (err)
                goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
                        goto out_inode;
                }
 
-               err = fscrypt_get_encryption_info(inode);
-               if (err) {
-                       kfree(sd);
-                       goto out_inode;
-               }
-
-               if (!fscrypt_has_encryption_key(inode)) {
-                       kfree(sd);
-                       err = -EPERM;
-                       goto out_inode;
-               }
-
                ostr.name = sd->encrypted_path;
                ostr.len = disk_link.len;
 
index 78d7136..da519ba 100644 (file)
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case FS_IOC32_SETFLAGS:
                cmd = FS_IOC_SETFLAGS;
                break;
+       case FS_IOC_SET_ENCRYPTION_POLICY:
+       case FS_IOC_GET_ENCRYPTION_POLICY:
+               break;
        default:
                return -ENOIOCTLCMD;
        }
index a459211..294519b 100644 (file)
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
 
        } else {
                data->compr_size = 0;
+               out_len = compr_len;
        }
 
        dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
        dn->compr_type = cpu_to_le16(compr_type);
        dn->size = cpu_to_le32(*new_len);
        *new_len = UBIFS_DATA_NODE_SZ + out_len;
+       err = 0;
 out:
        kfree(buf);
        return err;
index 74ae2de..709aa09 100644 (file)
 #include <linux/slab.h>
 #include "ubifs.h"
 
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+                        int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+                             struct ubifs_zbranch *zbr, void *node);
+
 /*
  * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
  * @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
                return 0;
        }
 
-       err = ubifs_tnc_read_node(c, zbr, node);
+       if (c->replaying) {
+               err = fallible_read_node(c, &zbr->key, zbr, node);
+               /*
+                * Map the fallible_read_node() result: 0 (node not
+                * found) becomes -ENOENT, 1 (node read) becomes 0;
+                * negative error codes stay as-is.
+                */
+               if (err == 0)
+                       err = -ENOENT;
+               else if (err == 1)
+                       err = 0;
+       } else {
+               err = ubifs_tnc_read_node(c, zbr, node);
+       }
        if (err)
                return err;
 
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
        if (fname_len(nm) > 0) {
                if (err) {
                        /* Handle collisions */
-                       err = resolve_collision(c, key, &znode, &n, nm);
+                       if (c->replaying)
+                               err = fallible_resolve_collision(c, key, &znode, &n,
+                                                        nm, 0);
+                       else
+                               err = resolve_collision(c, key, &znode, &n, nm);
                        dbg_tnc("rc returned %d, znode %p, n %d",
                                err, znode, n);
                        if (unlikely(err < 0))
index d96e2f3..43953e0 100644 (file)
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
        struct uffd_msg msg;
        wait_queue_t wq;
        struct userfaultfd_ctx *ctx;
+       bool waken;
 };
 
 struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
        if (len && (start > uwq->msg.arg.pagefault.address ||
                    start + len <= uwq->msg.arg.pagefault.address))
                goto out;
+       WRITE_ONCE(uwq->waken, true);
+       /*
+        * The implicit smp_mb__before_spinlock in try_to_wake_up()
+        * renders uwq->waken visible to other CPUs before the task is
+        * waken.
+        */
        ret = wake_up_state(wq->private, mode);
        if (ret)
                /*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        struct userfaultfd_wait_queue uwq;
        int ret;
        bool must_wait, return_to_userland;
+       long blocking_state;
 
        BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
        uwq.wq.private = current;
        uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
        uwq.ctx = ctx;
+       uwq.waken = false;
 
        return_to_userland =
                (vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
                (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+       blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+                        TASK_KILLABLE;
 
        spin_lock(&ctx->fault_pending_wqh.lock);
        /*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
         * following the spin_unlock to happen before the list_add in
         * __add_wait_queue.
         */
-       set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
-                         TASK_KILLABLE);
+       set_current_state(blocking_state);
        spin_unlock(&ctx->fault_pending_wqh.lock);
 
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
                wake_up_poll(&ctx->fd_wqh, POLLIN);
                schedule();
                ret |= VM_FAULT_MAJOR;
+
+               /*
+                * False wakeups can originate even from rwsem before
+                * up_read(); however, userfaults will wait either for a
+                * targeted wakeup on the specific uwq waitqueue from
+                * wake_userfault() or for signals or for uffd
+                * release.
+                */
+               while (!READ_ONCE(uwq.waken)) {
+                       /*
+                        * This needs the full smp_store_mb()
+                        * guarantee as the state write must be
+                        * visible to other CPUs before reading
+                        * uwq.waken from other CPUs.
+                        */
+                       set_current_state(blocking_state);
+                       if (READ_ONCE(uwq.waken) ||
+                           READ_ONCE(ctx->released) ||
+                           (return_to_userland ? signal_pending(current) :
+                            fatal_signal_pending(current)))
+                               break;
+                       schedule();
+               }
        }
 
        __set_current_state(TASK_RUNNING);
index d346d42..33db69b 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
 #include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
        struct xfs_mount                *mp = pag->pag_mount;
        struct xfs_ag_resv              *resv;
        int                             error;
+       xfs_extlen_t                    reserved;
 
-       resv = xfs_perag_resv(pag, type);
        if (used > ask)
                ask = used;
-       resv->ar_asked = ask;
-       resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-       mp->m_ag_max_usable -= ask;
+       reserved = ask - used;
 
-       trace_xfs_ag_resv_init(pag, type, ask);
-
-       error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-       if (error)
+       error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+       if (error) {
                trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
                                error, _RET_IP_);
+               xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+                               pag->pag_agno);
+               return error;
+       }
 
-       return error;
+       mp->m_ag_max_usable -= ask;
+
+       resv = xfs_perag_resv(pag, type);
+       resv->ar_asked = ask;
+       resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+       trace_xfs_ag_resv_init(pag, type, ask);
+       return 0;
 }
 
 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
 xfs_ag_resv_init(
        struct xfs_perag                *pag)
 {
+       struct xfs_mount                *mp = pag->pag_mount;
+       xfs_agnumber_t                  agno = pag->pag_agno;
        xfs_extlen_t                    ask;
        xfs_extlen_t                    used;
        int                             error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
        if (pag->pag_meta_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-                               pag->pag_agno, &ask, &used);
+               error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
-               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
-                               ask, used);
+               error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
+
+               error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                               ask, used);
+               if (error) {
+                       /*
+                        * Because we didn't have per-AG reservations when the
+                        * finobt feature was added, we might not be able to
+                        * reserve all needed blocks.  Warn and fall back to the
+                        * old and potentially buggy code in that case, but
+                        * ensure we do have the reservation for the refcountbt.
+                        */
+                       ask = used = 0;
+
+                       mp->m_inotbt_nores = true;
+
+                       error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+                                       &used);
+                       if (error)
+                               goto out;
+
+                       error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+                                       ask, used);
+                       if (error)
+                               goto out;
+               }
        }
 
        /* Create the AGFL metadata reservation */
        if (pag->pag_agfl_resv.ar_asked == 0) {
                ask = used = 0;
 
-               error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-                               &ask, &used);
+               error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
                if (error)
                        goto out;
 
@@ -256,9 +289,16 @@ xfs_ag_resv_init(
                        goto out;
        }
 
+#ifdef DEBUG
+       /* need to read in the AGF for the ASSERT below to work */
+       error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+       if (error)
+               return error;
+
        ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
               xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
               pag->pagf_freeblks + pag->pagf_flcount);
+#endif
 out:
        return error;
 }
index af1ecb1..6622d46 100644 (file)
@@ -131,9 +131,6 @@ xfs_attr_get(
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(ip))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, ip, name, flags);
        if (error)
                return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;
 
-       if (!xfs_inode_hasattr(dp))
-               return -ENOATTR;
-
        error = xfs_attr_args_init(&args, dp, name, flags);
        if (error)
                return error;
index 44773c9..bfc00de 100644 (file)
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
                align = xfs_get_cowextsz_hint(ap->ip);
        else if (xfs_alloc_is_userdata(ap->datatype))
                align = xfs_get_extsz_hint(ap->ip);
-       if (unlikely(align)) {
+       if (align) {
                error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                                align, 0, ap->eof, 0, ap->conv,
                                                &ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
                args.minlen = ap->minlen;
        }
        /* apply extent size hints if obtained earlier */
-       if (unlikely(align)) {
+       if (align) {
                args.prod = align;
                if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
                        args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -4514,8 +4514,6 @@ xfs_bmapi_write(
        int                     n;              /* current extent index */
        xfs_fileoff_t           obno;           /* old block number (offset) */
        int                     whichfork;      /* data or attr fork */
-       char                    inhole;         /* current location is hole in file */
-       char                    wasdelay;       /* old extent was delayed */
 
 #ifdef DEBUG
        xfs_fileoff_t           orig_bno;       /* original block number value */
@@ -4603,22 +4601,44 @@ xfs_bmapi_write(
        bma.firstblock = firstblock;
 
        while (bno < end && n < *nmap) {
-               inhole = eof || bma.got.br_startoff > bno;
-               wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+               bool                    need_alloc = false, wasdelay = false;
 
-               /*
-                * Make sure we only reflink into a hole.
-                */
-               if (flags & XFS_BMAPI_REMAP)
-                       ASSERT(inhole);
-               if (flags & XFS_BMAPI_COWFORK)
-                       ASSERT(!inhole);
+               /* in hole or beyond EOF? */
+               if (eof || bma.got.br_startoff > bno) {
+                       if (flags & XFS_BMAPI_DELALLOC) {
+                               /*
+                                * For the COW fork we can reasonably get a
+                                * request for converting an extent that races
+                                * with other threads already having converted
+                                * part of it, since converting COW extents to
+                                * regular blocks is not protected by the
+                                * IOLOCK.
+                                */
+                               ASSERT(flags & XFS_BMAPI_COWFORK);
+                               if (!(flags & XFS_BMAPI_COWFORK)) {
+                                       error = -EIO;
+                                       goto error0;
+                               }
+
+                               if (eof || bno >= end)
+                                       break;
+                       } else {
+                               need_alloc = true;
+                       }
+               } else {
+                       /*
+                        * Make sure we only reflink into a hole.
+                        */
+                       ASSERT(!(flags & XFS_BMAPI_REMAP));
+                       if (isnullstartblock(bma.got.br_startblock))
+                               wasdelay = true;
+               }
 
                /*
                 * First, deal with the hole before the allocated space
                 * that we found, if any.
                 */
-               if (inhole || wasdelay) {
+               if (need_alloc || wasdelay) {
                        bma.eof = eof;
                        bma.conv = !!(flags & XFS_BMAPI_CONVERT);
                        bma.wasdel = wasdelay;
index cecd094..cdef87d 100644 (file)
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
 /* Map something in the CoW fork. */
 #define XFS_BMAPI_COWFORK      0x200
 
+/* Only convert delalloc space, don't allocate entirely new extents */
+#define XFS_BMAPI_DELALLOC     0x400
+
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
        { XFS_BMAPI_METADATA,   "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
        { XFS_BMAPI_CONVERT,    "CONVERT" }, \
        { XFS_BMAPI_ZERO,       "ZERO" }, \
        { XFS_BMAPI_REMAP,      "REMAP" }, \
-       { XFS_BMAPI_COWFORK,    "COWFORK" }
+       { XFS_BMAPI_COWFORK,    "COWFORK" }, \
+       { XFS_BMAPI_DELALLOC,   "DELALLOC" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index c58d72c..2f389d3 100644 (file)
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
 /*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
+ * Convert inode mode to directory entry filetype
  */
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
-       [0]                     = XFS_DIR3_FT_UNKNOWN,
-       [S_IFREG >> S_SHIFT]    = XFS_DIR3_FT_REG_FILE,
-       [S_IFDIR >> S_SHIFT]    = XFS_DIR3_FT_DIR,
-       [S_IFCHR >> S_SHIFT]    = XFS_DIR3_FT_CHRDEV,
-       [S_IFBLK >> S_SHIFT]    = XFS_DIR3_FT_BLKDEV,
-       [S_IFIFO >> S_SHIFT]    = XFS_DIR3_FT_FIFO,
-       [S_IFSOCK >> S_SHIFT]   = XFS_DIR3_FT_SOCK,
-       [S_IFLNK >> S_SHIFT]    = XFS_DIR3_FT_SYMLINK,
-};
+unsigned char xfs_mode_to_ftype(int mode)
+{
+       switch (mode & S_IFMT) {
+       case S_IFREG:
+               return XFS_DIR3_FT_REG_FILE;
+       case S_IFDIR:
+               return XFS_DIR3_FT_DIR;
+       case S_IFCHR:
+               return XFS_DIR3_FT_CHRDEV;
+       case S_IFBLK:
+               return XFS_DIR3_FT_BLKDEV;
+       case S_IFIFO:
+               return XFS_DIR3_FT_FIFO;
+       case S_IFSOCK:
+               return XFS_DIR3_FT_SOCK;
+       case S_IFLNK:
+               return XFS_DIR3_FT_SYMLINK;
+       default:
+               return XFS_DIR3_FT_UNKNOWN;
+       }
+}
 
 /*
  * ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
        if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
                return rval;
        rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
-       ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
+       if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+               return -EFSCORRUPTED;
        *vp = rval;
        return 0;
 }
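
The table-to-switch conversion above is as much a hardening fix as a cleanup: the old array had S_IFMT >> S_SHIFT (15) entries, yet (mode & S_IFMT) >> S_SHIFT evaluates to 15 for a corrupted mode word with all type bits set, reading past the end of the table. The switch returns XFS_DIR3_FT_UNKNOWN instead, which the inode verifier (see the xfs_dinode_verify hunk below) uses to reject the inode. A sketch of the out-of-bounds index, with the mode constants at their standard octal values:

    #include <stdio.h>

    #define S_IFMT  0170000         /* standard file-type mask */
    #define S_SHIFT 12

    int main(void)
    {
            unsigned int table_size = S_IFMT >> S_SHIFT;    /* 15 entries, 0..14 */
            unsigned int mode = 0xffff;                     /* corrupted on-disk mode */
            unsigned int idx = (mode & S_IFMT) >> S_SHIFT;  /* 15: one past the end */

            printf("table size %u, index %u -> %s\n", table_size, idx,
                   idx >= table_size ? "out of bounds" : "ok");
            return 0;
    }
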
index 0197590..d6e6d9d 100644 (file)
@@ -18,6 +18,9 @@
 #ifndef __XFS_DIR2_H__
 #define __XFS_DIR2_H__
 
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+
 struct xfs_defer_ops;
 struct xfs_da_args;
 struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
 extern struct xfs_name xfs_name_dotdot;
 
 /*
- * directory filetype conversion tables.
+ * Convert inode mode to directory entry filetype
  */
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
+extern unsigned char xfs_mode_to_ftype(int mode);
 
 /*
  * directory operations vector for encode/decode routines
index 0fd086d..7c47188 100644 (file)
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
 }
 
 STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
-       int                     *stat)
+       int                     *stat,
+       enum xfs_ag_resv_type   resv)
 {
        xfs_alloc_arg_t         args;           /* block allocation args */
        int                     error;          /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
        args.maxlen = 1;
        args.prod = 1;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
+       args.resv = resv;
 
        error = xfs_alloc_vextent(&args);
        if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
 }
 
 STATIC int
+xfs_inobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+       struct xfs_btree_cur    *cur,
+       union xfs_btree_ptr     *start,
+       union xfs_btree_ptr     *new,
+       int                     *stat)
+{
+       return __xfs_inobt_alloc_block(cur, start, new, stat,
+                       XFS_AG_RESV_METADATA);
+}
+
+STATIC int
 xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
 
        .dup_cursor             = xfs_inobt_dup_cursor,
        .set_root               = xfs_finobt_set_root,
-       .alloc_block            = xfs_inobt_alloc_block,
+       .alloc_block            = xfs_finobt_alloc_block,
        .free_block             = xfs_inobt_free_block,
        .get_minrecs            = xfs_inobt_get_minrecs,
        .get_maxrecs            = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
        return 0;
 }
 #endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+       struct xfs_mount        *mp)
+{
+       /* Bail out if we're uninitialized, which can happen in mkfs. */
+       if (mp->m_inobt_mxr[0] == 0)
+               return 0;
+
+       return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+               (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+                               XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_btnum_t             btnum,
+       xfs_extlen_t            *tree_blocks)
+{
+       struct xfs_buf          *agbp;
+       struct xfs_btree_cur    *cur;
+       int                     error;
+
+       error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+       if (error)
+               return error;
+
+       cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+       error = xfs_btree_count_blocks(cur, tree_blocks);
+       xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+       xfs_buf_relse(agbp);
+
+       return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+       struct xfs_mount        *mp,
+       xfs_agnumber_t          agno,
+       xfs_extlen_t            *ask,
+       xfs_extlen_t            *used)
+{
+       xfs_extlen_t            tree_len = 0;
+       int                     error;
+
+       if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+               return 0;
+
+       error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+       if (error)
+               return error;
+
+       *ask += xfs_inobt_max_size(mp);
+       *used += tree_len;
+       return 0;
+}
index bd88453..aa81e2e 100644 (file)
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
 #define xfs_inobt_rec_check_count(mp, rec)     0
 #endif /* DEBUG */
 
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+               xfs_extlen_t *ask, xfs_extlen_t *used);
+
 #endif /* __XFS_IALLOC_BTREE_H__ */
index dd483e2..d93f9d9 100644 (file)
@@ -29,6 +29,7 @@
 #include "xfs_icache.h"
 #include "xfs_trans.h"
 #include "xfs_ialloc.h"
+#include "xfs_dir2.h"
 
 /*
  * Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
        xfs_ino_t               ino,
        struct xfs_dinode       *dip)
 {
+       uint16_t                mode;
        uint16_t                flags;
        uint64_t                flags2;
 
@@ -396,8 +398,12 @@ xfs_dinode_verify(
        if (be64_to_cpu(dip->di_size) & (1ULL << 63))
                return false;
 
-       /* No zero-length symlinks. */
-       if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+       mode = be16_to_cpu(dip->di_mode);
+       if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+               return false;
+
+       /* No zero-length symlinks/dirs. */
+       if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
                return false;
 
        /* only version 3 or greater inodes are extensively verified here */
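
The new mode check relies on xfs_mode_to_ftype() returning XFS_DIR3_FT_UNKNOWN
for any mode whose S_IFMT bits do not name a real file type (this series turns
the old lookup table into a function). A hedged sketch of the mapping it
implements; the real one lives in the xfs_dir2 code:

    static uint8_t sketch_mode_to_ftype(int mode)
    {
            switch (mode & S_IFMT) {
            case S_IFREG:   return XFS_DIR3_FT_REG_FILE;
            case S_IFDIR:   return XFS_DIR3_FT_DIR;
            case S_IFCHR:   return XFS_DIR3_FT_CHRDEV;
            case S_IFBLK:   return XFS_DIR3_FT_BLKDEV;
            case S_IFIFO:   return XFS_DIR3_FT_FIFO;
            case S_IFSOCK:  return XFS_DIR3_FT_SOCK;
            case S_IFLNK:   return XFS_DIR3_FT_SYMLINK;
            default:        return XFS_DIR3_FT_UNKNOWN;
            }
    }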
index 2580262..584ec89 100644 (file)
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
            sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG                    ||
            sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG                    ||
            sbp->sb_blocksize != (1 << sbp->sb_blocklog)                ||
-           sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG                   ||
+           sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
            sbp->sb_inodesize < XFS_DINODE_MIN_SIZE                     ||
            sbp->sb_inodesize > XFS_DINODE_MAX_SIZE                     ||
            sbp->sb_inodelog < XFS_DINODE_MIN_LOG                       ||
index b9abce5..c141791 100644 (file)
@@ -528,7 +528,6 @@ xfs_getbmap(
        xfs_bmbt_irec_t         *map;           /* buffer for user's data */
        xfs_mount_t             *mp;            /* file system mount point */
        int                     nex;            /* # of user extents can do */
-       int                     nexleft;        /* # of user extents left */
        int                     subnex;         /* # of bmapi's can do */
        int                     nmap;           /* number of map entries */
        struct getbmapx         *out;           /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
                goto out_free_map;
        }
 
-       nexleft = nex;
-
        do {
-               nmap = (nexleft > subnex) ? subnex : nexleft;
+               nmap = (nex > subnex) ? subnex : nex;
                error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
                                       XFS_BB_TO_FSB(mp, bmv->bmv_length),
                                       map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
                        goto out_free_map;
                ASSERT(nmap <= subnex);
 
-               for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
-                               cur_ext < bmv->bmv_count; i++) {
+               for (i = 0; i < nmap && bmv->bmv_length &&
+                               cur_ext < bmv->bmv_count - 1; i++) {
                        out[cur_ext].bmv_oflags = 0;
                        if (map[i].br_state == XFS_EXT_UNWRITTEN)
                                out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
                                continue;
                        }
 
+                       /*
+                        * In order to report shared extents accurately,
+                        * we report each distinct shared/unshared part
+                        * of a single bmbt record using multiple bmap
+                        * extents.  To make that happen, we iterate the
+                        * same map array item multiple times, each
+                        * time trimming out the subextent that we just
+                        * reported.
+                        *
+                        * Because of this, we must check the out array
+                        * index (cur_ext) directly against bmv_count-1
+                        * to avoid overflows.
+                        */
                        if (inject_map.br_startblock != NULLFSBLOCK) {
                                map[i] = inject_map;
                                i--;
-                       } else
-                               nexleft--;
+                       }
                        bmv->bmv_entries++;
                        cur_ext++;
                }
-       } while (nmap && nexleft && bmv->bmv_length &&
-                cur_ext < bmv->bmv_count);
+       } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
 
  out_free_map:
        kmem_free(map);
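
The loop bound against bmv_count - 1 (instead of counting nexleft down) is
needed because a single bmbt record may now expand into several output
entries, so only the output index is a safe bound. The `i--` re-injection
idiom from the comment, reduced to a stand-alone example (illustrative, not
the xfs code):

    struct range { unsigned long start, len; };

    /*
     * Emit each input range as pieces of at most `piece` units, revisiting
     * map[i] until it is fully consumed, and bounding only on the output
     * index so the out[] array can never overflow.
     */
    static unsigned int split_ranges(struct range *map, unsigned int nmap,
                                     struct range *out, unsigned int out_max,
                                     unsigned long piece)
    {
            unsigned int i, cur = 0;

            for (i = 0; i < nmap && cur < out_max; i++) {
                    unsigned long n;

                    if (!map[i].len)
                            continue;
                    n = map[i].len < piece ? map[i].len : piece;

                    out[cur].start = map[i].start;
                    out[cur].len = n;
                    cur++;

                    map[i].start += n;
                    map[i].len -= n;
                    if (map[i].len)
                            i--;    /* revisit the trimmed remainder */
            }
            return cur;
    }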
index 7f0a01f..ac3b4db 100644 (file)
@@ -422,6 +422,7 @@ retry:
 out_free_pages:
        for (i = 0; i < bp->b_page_count; i++)
                __free_page(bp->b_pages[i]);
+       bp->b_flags &= ~_XBF_PAGES;
        return error;
 }
 
index 7a30b8f..9d06cc3 100644 (file)
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
        /* Simple advance */
        next_id = *id + 1;
 
+       /* If we'd wrap past the max ID, stop */
+       if (next_id < *id)
+               return -ENOENT;
+
        /* If new ID is within the current chunk, advancing it sufficed */
        if (next_id % mp->m_quotainfo->qi_dqperchunk) {
                *id = next_id;
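
xfs_dqid_t is an unsigned 32-bit type, so incrementing the maximum ID wraps
to a smaller value; comparing the incremented value against the original
catches exactly that case. The check in isolation:

    /*
     * Returns -1 when id is already the maximum representable value,
     * mirroring the -ENOENT above: 0xffffffff + 1 wraps to 0, which
     * compares below the original id.
     */
    static int next_quota_id(unsigned int id, unsigned int *next)
    {
            unsigned int next_id = id + 1;

            if (next_id < id)
                    return -1;
            *next = next_id;
            return 0;
    }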
index b955779..de32f0f 100644 (file)
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
        int                     error;
 
        /*
-        * The ifree transaction might need to allocate blocks for record
-        * insertion to the finobt. We don't want to fail here at ENOSPC, so
-        * allow ifree to dip into the reserved block pool if necessary.
-        *
-        * Freeing large sets of inodes generally means freeing inode chunks,
-        * directory and file data blocks, so this should be relatively safe.
-        * Only under severe circumstances should it be possible to free enough
-        * inodes to exhaust the reserve block pool via finobt expansion while
-        * at the same time not creating free space in the filesystem.
+        * We try to use a per-AG reservation for any block needed by the finobt
+        * tree, but as the finobt feature predates the per-AG reservation
+        * support a degraded file system might not have enough space for the
+        * reservation at mount time.  In that case try to dip into the reserved
+        * pool and pray.
         *
         * Send a warning if the reservation does happen to fail, as the inode
         * now remains allocated and sits on the unlinked list until the fs is
         * repaired.
         */
-       error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
-                       XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+       if (unlikely(mp->m_inotbt_nores)) {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+                               XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+                               &tp);
+       } else {
+               error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+       }
        if (error) {
                if (error == -ENOSPC) {
                        xfs_warn_ratelimited(mp,
index 0d14742..1aa3abd 100644 (file)
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
        xfs_trans_t     *tp;
        int             nimaps;
        int             error = 0;
-       int             flags = 0;
+       int             flags = XFS_BMAPI_DELALLOC;
        int             nres;
 
        if (whichfork == XFS_COW_FORK)
index 308bebb..22c1615 100644 (file)
@@ -98,12 +98,27 @@ xfs_init_security(
 static void
 xfs_dentry_to_name(
        struct xfs_name *namep,
+       struct dentry   *dentry)
+{
+       namep->name = dentry->d_name.name;
+       namep->len = dentry->d_name.len;
+       namep->type = XFS_DIR3_FT_UNKNOWN;
+}
+
+static int
+xfs_dentry_mode_to_name(
+       struct xfs_name *namep,
        struct dentry   *dentry,
        int             mode)
 {
        namep->name = dentry->d_name.name;
        namep->len = dentry->d_name.len;
-       namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
+       namep->type = xfs_mode_to_ftype(mode);
+
+       if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
+               return -EFSCORRUPTED;
+
+       return 0;
 }
 
 STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
         * xfs_init_security we must back out.
         * ENOSPC can hit here, among other things.
         */
-       xfs_dentry_to_name(&teardown, dentry, 0);
+       xfs_dentry_to_name(&teardown, dentry);
 
        xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
 }
@@ -154,8 +169,12 @@ xfs_generic_create(
        if (error)
                return error;
 
+       /* Verify mode is valid also for tmpfile case */
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out_free_acl;
+
        if (!tmpfile) {
-               xfs_dentry_to_name(&name, dentry, mode);
                error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
        } else {
                error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
        error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
        if (dentry->d_name.len >= MAXNAMELEN)
                return ERR_PTR(-ENAMETOOLONG);
 
-       xfs_dentry_to_name(&xname, dentry, 0);
+       xfs_dentry_to_name(&xname, dentry);
        error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
        if (unlikely(error)) {
                if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, inode->i_mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
+       if (unlikely(error))
+               return error;
 
        error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
        if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
        struct xfs_name name;
        int             error;
 
-       xfs_dentry_to_name(&name, dentry, 0);
+       xfs_dentry_to_name(&name, dentry);
 
        error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
        if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
 
        mode = S_IFLNK |
                (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
-       xfs_dentry_to_name(&name, dentry, mode);
+       error = xfs_dentry_mode_to_name(&name, dentry, mode);
+       if (unlikely(error))
+               goto out;
 
        error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
        if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
 {
        struct inode    *new_inode = d_inode(ndentry);
        int             omode = 0;
+       int             error;
        struct xfs_name oname;
        struct xfs_name nname;
 
@@ -405,8 +429,14 @@ xfs_vn_rename(
        if (flags & RENAME_EXCHANGE)
                omode = d_inode(ndentry)->i_mode;
 
-       xfs_dentry_to_name(&oname, odentry, omode);
-       xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
+       error = xfs_dentry_mode_to_name(&oname, odentry, omode);
+       if (omode && unlikely(error))
+               return error;
+
+       error = xfs_dentry_mode_to_name(&nname, ndentry,
+                                       d_inode(odentry)->i_mode);
+       if (unlikely(error))
+               return error;
 
        return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
                          XFS_I(ndir), &nname,
index e467218..7a989de 100644 (file)
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 }
 
 #define ASSERT_ALWAYS(expr)    \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifdef DEBUG
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 #ifdef XFS_WARN
 
 #define ASSERT(expr)   \
-       (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+       (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
 
 #ifndef STATIC
 # define STATIC static noinline
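
The old annotation was inverted: an assertion expression is expected to be
true in the common case, so the first arm of the ternary is the hot path and
the hint must be likely(), not unlikely() (which told the compiler the
assertion usually fails). A simplified sketch of the machinery involved, not
the kernel's exact macros:

    /* __builtin_expect tells the compiler which truth value the expression
     * usually has, so the expected case becomes the fall-through branch. */
    static void assert_fail(const char *expr, const char *file, int line)
    {
            (void)expr; (void)file; (void)line;
            __builtin_trap();
    }

    #define my_likely(x)    __builtin_expect(!!(x), 1)

    #define MY_ASSERT(expr) \
            (my_likely(expr) ? (void)0 : assert_fail(#expr, __FILE__, __LINE__))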
index 84f7852..7f351f7 100644 (file)
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
        int                     m_fixedfsid[2]; /* unchanged for life of FS */
        uint                    m_dmevmask;     /* DMI events for this FS */
        __uint64_t              m_flags;        /* global mount flags */
+       bool                    m_inotbt_nores; /* no per-AG finobt resv. */
        int                     m_ialloc_inos;  /* inodes in inode allocation */
        int                     m_ialloc_blks;  /* blocks in inode allocation */
        int                     m_ialloc_min_blks;/* min blocks in sparse inode
index 45e50ea..b669b12 100644 (file)
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
-       error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+       error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
+                        &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
index d6d241f..56814e8 100644 (file)
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
        struct drm_crtc *ptr;
        struct drm_crtc_state *state;
        struct drm_crtc_commit *commit;
-       s64 __user *out_fence_ptr;
+       s32 __user *out_fence_ptr;
 };
 
 struct __drm_connnectors_state {
index bf9991b..1374323 100644 (file)
@@ -488,7 +488,7 @@ struct drm_mode_config {
        /**
         * @prop_out_fence_ptr: Sync File fd pointer representing the
         * outgoing fences for a CRTC. Userspace should provide a pointer to a
-        * value of type s64, and then cast that pointer to u64.
+        * value of type s32, and then cast that pointer to u64.
         */
        struct drm_property *prop_out_fence_ptr;
        /**
index b717ed9..5c970ce 100644 (file)
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
 
 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
 
+void kvm_timer_init_vhe(void);
 #endif
index 20bfefb..d936a00 100644 (file)
@@ -74,6 +74,8 @@ enum cpuhp_state {
        CPUHP_ZCOMP_PREPARE,
        CPUHP_TIMERS_DEAD,
        CPUHP_MIPS_SOC_PREPARE,
+       CPUHP_BP_PREPARE_DYN,
+       CPUHP_BP_PREPARE_DYN_END                = CPUHP_BP_PREPARE_DYN + 20,
        CPUHP_BRINGUP_CPU,
        CPUHP_AP_IDLE_DEAD,
        CPUHP_AP_OFFLINE,
index c2748ac..e973fab 100644 (file)
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
                struct irq_chip *irqchip,
                int parent_irq);
 
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+                            struct irq_chip *irqchip,
+                            unsigned int first_irq,
+                            irq_flow_handler_t handler,
+                            unsigned int type,
+                            bool nested,
+                            struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. These upfront
+ * boilerplate static inlines provide such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
-                         unsigned int type,
-                         bool nested,
-                         struct lock_class_key *lock_key);
+                         unsigned int type)
+{
+
+       static struct lock_class_key key;
+
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+                                      struct irq_chip *irqchip,
+                                      unsigned int first_irq,
+                                      irq_flow_handler_t handler,
+                                      unsigned int type)
+{
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, false, NULL);
+}
 
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
 static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
                          struct irq_chip *irqchip,
                          unsigned int first_irq,
                          irq_flow_handler_t handler,
                          unsigned int type)
 {
-       return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
-                                    handler, type, true, NULL);
+       return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+                                       handler, type, true, NULL);
 }
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...)                              \
-(                                                              \
-       ({                                                      \
-               static struct lock_class_key _key;              \
-               _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
-       })                                                      \
-)
-#else
-#define gpiochip_irqchip_add(...)                              \
-       _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
 
 #endif /* CONFIG_GPIOLIB_IRQCHIP */
 
index 56aec84..cb09238 100644 (file)
@@ -514,8 +514,8 @@ extern enum system_states {
 #define TAINT_FLAGS_COUNT              16
 
 struct taint_flag {
-       char true;      /* character printed when tainted */
-       char false;     /* character printed when not tainted */
+       char c_true;    /* character printed when tainted */
+       char c_false;   /* character printed when not tainted */
        bool module;    /* also show as a per-module taint flag */
 };
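
Whatever the exact trigger upstream (for instance <stdbool.h> or an
out-of-tree definition of these names coming into scope), the clash being
avoided is easy to reproduce: true and false preprocess to the integer
constants 1 and 0, which cannot serve as field names. A minimal
demonstration:

    #include <stdbool.h>

    /*
     * With <stdbool.h> in scope,
     *
     *     struct taint_flag { char true; char false; };
     *
     * preprocesses to "char 1; char 0;" and fails to compile, hence the
     * rename to c_true/c_false:
     */
    struct taint_flag {
            char c_true;    /* character printed when tainted */
            char c_false;   /* character printed when not tainted */
            bool module;    /* also show as a per-module taint flag */
    };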
 
index 01033fa..c1784c0 100644 (file)
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
                unsigned long map_offset);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
                                          unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                         enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                         enum zone_type target, int *zone_shift);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
index 36d9896..f4aac87 100644 (file)
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
  * @zonelist - The zonelist to search for a suitable zone
  * @highest_zoneidx - The zone index of the highest zone to return
  * @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
  *
  * This function returns the first zone at or below a given zone index that is
  * within the allowed nodemask. The zoneref returned is a cursor that can be
  * used to iterate the zonelist with next_zones_zonelist by advancing it by
  * one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to a concurrent
+ * nodemask update caused by a cpuset modification.
  */
 static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
                                        enum zone_type highest_zoneidx,
index aacca82..0a3fadc 100644 (file)
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 extern unsigned long watchdog_enabled;
 extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
 #ifdef CONFIG_SMP
 extern int sysctl_softlockup_all_cpu_backtrace;
 extern int sysctl_hardlockup_all_cpu_backtrace;
index 321f9ed..01f71e1 100644 (file)
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
 #error "Unknown RCU implementation specified to kernel configuration"
 #endif
 
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT     1
+#define RCU_SCHEDULER_RUNNING  2
+
 /*
  * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
  * initialization and destruction of rcu_head on the stack. rcu_head structures
index 0c729c3..d971837 100644 (file)
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
 };
 
 #ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
 /**
  * suspend_set_ops - set platform dependent suspend operations
  * @ops: The new suspend operations to set.
index 958a24d..b567e44 100644 (file)
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
        }
 }
 
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+       if (mtu >= 4096)
+               return IB_MTU_4096;
+       else if (mtu >= 2048)
+               return IB_MTU_2048;
+       else if (mtu >= 1024)
+               return IB_MTU_1024;
+       else if (mtu >= 512)
+               return IB_MTU_512;
+       else
+               return IB_MTU_256;
+}
+
 enum ib_port_state {
        IB_PORT_NOP             = 0,
        IB_PORT_DOWN            = 1,
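
The helper is a floor over the discrete IB MTU scale: it returns the largest
enum whose byte count does not exceed the given value, with everything below
512 landing on IB_MTU_256. Expected behaviour, reading the ladder above:

    /*
     *   ib_mtu_int_to_enum(9000) == IB_MTU_4096
     *   ib_mtu_int_to_enum(4096) == IB_MTU_4096
     *   ib_mtu_int_to_enum(1500) == IB_MTU_1024   (largest IB MTU <= 1500)
     *   ib_mtu_int_to_enum(511)  == IB_MTU_256
     *   ib_mtu_int_to_enum(0)    == IB_MTU_256    (floor of the scale)
     */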
index 96dd0b3..da5033d 100644 (file)
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
 /**
  * fc_set_wwpn() - Set the World Wide Port Name of a local port
  * @lport: The local port whose WWPN is to be set
- * @wwnn:  The new WWPN
+ * @wwpn:  The new WWPN
  */
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
 {
-       lport->wwpn = wwnn;
+       lport->wwpn = wwpn;
 }
 
 /**
index 3cbc327..c451eec 100644 (file)
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
                                                  __u8 audio_out_compensated,
                                                  __u8 audio_out_delay)
 {
-       msg->len = 7;
+       msg->len = 6;
        msg->msg[0] |= 0xf; /* broadcast */
        msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
        msg->msg[2] = phys_addr >> 8;
        msg->msg[3] = phys_addr & 0xff;
        msg->msg[4] = video_latency;
        msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
-       msg->msg[6] = audio_out_delay;
+       if (audio_out_compensated == 3)
+               msg->msg[msg->len++] = audio_out_delay;
 }
 
 static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
        *video_latency = msg->msg[4];
        *low_latency_mode = (msg->msg[5] >> 2) & 1;
        *audio_out_compensated = msg->msg[5] & 3;
-       *audio_out_delay = msg->msg[6];
+       if (*audio_out_compensated == 3 && msg->len >= 7)
+               *audio_out_delay = msg->msg[6];
+       else
+               *audio_out_delay = 0;
 }
 
 static inline void cec_msg_request_current_latency(struct cec_msg *msg,
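
Per the CEC specification, the <Report Current Latency> payload carries the
audio output delay byte only when the audio output compensation field is 3
(partially compensated); the fix sizes the encoded message accordingly and
makes the decoder tolerate both lengths. The resulting layouts, sketched:

    /*
     * audio_out_compensated != 3 -> len == 6:
     *   [initiator|0xf] [opcode] [phys hi] [phys lo] [video_latency]
     *   [low_latency_mode << 2 | audio_out_compensated]
     *
     * audio_out_compensated == 3 -> len == 7, one trailing byte:
     *   ... [audio_out_delay]
     */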
index 82bdf56..bb68cb1 100644 (file)
@@ -16,3 +16,4 @@ header-y += nes-abi.h
 header-y += ocrdma-abi.h
 header-y += hns-abi.h
 header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
index 48a19bd..d24eee1 100644 (file)
@@ -30,7 +30,7 @@
  * SOFTWARE.
  */
 #ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
 
 #include <linux/types.h>
 
index f75c4d0..0a5f630 100644 (file)
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
 {
        struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
        int prev_state, ret = 0;
-       bool hasdied = false;
 
        if (num_online_cpus() == 1)
                return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
                cpuhp_kick_ap_work(cpu);
        }
 
-       hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
 out:
        cpu_hotplug_done();
        return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
  */
 static int cpuhp_reserve_state(enum cpuhp_state state)
 {
-       enum cpuhp_state i;
+       enum cpuhp_state i, end;
+       struct cpuhp_step *step;
 
-       for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
-               if (!cpuhp_ap_states[i].name)
+       switch (state) {
+       case CPUHP_AP_ONLINE_DYN:
+               step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+               end = CPUHP_AP_ONLINE_DYN_END;
+               break;
+       case CPUHP_BP_PREPARE_DYN:
+               step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+               end = CPUHP_BP_PREPARE_DYN_END;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       for (i = state; i <= end; i++, step++) {
+               if (!step->name)
                        return i;
        }
        WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
 
        mutex_lock(&cpuhp_state_mutex);
 
-       if (state == CPUHP_AP_ONLINE_DYN) {
+       if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
                ret = cpuhp_reserve_state(state);
                if (ret < 0)
                        goto out;
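
With the reserved CPUHP_BP_PREPARE_DYN..CPUHP_BP_PREPARE_DYN_END range in
place, PREPARE-stage callbacks can be allocated dynamically the same way
CPUHP_AP_ONLINE_DYN already allows. A hedged sketch of a caller, where
my_prepare_cpu()/my_dead_cpu() are hypothetical callbacks:

    /* Run on the control CPU before a CPU is brought up / after it died. */
    static int my_prepare_cpu(unsigned int cpu) { return 0; }
    static int my_dead_cpu(unsigned int cpu)    { return 0; }

    static int __init my_subsys_init(void)
    {
            int state;

            /*
             * Passing CPUHP_BP_PREPARE_DYN makes cpuhp_reserve_state()
             * hand out a free slot from the 20-entry dynamic PREPARE
             * range; the allocated state number is returned for a later
             * cpuhp_remove_state().
             */
            state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "mysubsys:prepare",
                                      my_prepare_cpu, my_dead_cpu);
            return state < 0 ? state : 0;
    }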
index 5088784..38d4270 100644 (file)
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
 
        for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                if (taint_flags[i].module && test_bit(i, &mod->taints))
-                       buf[l++] = taint_flags[i].true;
+                       buf[l++] = taint_flags[i].c_true;
        }
 
        return l;
index c51edaa..08aa88d 100644 (file)
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
                 * Delay timeout seconds before rebooting the machine.
                 * We can't use the "normal" timers since we just panicked.
                 */
-               pr_emerg("Rebooting in %d seconds..", panic_timeout);
+               pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
 
                for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
                        touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
                        const struct taint_flag *t = &taint_flags[i];
                        *s++ = test_bit(i, &tainted_mask) ?
-                                       t->true : t->false;
+                                       t->c_true : t->c_false;
                }
                *s = 0;
        } else
index f67ceb7..15e6bae 100644 (file)
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
 const char *mem_sleep_states[PM_SUSPEND_MAX];
 
 suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
 
 unsigned int pm_suspend_global_flags;
 EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
        }
        if (valid_state(PM_SUSPEND_MEM)) {
                mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
-               if (mem_sleep_default >= PM_SUSPEND_MEM)
+               if (mem_sleep_default == PM_SUSPEND_MEM)
                        mem_sleep_current = PM_SUSPEND_MEM;
        }
 
index 80adef7..0d6ff3e 100644 (file)
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
 #define TPS(x)  tracepoint_string(x)
 
 void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
 
 /*
  * This function really isn't for public consumption, but RCU is special in
index 1898559..b23a4d0 100644 (file)
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * benefits of doing might_sleep() to reduce latency.)
  *
  * Cool, huh?  (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later.  The cond_resched()
- * currently makes this problematic.
  */
 void synchronize_sched(void)
 {
@@ -195,7 +192,6 @@ void synchronize_sched(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_sched() in RCU read-side critical section");
-       cond_resched();
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
index 196f030..c64b827 100644 (file)
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
 
 /*
  * During boot, we forgive RCU lockdep issues.  After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously.  Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so it
+ * does not have to care that the scheduler is only half-initialized
+ * during part of the boot process.
  */
 void __init rcu_scheduler_starting(void)
 {
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
 }
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
index 96c52e4..cb4e205 100644 (file)
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
 int sysctl_panic_on_rcu_stall __read_mostly;
 
 /*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned.  So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before the
+ * first task is spawned.  So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
  * optimize synchronize_rcu() to a simple barrier().  When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods.  This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods.  This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking.  Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
  */
 int rcu_scheduler_active __read_mostly;
 EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
 early_initcall(rcu_spawn_gp_kthread);
 
 /*
- * This function is invoked towards the end of the scheduler's initialization
- * process.  Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system).  After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections.  This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process.  Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops).  After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall() rcu_exp_runtime_mode() will switch to full
+ * runtime RCU functionality.
  */
 void rcu_scheduler_starting(void)
 {
        WARN_ON(num_online_cpus() != 1);
        WARN_ON(nr_context_switches() > 0);
-       rcu_scheduler_active = 1;
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_INIT;
+       rcu_test_sync_prims();
 }
 
 /*
index d3053e9..e59e184 100644 (file)
@@ -532,18 +532,28 @@ struct rcu_exp_work {
 };
 
 /*
+ * Common code to drive an expedited grace period forward, used by
+ * workqueues and mid-boot-time tasks.
+ */
+static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
+                                 smp_call_func_t func, unsigned long s)
+{
+       /* Initialize the rcu_node tree in preparation for the wait. */
+       sync_rcu_exp_select_cpus(rsp, func);
+
+       /* Wait and clean up, including waking everyone. */
+       rcu_exp_wait_wake(rsp, s);
+}
+
+/*
  * Work-queue handler to drive an expedited grace period forward.
  */
 static void wait_rcu_exp_gp(struct work_struct *wp)
 {
        struct rcu_exp_work *rewp;
 
-       /* Initialize the rcu_node tree in preparation for the wait. */
        rewp = container_of(wp, struct rcu_exp_work, rew_work);
-       sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
-
-       /* Wait and clean up, including waking everyone. */
-       rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+       rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
 }
 
 /*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
        if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */
 
-       /* Marshall arguments and schedule the expedited grace period. */
-       rew.rew_func = func;
-       rew.rew_rsp = rsp;
-       rew.rew_s = s;
-       INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
-       schedule_work(&rew.rew_work);
+       /* Ensure that load happens before action based on it. */
+       if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+               /* Direct call during scheduler init and early_initcalls(). */
+               rcu_exp_sel_wait_wake(rsp, func, s);
+       } else {
+               /* Marshall arguments & schedule the expedited grace period. */
+               rew.rew_func = func;
+               rew.rew_rsp = rsp;
+               rew.rew_s = s;
+               INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+               schedule_work(&rew.rew_work);
+       }
 
        /* Wait for expedited grace period to complete. */
        rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
 {
        struct rcu_state *rsp = rcu_state_p;
 
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+               return;
        _synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Switch to run-time mode once Tree RCU has fully initialized.
+ */
+static int __init rcu_exp_runtime_mode(void)
+{
+       rcu_test_sync_prims();
+       rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+       rcu_test_sync_prims();
+       return 0;
+}
+core_initcall(rcu_exp_runtime_mode);
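
The scheduler-state check in _synchronize_rcu_expedited() above is a general
mid-boot dispatch pattern: between RCU_SCHEDULER_INIT and this
core_initcall(), workqueues cannot be relied upon, so the requesting task
drives the grace period itself. The shape of the pattern, reduced to a
compilable sketch with illustrative names:

    enum sched_state { SCHED_INACTIVE, SCHED_INIT, SCHED_RUNNING };
    static enum sched_state scheduler_state;

    static void do_grace_period(void)
    {
            /* ... select CPUs, wait, wake waiters ... */
    }

    /* Stand-in for INIT_WORK_ONSTACK()/schedule_work(). */
    static void hand_off_to_workqueue(void (*fn)(void))
    {
            fn();
    }

    static void start_grace_period(void)
    {
            if (scheduler_state == SCHED_INIT)
                    do_grace_period();      /* direct call during early boot */
            else
                    hand_off_to_workqueue(do_grace_period);
    }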
index 85c5a88..56583e7 100644 (file)
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
-       if (!rcu_scheduler_active)
+       if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
                return;
        if (rcu_gp_is_expedited())
                synchronize_rcu_expedited();
index f19271d..4f6db7e 100644 (file)
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
  * Should expedited grace-period primitives always fall back to their
  * non-expedited counterparts?  Intended for use within RCU.  Note
  * that if the user specifies both rcu_expedited and rcu_normal, then
- * rcu_normal wins.
+ * rcu_normal wins.  (Except during the boot-time window from when
+ * the first task is spawned until the rcu_exp_runtime_mode()
+ * core_initcall() is invoked, during which everything is expedited.)
  */
 bool rcu_gp_is_normal(void)
 {
-       return READ_ONCE(rcu_normal);
+       return READ_ONCE(rcu_normal) &&
+              rcu_scheduler_active != RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
 /*
  * Should normal grace-period primitives be expedited?  Intended for
  * use within RCU.  Note that this function takes the rcu_expedited
- * sysfs/boot variable into account as well as the rcu_expedite_gp()
- * nesting.  So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
- * returns false is a -really- bad idea.
+ * sysfs/boot variable and rcu_scheduler_active into account as well
+ * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
+ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
  */
 bool rcu_gp_is_expedited(void)
 {
-       return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+       return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
+              rcu_scheduler_active == RCU_SCHEDULER_INIT;
 }
 EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
 
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
 
 int notrace debug_lockdep_rcu_enabled(void)
 {
-       return rcu_scheduler_active && debug_locks &&
+       return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
               current->lockdep_recursion == 0;
 }
 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
 void synchronize_rcu_tasks(void)
 {
        /* Complain if the scheduler has not started.  */
-       RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+       RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
                         "synchronize_rcu_tasks called too soon");
 
        /* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
 
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
+/*
+ * Test each non-SRCU synchronous grace-period wait API.  This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+       if (!IS_ENABLED(CONFIG_PROVE_RCU))
+               return;
+       synchronize_rcu();
+       synchronize_rcu_bh();
+       synchronize_sched();
+       synchronize_rcu_expedited();
+       synchronize_rcu_bh_expedited();
+       synchronize_sched_expedited();
+}
+
 #ifdef CONFIG_PROVE_RCU
 
 /*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
                early_boot_test_call_rcu_bh();
        if (rcu_self_test_sched)
                early_boot_test_call_rcu_sched();
+       rcu_test_sync_prims();
 }
 
 static int rcu_verify_early_boot_tests(void)
index 8dbaec0..1aea594 100644 (file)
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
                                break;
                        if (neg)
                                continue;
+                       val = convmul * val / convdiv;
                        if ((min && val < *min) || (max && val > *max))
                                continue;
                        *i = val;
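
The added line converts the user-supplied value into the table's internal
units before the min/max comparison; previously the raw value was checked
against bounds expressed in converted units. A worked example for
proc_doulongvec_ms_jiffies_minmax(), which passes convmul = HZ and
convdiv = 1000:

    /*
     * With HZ == 250 and a user write of "4000" (milliseconds):
     *     val = 250 * 4000 / 1000 = 1000 jiffies
     * and it is this 1000 that is compared against the table's
     * extra1/extra2 bounds, which are likewise expressed in jiffies.
     */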
index 9d20d5d..4bbd38e 100644 (file)
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        struct hlist_head *hashent = ucounts_hashentry(ns, uid);
        struct ucounts *ucounts, *new;
 
-       spin_lock(&ucounts_lock);
+       spin_lock_irq(&ucounts_lock);
        ucounts = find_ucounts(ns, uid, hashent);
        if (!ucounts) {
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irq(&ucounts_lock);
 
                new = kzalloc(sizeof(*new), GFP_KERNEL);
                if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
                new->uid = uid;
                atomic_set(&new->count, 0);
 
-               spin_lock(&ucounts_lock);
+               spin_lock_irq(&ucounts_lock);
                ucounts = find_ucounts(ns, uid, hashent);
                if (ucounts) {
                        kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
        }
        if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
                ucounts = NULL;
-       spin_unlock(&ucounts_lock);
+       spin_unlock_irq(&ucounts_lock);
        return ucounts;
 }
 
 static void put_ucounts(struct ucounts *ucounts)
 {
+       unsigned long flags;
+
        if (atomic_dec_and_test(&ucounts->count)) {
-               spin_lock(&ucounts_lock);
+               spin_lock_irqsave(&ucounts_lock, flags);
                hlist_del_init(&ucounts->node);
-               spin_unlock(&ucounts_lock);
+               spin_unlock_irqrestore(&ucounts_lock, flags);
 
                kfree(ucounts);
        }
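
The conversion follows the usual split: paths known to run in process context
with interrupts enabled can use the cheaper spin_lock_irq()/spin_unlock_irq()
pair, while paths reachable with an unknown interrupt state must save and
restore the flags. The idiom in isolation:

    static DEFINE_SPINLOCK(lock);

    /* Process context, interrupts known to be enabled: */
    static void known_context_path(void)
    {
            spin_lock_irq(&lock);           /* disables interrupts */
            /* ... */
            spin_unlock_irq(&lock);         /* unconditionally re-enables them */
    }

    /* May be entered with interrupts already disabled: */
    static void any_context_path(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&lock, flags);        /* remembers prior state */
            /* ... */
            spin_unlock_irqrestore(&lock, flags);   /* restores prior state */
    }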
index d4b0fa0..63177be 100644 (file)
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
 #define for_each_watchdog_cpu(cpu) \
        for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
 
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
 /*
  * The 'watchdog_running' variable is set to 1 when the watchdog threads
  * are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        int duration;
        int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return HRTIMER_NORESTART;
+
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
 
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
 {
        int cpu, ret = 0;
 
+       atomic_set(&watchdog_park_in_progress, 1);
+
        for_each_watchdog_cpu(cpu) {
                ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
                if (ret)
                        break;
        }
 
+       atomic_set(&watchdog_park_in_progress, 0);
+
        return ret;
 }
 
index 84016c8..12b8dd6 100644 (file)
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;
 
+       if (atomic_read(&watchdog_park_in_progress) != 0)
+               return;
+
        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
index 86c8911..a3e14ce 100644 (file)
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
 
        return err;
 }
-EXPORT_SYMBOL_GPL(ioremap_page_range);
index 0b92d60..84812a9 100644 (file)
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
                        struct radix_tree_node *old = child;
                        offset = child->offset + 1;
                        child = child->parent;
-                       WARN_ON_ONCE(!list_empty(&node->private_list));
+                       WARN_ON_ONCE(!list_empty(&old->private_list));
                        radix_tree_node_free(old);
                        if (old == entry_to_node(node))
                                return;
index 9a6bd6c..5f3ad65 100644 (file)
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
+       /*
+        * When we COW a devmap PMD entry, we split it into PTEs, so we should
+        * not be in this function with `flags & FOLL_COW` set.
+        */
+       WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
        if (flags & FOLL_WRITE && !pmd_write(*pmd))
                return NULL;
 
@@ -1128,6 +1134,16 @@ out_unlock:
        return ret;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+       return pmd_write(pmd) ||
+              ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr,
                                   pmd_t *pmd,
@@ -1138,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 
        assert_spin_locked(pmd_lockptr(mm, pmd));
 
-       if (flags & FOLL_WRITE && !pmd_write(*pmd))
+       if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
                goto out;
 
        /* Avoid dumping huge zero page */
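
This is the huge-page analogue of the check gup already applies to base
pages; for comparison, the PTE-level counterpart in mm/gup.c reads:

    /*
     * FOLL_FORCE can write to even unwritable pte's, but only
     * after we've gone through a COW cycle and they are dirty.
     */
    static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
    {
            return pte_write(pte) ||
                    ((flags & FOLL_FORCE) && (flags & FOLL_COW) &&
                     pte_dirty(pte));
    }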
index a63a8f8..b822e15 100644 (file)
@@ -4353,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
                return ret;
        }
 
-       /* Try charges one by one with reclaim */
+       /* Try charges one by one with reclaim, but do not retry */
        while (count--) {
-               ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+               ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
                if (ret)
                        return ret;
                mc.precharge++;
index e43142c..ca2723d 100644 (file)
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
        node_set_state(node, N_MEMORY);
 }
 
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
-                  enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+                  enum zone_type target, int *zone_shift)
 {
        struct zone *zone = page_zone(pfn_to_page(pfn));
        enum zone_type idx = zone_idx(zone);
        int i;
 
+       *zone_shift = 0;
+
        if (idx < target) {
                /* pages must be at end of current zone */
                if (pfn + nr_pages != zone_end_pfn(zone))
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = idx + 1; i < target; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
        if (target < idx) {
                /* pages must be at beginning of current zone */
                if (pfn != zone->zone_start_pfn)
-                       return 0;
+                       return false;
 
                /* no zones in use between current zone and target */
                for (i = target + 1; i < idx; i++)
                        if (zone_is_initialized(zone - idx + i))
-                               return 0;
+                               return false;
        }
 
-       return target - idx;
+       *zone_shift = target - idx;
+       return true;
 }
 
 /* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
            !can_online_high_movable(zone))
                return -EINVAL;
 
-       if (online_type == MMOP_ONLINE_KERNEL)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
-       else if (online_type == MMOP_ONLINE_MOVABLE)
-               zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+       if (online_type == MMOP_ONLINE_KERNEL) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+                       return -EINVAL;
+       } else if (online_type == MMOP_ONLINE_MOVABLE) {
+               if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+                       return -EINVAL;
+       }
 
        zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
        if (!zone)
index 2e34664..1e7873e 100644 (file)
@@ -2017,8 +2017,8 @@ retry_cpuset:
 
        nmask = policy_nodemask(gfp, pol);
        zl = policy_zonelist(gfp, pol, node);
-       mpol_cond_put(pol);
        page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+       mpol_cond_put(pol);
 out:
        if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;
index d604d25..f3e0c69 100644 (file)
@@ -3523,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+       enum compact_priority compact_priority;
        enum compact_result compact_result;
-       int compaction_retries = 0;
-       int no_progress_loops = 0;
+       int compaction_retries;
+       int no_progress_loops;
        unsigned long alloc_start = jiffies;
        unsigned int stall_timeout = 10 * HZ;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * In the slowpath, we sanity check order to avoid ever trying to
@@ -3549,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                gfp_mask &= ~__GFP_ATOMIC;
 
+retry_cpuset:
+       compaction_retries = 0;
+       no_progress_loops = 0;
+       compact_priority = DEF_COMPACT_PRIORITY;
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /*
+        * We need to recalculate the starting point for the zonelist iterator
+        * because we might have used different nodemask in the fast path, or
+        * there was a cpuset modification and we are retrying - otherwise we
+        * could end up iterating over non-eligible zones endlessly.
+        */
+       ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+                                       ac->high_zoneidx, ac->nodemask);
+       if (!ac->preferred_zoneref->zone)
+               goto nopage;
+
+
        /*
         * The fast path uses conservative alloc_flags to succeed only until
         * kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3708,6 +3726,13 @@ retry:
                                &compaction_retries))
                goto retry;
 
+       /*
+        * It's possible we raced with cpuset update so the OOM would be
+        * premature (see below the nopage: label for full explanation).
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        /* Reclaim has failed us, start killing things */
        page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
        if (page)
@@ -3720,6 +3745,16 @@ retry:
        }
 
 nopage:
+       /*
+        * When updating a task's mems_allowed or mempolicy nodemask, it is
+        * possible to race with parallel threads in such a way that our
+        * allocation can fail while the mask is being updated. If we are about
+        * to fail, check if the cpuset changed during allocation and if so,
+        * retry.
+        */
+       if (read_mems_allowed_retry(cpuset_mems_cookie))
+               goto retry_cpuset;
+
        warn_alloc(gfp_mask,
                        "page allocation failure: order:%u", order);
 got_pg:
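
Both retry sites rely on the cpuset mems_allowed cookie, a seqcount-style
optimistic scheme: snapshot the sequence before the attempt, and if a failing
attempt observes a changed sequence, the nodemask may have been rewritten
mid-allocation, so the attempt is repeated. The loop shape in isolation,
where attempt_allocation() is a hypothetical stand-in:

    static struct page *alloc_with_cpuset_retry(gfp_t gfp, unsigned int order)
    {
            struct page *page;
            unsigned int cookie;

    retry:
            cookie = read_mems_allowed_begin();     /* seqcount snapshot */
            page = attempt_allocation(gfp, order);
            if (!page && read_mems_allowed_retry(cookie))
                    goto retry;     /* mems_allowed changed under us */
            return page;
    }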
@@ -3734,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        struct page *page;
-       unsigned int cpuset_mems_cookie;
        unsigned int alloc_flags = ALLOC_WMARK_LOW;
        gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
        struct alloc_context ac = {
@@ -3771,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                alloc_flags |= ALLOC_CMA;
 
-retry_cpuset:
-       cpuset_mems_cookie = read_mems_allowed_begin();
-
        /* Dirty zone balancing only done in the fast path */
        ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
 
@@ -3784,8 +3815,13 @@ retry_cpuset:
         */
        ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
                                        ac.high_zoneidx, ac.nodemask);
-       if (!ac.preferred_zoneref) {
+       if (!ac.preferred_zoneref->zone) {
                page = NULL;
+               /*
+                * This might be due to race with cpuset_current_mems_allowed
+                * update, so make sure we retry with original nodemask in the
+                * slow path.
+                */
                goto no_zone;
        }
 
@@ -3794,6 +3830,7 @@ retry_cpuset:
        if (likely(page))
                goto out;
 
+no_zone:
        /*
         * Runtime PM, block IO and its error handling path can deadlock
         * because I/O on the device might not complete.
@@ -3805,21 +3842,10 @@ retry_cpuset:
         * Restore the original nodemask if it was potentially replaced with
         * &cpuset_current_mems_allowed to optimize the fast-path attempt.
         */
-       if (cpusets_enabled())
+       if (unlikely(ac.nodemask != nodemask))
                ac.nodemask = nodemask;
-       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
-no_zone:
-       /*
-        * When updating a task's mems_allowed, it is possible to race with
-        * parallel threads in such a way that an allocation can fail while
-        * the mask is being updated. If a page allocation is about to fail,
-        * check if the cpuset changed during allocation and if so, retry.
-        */
-       if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
-               alloc_mask = gfp_mask;
-               goto retry_cpuset;
-       }
+       page = __alloc_pages_slowpath(alloc_mask, order, &ac);
 
 out:
        if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -7248,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
+               .gfp_mask = GFP_KERNEL,
        };
        INIT_LIST_HEAD(&cc.migratepages);
 
index 067598a..7aa6f43 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
        return 1;
 }
 
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+                         unsigned int length)
 {
        metadata_access_enable();
-       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+       print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
                        length, 1);
        metadata_access_disable();
 }
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
               p, p - addr, get_freepointer(s, p));
 
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+               print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+                             s->red_left_pad);
        else if (p > addr + 16)
-               print_section("Bytes b4 ", p - 16, 16);
+               print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
 
-       print_section("Object ", p, min_t(unsigned long, s->object_size,
-                               PAGE_SIZE));
+       print_section(KERN_ERR, "Object ", p,
+                     min_t(unsigned long, s->object_size, PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone ", p + s->object_size,
+               print_section(KERN_ERR, "Redzone ", p + s->object_size,
                        s->inuse - s->object_size);
 
        if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != size_from_object(s))
                /* Beginning of the filler is the free pointer */
-               print_section("Padding ", p + off, size_from_object(s) - off);
+               print_section(KERN_ERR, "Padding ", p + off,
+                             size_from_object(s) - off);
 
        dump_stack();
 }
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding ", end - remainder, remainder);
+       print_section(KERN_ERR, "Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object ", (void *)object,
+                       print_section(KERN_INFO, "Object ", (void *)object,
                                        s->object_size);
 
                dump_stack();
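
Taken together, the slub hunks above thread an explicit console log level
through print_section() instead of hard-coding KERN_ERR, so corruption
reports and allocation tracing can log at different severities. Usage after
the change, as in the callers above:

        print_section(KERN_ERR, "Redzone ", p - s->red_left_pad, s->red_left_pad);
        print_section(KERN_INFO, "Object ", (void *)object, s->object_size);
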
index 3949ce7..292e33b 100644 (file)
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
        SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
        struct sg_table sgt;
        struct scatterlist prealloc_sg;
-       char iv[AES_BLOCK_SIZE];
+       char iv[AES_BLOCK_SIZE] __aligned(8);
        int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
        int crypt_len = encrypt ? in_len + pad_byte : in_len;
        int ret;
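
For context on the one-liner above: a plain char array on the stack is only
guaranteed byte alignment, while skcipher implementations may read the IV
with wider, alignment-sensitive loads. A small illustration of what
__aligned(8) rules out (a sketch, not ceph code; get_unaligned() is the
kernel helper for such loads):

        char iv_unaligned[AES_BLOCK_SIZE];              /* may start at any offset  */
        char iv_aligned[AES_BLOCK_SIZE] __aligned(8);   /* 8-byte loads always safe */

        u64 a = get_unaligned((u64 *)iv_unaligned);     /* needs the helper         */
        u64 b = *(u64 *)iv_aligned;                     /* plain load is fine       */
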
index 4a57c8a..6a6f44d 100644 (file)
@@ -610,6 +610,33 @@ error:
        return ret ? : -ENOENT;
 }
 
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+                                          struct map *map, unsigned long offs)
+{
+       struct symbol *sym;
+       u64 addr = tp->address + tp->offset - offs;
+
+       sym = map__find_symbol(map, addr);
+       if (!sym)
+               return -ENOENT;
+
+       if (strcmp(sym->name, tp->symbol)) {
+               /* If we have no realname, use symbol for it */
+               if (!tp->realname)
+                       tp->realname = tp->symbol;
+               else
+                       free(tp->symbol);
+               tp->symbol = strdup(sym->name);
+               if (!tp->symbol)
+                       return -ENOMEM;
+       }
+       tp->offset = addr - sym->start;
+       tp->address -= offs;
+
+       return 0;
+}
+
 /*
  * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
  * and generates new symbols with suffixes such as .constprop.N or .isra.N
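
A worked example for post_process_probe_trace_point(), using made-up
numbers: for a trace point with tp->address = 0x1234, tp->offset = 0x10 and
a text offset offs = 0x1000, the helper computes:

        addr = 0x1234 + 0x10 - 0x1000;          /* = 0x244, map-relative address */
        sym  = map__find_symbol(map, addr);     /* resolve the containing symbol */
        tp->offset  = addr - sym->start;        /* offset within that symbol     */
        tp->address = 0x1234 - 0x1000;          /* = 0x234, relocated address    */
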
@@ -622,11 +649,9 @@ static int
 post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
                                        int ntevs, const char *pathname)
 {
-       struct symbol *sym;
        struct map *map;
        unsigned long stext = 0;
-       u64 addr;
-       int i;
+       int i, ret = 0;
 
        /* Prepare a map for offline binary */
        map = dso__new_map(pathname);
@@ -636,23 +661,14 @@ post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        for (i = 0; i < ntevs; i++) {
-               addr = tevs[i].point.address + tevs[i].point.offset - stext;
-               sym = map__find_symbol(map, addr);
-               if (!sym)
-                       continue;
-               if (!strcmp(sym->name, tevs[i].point.symbol))
-                       continue;
-               /* If we have no realname, use symbol for it */
-               if (!tevs[i].point.realname)
-                       tevs[i].point.realname = tevs[i].point.symbol;
-               else
-                       free(tevs[i].point.symbol);
-               tevs[i].point.symbol = strdup(sym->name);
-               tevs[i].point.offset = addr - sym->start;
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                                    map, stext);
+               if (ret < 0)
+                       break;
        }
        map__put(map);
 
-       return 0;
+       return ret;
 }
 
 static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
@@ -682,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
        return ret;
 }
 
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
-                                           int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+                                      int ntevs, const char *module,
+                                      struct debuginfo *dinfo)
 {
+       Dwarf_Addr text_offs = 0;
        int i, ret = 0;
        char *mod_name = NULL;
+       struct map *map;
 
        if (!module)
                return 0;
 
-       mod_name = find_module_name(module);
+       map = get_target_map(module, false);
+       if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+               pr_warning("Failed to get ELF symbols for %s\n", module);
+               return -EINVAL;
+       }
 
+       mod_name = find_module_name(module);
        for (i = 0; i < ntevs; i++) {
+               ret = post_process_probe_trace_point(&tevs[i].point,
+                                               map, (unsigned long)text_offs);
+               if (ret < 0)
+                       break;
                tevs[i].point.module =
                        strdup(mod_name ? mod_name : module);
                if (!tevs[i].point.module) {
@@ -703,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        free(mod_name);
+       map__put(map);
+
        return ret;
 }
 
@@ -760,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
 static int post_process_probe_trace_events(struct perf_probe_event *pev,
                                           struct probe_trace_event *tevs,
                                           int ntevs, const char *module,
-                                          bool uprobe)
+                                          bool uprobe, struct debuginfo *dinfo)
 {
        int ret;
 
@@ -768,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
                ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
        else if (module)
                /* Currently ref_reloc_sym based probe is not for drivers */
-               ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+               ret = post_process_module_probe_trace_events(tevs, ntevs,
+                                                            module, dinfo);
        else
                ret = post_process_kernel_probe_trace_events(tevs, ntevs);
 
@@ -812,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
                }
        }
 
-       debuginfo__delete(dinfo);
-
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
                ret = post_process_probe_trace_events(pev, *tevs, ntevs,
-                                               pev->target, pev->uprobes);
+                                       pev->target, pev->uprobes, dinfo);
                if (ret < 0 || ret == ntevs) {
+                       pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
                        clear_probe_trace_events(*tevs, ntevs);
                        zfree(tevs);
+                       ntevs = 0;
                }
-               if (ret != ntevs)
-                       return ret < 0 ? ret : ntevs;
-               ntevs = 0;
-               /* Fall through */
        }
 
+       debuginfo__delete(dinfo);
+
        if (ntevs == 0) {       /* No error but failed to find probe point. */
                pr_warning("Probe point '%s' not found.\n",
                           synthesize_perf_probe_point(&pev->point));
                return -ENOENT;
-       }
-       /* Error path : ntevs < 0 */
-       pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
-       if (ntevs < 0) {
+       } else if (ntevs < 0) {
+               /* Error path : ntevs < 0 */
+               pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
                if (ntevs == -EBADF)
                        pr_warning("Warning: No dwarf info found in the vmlinux - "
                                "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
index df4debe..0d9d6e0 100644 (file)
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
 }
 
 /* For the kernel module, we need special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                               bool adjust_offset)
 {
        int n, i;
        Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
                        if (!shdr)
                                return -ENOENT;
                        *offs = shdr->sh_addr;
+                       if (adjust_offset)
+                               *offs -= shdr->sh_offset;
                }
        }
        return 0;
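
With adjust_offset set, the helper returns sh_addr - sh_offset rather than
sh_addr alone. A worked example with made-up section values, say a .text
section with sh_addr = 0xffff0000 and sh_offset = 0x1000 (error handling
omitted):

        Dwarf_Addr offs;

        debuginfo__get_text_offset(dbg, &offs, false);  /* offs = 0xffff0000 */
        debuginfo__get_text_offset(dbg, &offs, true);   /* offs = 0xfffef000 */

The adjusted value is the delta between a file offset and the corresponding
load address, which is what the module probe post-processing above subtracts
from each trace point.
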
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
        Dwarf_Addr _addr = 0, baseaddr = 0;
        const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
        int baseline = 0, lineno = 0, ret = 0;
-       bool reloc = false;
 
-retry:
+       /* We always need to relocate the address for aranges */
+       if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+               addr += baseaddr;
        /* Find cu die */
        if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
-               if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
-                       addr += baseaddr;
-                       reloc = true;
-                       goto retry;
-               }
                pr_warning("Failed to find debug information for address %lx\n",
                           addr);
                ret = -EINVAL;
index f1d8558..2956c51 100644 (file)
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
                                struct perf_probe_point *ppt);
 
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+                              bool adjust_offset);
+
 /* Find a line range */
 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
 
index c22860a..30e1ac6 100644 (file)
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
 
        FAIL_IF(ebb_event_enable(&event));
 
-       mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+       mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
        mtspr(SPRN_PMC5, 0);
        mtspr(SPRN_PMC6, 0);
 
index 34e63cc..14142fa 100644 (file)
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
 #define VMEXIT_CYCLES 500
 #define VMENTRY_CYCLES 500
 
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+       asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
 #else
 static inline void wait_cycles(unsigned long long cycles)
 {
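
For reference, s390's brctg decrements its operand and branches back to the
label while the result is non-zero, so the new wait_cycles() is a counted
busy loop; a rough C equivalent:

        static inline void wait_cycles(unsigned long long cycles)
        {
                while (cycles--)
                        ;       /* spin for roughly `cycles` iterations */
        }
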
@@ -81,6 +91,8 @@ extern unsigned ring_size;
 /* Is there a portable way to do this? */
 #if defined(__x86_64__) || defined(__i386__)
 #define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
 #else
 #define cpu_relax() assert(0)
 #endif
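
s390 has no pause/yield-style hint to use here, so cpu_relax() falls back to
barrier(), conventionally a compiler-only memory fence along the lines of:

        #define barrier() asm volatile("" ::: "memory")
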
index 2e69ca8..29b0d39 100755 (executable)
@@ -1,12 +1,13 @@
 #!/bin/sh
 
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
 
 #run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index a2dbbcc..6a084cd 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <clocksource/arm_arch_timer.h>
 #include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
 
 #include <kvm/arm_vgic.h>
 #include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
        struct kvm_vcpu *vcpu;
 
        vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-       vcpu->arch.timer_cpu.armed = false;
-
-       WARN_ON(!kvm_timer_should_fire(vcpu));
 
        /*
         * If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
 {
        kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
+
+/*
+ * On a VHE system, trapping of physical timer and counter accesses from
+ * EL0 and EL1 only needs to be configured once, not on every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which makes those
+ * bits have no effect on host kernel execution.
+ */
+void kvm_timer_init_vhe(void)
+{
+       /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted up by 10 bits */
+       u32 cnthctl_shift = 10;
+       u64 val;
+
+       /*
+        * Disallow physical timer access for the guest.
+        * Physical counter access is allowed.
+        */
+       val = read_sysreg(cnthctl_el2);
+       val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+       val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+       write_sysreg(val, cnthctl_el2);
+}
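
To make the shift concrete: assuming the usual non-VHE bit positions
CNTHCTL_EL1PCTEN == (1 << 0) and CNTHCTL_EL1PCEN == (1 << 1), the E2H == 1
layout moves them up to bits 10 and 11, so the two updates above amount to:

        val &= ~(1UL << 11);    /* clear EL1PCEN: trap guest access to the
                                 * physical timer */
        val |=  (1UL << 10);    /* set EL1PCTEN: keep the physical counter
                                 * readable */
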
index 798866a..63e28dd 100644 (file)
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
        /* Disable the virtual timer */
        write_sysreg_el0(0, cntv_ctl);
 
-       /* Allow physical timer/counter access for the host */
-       val = read_sysreg(cnthctl_el2);
-       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
-       write_sysreg(val, cnthctl_el2);
+       /*
+        * We don't need to do this for VHE since the host kernel runs in EL2
+        * with HCR_EL2.TGE == 1, which makes those bits have no impact.
+        */
+       if (!has_vhe()) {
+               /* Allow physical timer/counter access for the host */
+               val = read_sysreg(cnthctl_el2);
+               val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        /* Clear cntvoff for the host */
        write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
        u64 val;
 
-       /*
-        * Disallow physical timer access for the guest
-        * Physical counter access is allowed
-        */
-       val = read_sysreg(cnthctl_el2);
-       val &= ~CNTHCTL_EL1PCEN;
-       val |= CNTHCTL_EL1PCTEN;
-       write_sysreg(val, cnthctl_el2);
+       /* Those bits are already configured at boot on a VHE system */
+       if (!has_vhe()) {
+               /*
+                * Disallow physical timer access for the guest
+                * Physical counter access is allowed
+                */
+               val = read_sysreg(cnthctl_el2);
+               val &= ~CNTHCTL_EL1PCEN;
+               val |= CNTHCTL_EL1PCTEN;
+               write_sysreg(val, cnthctl_el2);
+       }
 
        if (timer->enabled) {
                write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
index 5114391..c737ea0 100644 (file)
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
 
-       mutex_lock(&kvm->lock);
-
        dist->ready = false;
        dist->initialized = false;
 
        kfree(dist->spis);
        dist->nr_spis = 0;
-
-       mutex_unlock(&kvm->lock);
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
 }
 
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
 {
        struct kvm_vcpu *vcpu;
        int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
                kvm_vgic_vcpu_destroy(vcpu);
 }
 
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+       mutex_lock(&kvm->lock);
+       __kvm_vgic_destroy(kvm);
+       mutex_unlock(&kvm->lock);
+}
+
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
  * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
                ret = vgic_v2_map_resources(kvm);
        else
                ret = vgic_v3_map_resources(kvm);
+
+       if (ret)
+               __kvm_vgic_destroy(kvm);
+
 out:
        mutex_unlock(&kvm->lock);
        return ret;
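
The locking rule after these vgic hunks: kvm->lock is always taken by the
outermost caller, and __kvm_vgic_destroy() itself is lock-free. A simplified
sketch of the failure path in kvm_vgic_map_resources(), with map_resources()
standing in for the v2/v3 variants:

        mutex_lock(&kvm->lock);
        ret = map_resources(kvm);               /* v2 or v3 variant */
        if (ret)
                __kvm_vgic_destroy(kvm);        /* lock already held; calling
                                                 * kvm_vgic_destroy() here would
                                                 * deadlock on kvm->lock */
        mutex_unlock(&kvm->lock);

This is also why the map_resources implementations below no longer call
kvm_vgic_destroy() on their error paths.
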
index 9bab867..834137e 100644 (file)
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }
 
index 5c9f974..e6b03fd 100644 (file)
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
        dist->ready = true;
 
 out:
-       if (ret)
-               kvm_vgic_destroy(kvm);
        return ret;
 }