Merge branches 'x86/mm', 'x86/build', 'x86/apic' and 'x86/platform' into x86/core...
authorIngo Molnar <mingo@kernel.org>
Wed, 3 Jun 2015 08:05:18 +0000 (10:05 +0200)
committerIngo Molnar <mingo@kernel.org>
Wed, 3 Jun 2015 08:05:18 +0000 (10:05 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
439 files changed:
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/devicetree/bindings/clock/silabs,si5351.txt
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt [moved from Documentation/devicetree/bindings/mtd/m25p80.txt with 85% similarity]
Documentation/devicetree/bindings/net/cdns-emac.txt
Documentation/hwmon/tmp401
Documentation/target/tcmu-design.txt
Documentation/virtual/kvm/mmu.txt
Documentation/x86/mtrr.txt
Documentation/x86/pat.txt
MAINTAINERS
Makefile
arch/alpha/boot/Makefile
arch/alpha/boot/main.c
arch/alpha/boot/stdio.c [new file with mode: 0644]
arch/alpha/boot/tools/objstrip.c
arch/alpha/include/asm/types.h
arch/alpha/include/asm/unistd.h
arch/alpha/include/uapi/asm/unistd.h
arch/alpha/kernel/err_ev6.c
arch/alpha/kernel/irq.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/process.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/srmcons.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/systbls.S
arch/alpha/kernel/traps.c
arch/alpha/oprofile/op_model_ev4.c
arch/alpha/oprofile/op_model_ev5.c
arch/alpha/oprofile/op_model_ev6.c
arch/alpha/oprofile/op_model_ev67.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-boneblack.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/zynq-7000.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event_cpu.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-pxa/pxa_cplds_irqs.c
arch/arm/mm/mmu.c
arch/arm/xen/enlighten.c
arch/ia64/pci/pci.c
arch/mips/ath79/prom.c
arch/mips/configs/fuloong2e_defconfig
arch/mips/kernel/irq.c
arch/mips/kernel/smp-bmips.c
arch/mips/lib/strnlen_user.S
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable_64.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/prng.c
arch/s390/include/asm/pgtable.h
arch/s390/net/bpf_jit_comp.c
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/trap_block.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/leon_pci_grpci2.c
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/mm/init_64.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/include/asm/io.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mtrr.h
arch/x86/include/asm/pat.h
arch/x86/include/asm/special_insns.h
arch/x86/include/uapi/asm/msr-index.h
arch/x86/include/uapi/asm/mtrr.h
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/mtrr/mtrr.h
arch/x86/kernel/i387.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pat_internal.h
arch/x86/mm/pat_rbtree.c
arch/x86/mm/pgtable.c
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/acpi.c
arch/x86/pci/i386.c
arch/x86/platform/Makefile
arch/x86/platform/atom/Makefile [new file with mode: 0644]
arch/x86/platform/atom/punit_atom_debug.c [new file with mode: 0644]
arch/xtensa/include/asm/dma-mapping.h
block/blk-core.c
crypto/Kconfig
crypto/algif_aead.c
drivers/block/nvme-scsi.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bus/mips_cdmm.c
drivers/clk/clk-si5351.c
drivers/clk/clk.c
drivers/clk/qcom/gcc-msm8916.c
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5433.c
drivers/gpio/gpio-kempld.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_fimd.h [deleted file]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/edp/edp_aux.c
drivers/gpu/drm/msm/edp/edp_connector.c
drivers/gpu/drm/msm/edp/edp_ctrl.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/vgem/Makefile
drivers/gpu/drm/vgem/vgem_dma_buf.c [deleted file]
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vgem/vgem_drv.h
drivers/hid/hid-ids.h
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hwmon/nct6683.c
drivers/hwmon/nct6775.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/tmp401.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/joydev.c
drivers/input/mouse/Kconfig
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/stmpe-ts.c
drivers/input/touchscreen/sx8654.c
drivers/irqchip/irq-gic-v3-its.c
drivers/lguest/core.c
drivers/md/bitmap.c
drivers/md/dm-mpath.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/mfd/da9052-core.c
drivers/mmc/host/atmel-mci.c
drivers/mtd/devices/m25p80.c
drivers/mtd/tests/readtest.c
drivers/net/bonding/bond_options.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/usb/cdc_ncm.c
drivers/net/vxlan.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pwm/pwm-img.c
drivers/regulator/da9052-regulator.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/ssb/driver_pcicore.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/thermal/armada_thermal.c
drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thermal/ti-soc-thermal/ti-bandgap.h
drivers/tty/hvc/hvc_xen.c
drivers/tty/mips_ejtag_fdc.c
drivers/vhost/scsi.c
drivers/video/backlight/pwm_bl.c
drivers/xen/events/events_base.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/extent-tree.c
fs/btrfs/volumes.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsfs.c
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2pdu.c
fs/dcache.c
fs/nfs/nfs4proc.c
fs/nfs/write.c
fs/omfs/bitmap.c
fs/omfs/inode.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/super.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_mount.c
include/asm-generic/io.h
include/linux/blkdev.h
include/linux/brcmphy.h
include/linux/cpumask.h
include/linux/hid-sensor-hub.h
include/linux/io.h
include/linux/ktime.h
include/linux/percpu_counter.h
include/linux/platform_data/si5351.h
include/linux/rhashtable.h
include/linux/skbuff.h
include/linux/tcp.h
include/net/inet_connection_sock.h
include/net/mac80211.h
include/net/sctp/sctp.h
include/target/target_core_backend.h
include/target/target_core_configfs.h
include/target/target_core_fabric.h
include/trace/events/kmem.h
include/uapi/linux/netfilter/nf_conntrack_tcp.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/virtio_balloon.h
include/xen/events.h
kernel/module.c
kernel/sched/core.c
kernel/time/hrtimer.c
kernel/watchdog.c
lib/cpumask.c
lib/percpu_counter.c
lib/rhashtable.c
net/8021q/vlan.c
net/bluetooth/hci_core.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_stp_timer.c
net/caif/caif_socket.c
net/ceph/osd_client.c
net/core/rtnetlink.c
net/dsa/dsa.c
net/ipv4/esp4.c
net/ipv4/fib_trie.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv4/udp.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/rx.c
net/mac80211/util.c
net/mac80211/wep.c
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netlink/af_netlink.c
net/sched/cls_api.c
net/sched/sch_api.c
net/switchdev/switchdev.c
net/unix/af_unix.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
scripts/gdb/linux/modules.py
sound/atmel/ac97c.c
sound/core/pcm_lib.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/thinkpad_helper.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/soc-dapm.c
sound/usb/quirks.c
tools/net/bpf_jit_disasm.c
tools/power/x86/turbostat/turbostat.c

index 99983e6..da95513 100644 (file)
@@ -162,7 +162,7 @@ Description:        Discover CPUs in the same CPU frequency coordination domain
 What:          /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
 Date:          August 2008
 KernelVersion: 2.6.27
-Contact:       discuss@x86-64.org
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Disable L3 cache indices
 
                These files exist in every CPU's cache/index3 directory. Each
index c40711e..28b2830 100644 (file)
@@ -17,7 +17,8 @@ Required properties:
 - #clock-cells: from common clock binding; shall be set to 1.
 - clocks: from common clock binding; list of parent clock
   handles, shall be xtal reference clock or xtal and clkin for
-  si5351c only.
+  si5351c only. Corresponding clock input names are "xtal" and
+  "clkin" respectively.
 - #address-cells: shall be set to 1.
 - #size-cells: shall be set to 0.
 
@@ -71,6 +72,7 @@ i2c-master-node {
 
                /* connect xtal input to 25MHz reference */
                clocks = <&ref25>;
+               clock-names = "xtal";
 
                /* connect xtal input as source of pll0 and pll1 */
                silabs,pll-source = <0 0>, <1 0>;
@@ -8,8 +8,8 @@ Required properties:
                is not Linux-only, but in case of Linux, see the "m25p_ids"
                table in drivers/mtd/devices/m25p80.c for the list of supported
                chips.
-               Must also include "nor-jedec" for any SPI NOR flash that can be
-               identified by the JEDEC READ ID opcode (0x9F).
+               Must also include "jedec,spi-nor" for any SPI NOR flash that can
+               be identified by the JEDEC READ ID opcode (0x9F).
 - reg : Chip-Select number
 - spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
 
@@ -25,7 +25,7 @@ Example:
        flash: m25p80@0 {
                #address-cells = <1>;
                #size-cells = <1>;
-               compatible = "spansion,m25p80", "nor-jedec";
+               compatible = "spansion,m25p80", "jedec,spi-nor";
                reg = <0>;
                spi-max-frequency = <40000000>;
                m25p,fast-read;
index abd67c1..4451ee9 100644 (file)
@@ -3,7 +3,8 @@
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{emac}"
   Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
-  or the generic form: "cdns,emac".
+  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+  Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: see ethernet.txt file in the same directory.
index 8eb88e9..711f75e 100644 (file)
@@ -20,7 +20,7 @@ Supported chips:
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
   * Texas Instruments TMP435
     Prefix: 'tmp435'
-    Addresses scanned: I2C 0x37, 0x48 - 0x4f
+    Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
 
 Authors:
index 43e94ea..263b907 100644 (file)
@@ -15,8 +15,7 @@ Contents:
   a) Discovering and configuring TCMU uio devices
   b) Waiting for events on the device(s)
   c) Managing the command ring
-3) Command filtering and pass_level
-4) A final note
+3) A final note
 
 
 TCM Userspace Design
@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
   /* Process events from cmd ring until we catch up with cmd_head */
   while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
 
-    if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+    if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
       uint8_t *cdb = (void *)mb + ent->req.cdb_off;
       bool success = true;
 
@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
         ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
       }
     }
+    else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
+      /* Tell the kernel we didn't handle unknown opcodes */
+      ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
+    }
     else {
-      /* Do nothing for PAD entries */
+      /* Do nothing for PAD entries except update cmd_tail */
     }
 
     /* update cmd_tail */
@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
 
-Command filtering and pass_level
---------------------------------
-
-TCMU supports a "pass_level" option with valid values of 0 or 1.  When
-the value is 0 (the default), nearly all SCSI commands received for
-the device are passed through to the handler. This allows maximum
-flexibility but increases the amount of code required by the handler,
-to support all mandatory SCSI commands. If pass_level is set to 1,
-then only IO-related commands are presented, and the rest are handled
-by LIO's in-kernel command emulation. The commands presented at level
-1 include all versions of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
-
 A final note
 ------------
 
index 53838d9..c59bd9b 100644 (file)
@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations.  See role.direct.
@@ -344,10 +348,16 @@ on fault type:
 
 (user write faults generate a #PF)
 
-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it.  We handle this by also setting spte.nx.  If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it.  We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled. We set
+  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case. Note,
+  here we do not care the case that CR4.SMAP is enabled since KVM will
+  directly inject #PF to guest due to failed permission check.
 
 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make
index cc071dc..860bc3a 100644 (file)
@@ -1,7 +1,19 @@
 MTRR (Memory Type Range Register) control
-3 Jun 1999
-Richard Gooch
-<rgooch@atnf.csiro.au>
+
+Richard Gooch <rgooch@atnf.csiro.au> - 3 Jun 1999
+Luis R. Rodriguez <mcgrof@do-not-panic.com> - April 9, 2015
+
+===============================================================================
+Phasing out MTRR use
+
+MTRR use is replaced on modern x86 hardware with PAT. Over time the only type
+of effective MTRR that is expected to be supported will be for write-combining.
+As MTRR use is phased out device drivers should use arch_phys_wc_add() to make
+MTRR effective on non-PAT systems while a no-op on PAT enabled systems.
+
+For details refer to Documentation/x86/pat.txt.
+
+===============================================================================
 
   On Intel P6 family processors (Pentium Pro, Pentium II and later)
   the Memory Type Range Registers (MTRRs) may be used to control
index cf08c9f..521bd8a 100644 (file)
@@ -34,6 +34,8 @@ ioremap                |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_cache          |    --    |    WB      |       WB         |
                        |          |            |                  |
+ioremap_uc             |    --    |    UC      |       UC         |
+                       |          |            |                  |
 ioremap_nocache        |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_wc             |    --    |    --      |       WC         |
@@ -102,7 +104,38 @@ wants to export a RAM region, it has to do set_memory_uc() or set_memory_wc()
 as step 0 above and also track the usage of those pages and use set_memory_wb()
 before the page is freed to free pool.
 
-
+MTRR effects on PAT / non-PAT systems
+-------------------------------------
+
+The following table provides the effects of using write-combining MTRRs when
+using ioremap*() calls on x86 for both non-PAT and PAT systems. Ideally
+mtrr_add() usage will be phased out in favor of arch_phys_wc_add() which will
+be a no-op on PAT enabled systems. The region over which a arch_phys_wc_add()
+is made, should already have been ioremapped with WC attributes or PAT entries,
+this can be done by using ioremap_wc() / set_memory_wc().  Devices which
+combine areas of IO memory desired to remain uncacheable with areas where
+write-combining is desirable should consider use of ioremap_uc() followed by
+set_memory_wc() to white-list effective write-combined areas.  Such use is
+nevertheless discouraged as the effective memory type is considered
+implementation defined, yet this strategy can be used as last resort on devices
+with size-constrained regions where otherwise MTRR write-combining would
+otherwise not be effective.
+
+----------------------------------------------------------------------
+MTRR Non-PAT   PAT    Linux ioremap value        Effective memory type
+----------------------------------------------------------------------
+                                                  Non-PAT |  PAT
+     PAT
+     |PCD
+     ||PWT
+     |||
+WC   000      WB      _PAGE_CACHE_MODE_WB            WC   |   WC
+WC   001      WC      _PAGE_CACHE_MODE_WC            WC*  |   WC
+WC   010      UC-     _PAGE_CACHE_MODE_UC_MINUS      WC*  |   UC
+WC   011      UC      _PAGE_CACHE_MODE_UC            UC   |   UC
+----------------------------------------------------------------------
+
+(*) denotes implementation defined and is discouraged
 
 Notes:
 
index f8e0afb..e308718 100644 (file)
@@ -2427,7 +2427,6 @@ L:        linux-security-module@vger.kernel.org
 S:     Supported
 F:     include/linux/capability.h
 F:     include/uapi/linux/capability.h
-F:     security/capability.c
 F:     security/commoncap.c
 F:     kernel/capability.c
 
@@ -3825,10 +3824,11 @@ M:      David Woodhouse <dwmw2@infradead.org>
 L:     linux-embedded@vger.kernel.org
 S:     Maintained
 
-EMULEX LPFC FC SCSI DRIVER
-M:     James Smart <james.smart@emulex.com>
+EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
+M:     James Smart <james.smart@avagotech.com>
+M:     Dick Kennedy <dick.kennedy@avagotech.com>
 L:     linux-scsi@vger.kernel.org
-W:     http://sourceforge.net/projects/lpfcxxxx
+W:     http://www.avagotech.com
 S:     Supported
 F:     drivers/scsi/lpfc/
 
@@ -4536,7 +4536,7 @@ M:        Jean Delvare <jdelvare@suse.de>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
-T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -8829,9 +8829,11 @@ F:       drivers/misc/phantom.c
 F:     include/uapi/linux/phantom.h
 
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
+M:     Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
+M:     Minh Tran <minh.tran@avagotech.com>
+M:     John Soni Jose <sony.john-n@avagotech.com>
 L:     linux-scsi@vger.kernel.org
-W:     http://www.emulex.com
+W:     http://www.avagotech.com
 S:     Supported
 F:     drivers/scsi/be2iscsi/
 
@@ -10585,8 +10587,7 @@ F:      drivers/virtio/virtio_input.c
 F:     include/uapi/linux/virtio_input.h
 
 VIA RHINE NETWORK DRIVER
-M:     Roger Luethi <rl@hellgate.ch>
-S:     Maintained
+S:     Orphan
 F:     drivers/net/ethernet/via/via-rhine.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
index eae539d..aee7e5c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
index cd14388..8399bd0 100644 (file)
@@ -14,6 +14,9 @@ targets               := vmlinux.gz vmlinux \
                   tools/bootpzh bootloader bootpheader bootpzheader 
 OBJSTRIP       := $(obj)/tools/objstrip
 
+HOSTCFLAGS     := -Wall -I$(objtree)/usr/include
+BOOTCFLAGS     += -I$(obj) -I$(srctree)/$(obj)
+
 # SRM bootable image.  Copy to offset 512 of a partition.
 $(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
        ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ 
@@ -96,13 +99,14 @@ $(obj)/tools/bootph: $(obj)/bootpheader $(OBJSTRIP) FORCE
 $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
        $(call if_changed,objstrip)
 
-LDFLAGS_bootloader   := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpheader  := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpzheader := -static -uvsprintf -T  #-N -relax
+LDFLAGS_bootloader   := -static -T # -N -relax
+LDFLAGS_bootloader   := -static -T # -N -relax
+LDFLAGS_bootpheader  := -static -T # -N -relax
+LDFLAGS_bootpzheader := -static -T # -N -relax
 
-OBJ_bootlx   := $(obj)/head.o $(obj)/main.o
-OBJ_bootph   := $(obj)/head.o $(obj)/bootp.o
-OBJ_bootpzh  := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o
+OBJ_bootlx   := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o
+OBJ_bootph   := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o
+OBJ_bootpzh  := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o
 
 $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE
        $(call if_changed,ld)
index 3baf2d1..dd6eb4a 100644 (file)
@@ -19,7 +19,6 @@
 
 #include "ksize.h"
 
-extern int vsprintf(char *, const char *, va_list);
 extern unsigned long switch_to_osf_pal(unsigned long nr,
        struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
        unsigned long *vptb);
diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c
new file mode 100644 (file)
index 0000000..f844dae
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+
/*
 * strnlen - length of a string, scanning at most @count bytes.
 *
 * Unlike strlen(), never reads past s[count - 1], so it is safe on
 * buffers that may not be NUL-terminated.  Returns the number of
 * characters before the first NUL, or @count if none was found.
 */
size_t strnlen(const char * s, size_t count)
{
	const char *end = s;

	while (count != 0 && *end != '\0') {
		++end;
		--count;
	}
	return end - s;
}
+
+# define do_div(n, base) ({                                            \
+       unsigned int __base = (base);                                   \
+       unsigned int __rem;                                             \
+       __rem = ((unsigned long long)(n)) % __base;                     \
+       (n) = ((unsigned long long)(n)) / __base;                       \
+       __rem;                                                          \
+})
+
+
/*
 * skip_atoi - parse an unsigned decimal number at **s.
 *
 * Advances *s past every digit consumed and returns the value read
 * (0 if *s does not start with a digit).  Used for literal field
 * widths and precisions in the format string.
 */
static int skip_atoi(const char **s)
{
	int value = 0;

	while (**s >= '0' && **s <= '9')
		value = value * 10 + (*(*s)++ - '0');
	return value;
}
+
+#define ZEROPAD        1               /* pad with zero */
+#define SIGN   2               /* unsigned/signed long */
+#define PLUS   4               /* show plus */
+#define SPACE  8               /* space if plus */
+#define LEFT   16              /* left justified */
+#define SPECIAL        32              /* 0x */
+#define LARGE  64              /* use 'ABCDEF' instead of 'abcdef' */
+
/*
 * number - render one integer conversion into @str.
 * @str:       output position; characters are written sequentially
 * @num:       value to print; for signed conversions the SIGN bit in
 *             @type selects sign handling here
 * @base:      numeric base, 2..36; anything else returns NULL
 * @size:      minimum field width
 * @precision: minimum number of digits
 * @type:      mask of ZEROPAD/SIGN/PLUS/SPACE/LEFT/SPECIAL/LARGE
 *
 * Returns the advanced output pointer (one past the last character
 * written), or NULL for an invalid base.
 */
static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
{
	char c,sign,tmp[66];	/* tmp holds digits in reverse; 66 covers 64-bit binary plus slack */
	const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
	int i;

	if (type & LARGE)
		digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	if (type & LEFT)
		type &= ~ZEROPAD;	/* left justification overrides zero padding */
	if (base < 2 || base > 36)
		return 0;
	c = (type & ZEROPAD) ? '0' : ' ';	/* padding character */
	sign = 0;
	if (type & SIGN) {
		if ((signed long long)num < 0) {
			sign = '-';
			num = - (signed long long)num;
			size--;
		} else if (type & PLUS) {
			sign = '+';
			size--;
		} else if (type & SPACE) {
			sign = ' ';
			size--;
		}
	}
	if (type & SPECIAL) {
		/* reserve room for the "0x" / "0" prefix */
		if (base == 16)
			size -= 2;
		else if (base == 8)
			size--;
	}
	/* generate the digits, least significant first */
	i = 0;
	if (num == 0)
		tmp[i++]='0';
	else while (num != 0) {
		tmp[i++] = digits[do_div(num, base)];
	}
	if (i > precision)
		precision = i;
	size -= precision;
	/* space-pad on the left unless zero-padding or left-justifying */
	if (!(type&(ZEROPAD+LEFT)))
		while(size-->0)
			*str++ = ' ';
	if (sign)
		*str++ = sign;
	if (type & SPECIAL) {
		if (base==8)
			*str++ = '0';
		else if (base==16) {
			*str++ = '0';
			*str++ = digits[33];	/* index 33 is 'x' or 'X', tracking LARGE */
		}
	}
	/* zero (or space) padding goes after the sign and prefix */
	if (!(type & LEFT))
		while (size-- > 0)
			*str++ = c;
	while (i < precision--)
		*str++ = '0';	/* extra zeroes to reach the requested precision */
	while (i-- > 0)
		*str++ = tmp[i];	/* emit digits, most significant first */
	while (size-- > 0)
		*str++ = ' ';	/* trailing spaces for left justification */
	return str;
}
+
/*
 * vsprintf - minimal printf-style formatter for the boot loader.
 * @buf:  destination buffer; must be large enough for the formatted
 *        result (no bounds checking is performed)
 * @fmt:  printf-style format string
 * @args: the variable arguments, as a va_list
 *
 * Supports %c %s %p %n %% and the integer conversions %o %x %X %d %i
 * %u, with the flags '-', '+', ' ', '#', '0', field width and
 * precision (literal or '*'), and the length qualifiers 'h', 'l',
 * 'll' (tracked internally as 'q'), 'L' and 'Z' (size_t).
 *
 * Returns the number of characters written, not counting the
 * terminating NUL.
 */
int vsprintf(char *buf, const char *fmt, va_list args)
{
	int len;
	unsigned long long num;
	int i, base;
	char * str;
	const char *s;

	int flags;		/* flags to number() */

	int field_width;	/* width of output field */
	int precision;		/* min. # of digits for integers; max
				   number of chars for from string */
	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
				/* 'z' support added 23/7/1999 S.H.    */
				/* 'z' changed to 'Z' --davidm 1/25/99 */


	for (str=buf ; *fmt ; ++fmt) {
		/* ordinary characters are copied through unchanged */
		if (*fmt != '%') {
			*str++ = *fmt;
			continue;
		}

		/* process flags */
		flags = 0;
		repeat:
			++fmt;		/* this also skips first '%' */
			switch (*fmt) {
				case '-': flags |= LEFT; goto repeat;
				case '+': flags |= PLUS; goto repeat;
				case ' ': flags |= SPACE; goto repeat;
				case '#': flags |= SPECIAL; goto repeat;
				case '0': flags |= ZEROPAD; goto repeat;
				}

		/* get field width */
		field_width = -1;
		if ('0' <= *fmt && *fmt <= '9')
			field_width = skip_atoi(&fmt);
		else if (*fmt == '*') {
			++fmt;
			/* it's the next argument */
			field_width = va_arg(args, int);
			if (field_width < 0) {
				/* negative width means left-justify */
				field_width = -field_width;
				flags |= LEFT;
			}
		}

		/* get the precision */
		precision = -1;
		if (*fmt == '.') {
			++fmt;
			if ('0' <= *fmt && *fmt <= '9')
				precision = skip_atoi(&fmt);
			else if (*fmt == '*') {
				++fmt;
				/* it's the next argument */
				precision = va_arg(args, int);
			}
			if (precision < 0)
				precision = 0;
		}

		/* get the conversion qualifier */
		qualifier = -1;
		if (*fmt == 'l' && *(fmt + 1) == 'l') {
			qualifier = 'q';	/* 'q' stands in for "ll" */
			fmt += 2;
		} else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L'
			|| *fmt == 'Z') {
			qualifier = *fmt;
			++fmt;
		}

		/* default base */
		base = 10;

		switch (*fmt) {
		case 'c':
			if (!(flags & LEFT))
				while (--field_width > 0)
					*str++ = ' ';
			*str++ = (unsigned char) va_arg(args, int);
			while (--field_width > 0)
				*str++ = ' ';
			continue;

		case 's':
			s = va_arg(args, char *);
			if (!s)
				s = "<NULL>";

			/*
			 * precision is -1 when unspecified; strnlen()
			 * takes a size_t count, so -1 wraps to SIZE_MAX
			 * and the whole string is measured.
			 */
			len = strnlen(s, precision);

			if (!(flags & LEFT))
				while (len < field_width--)
					*str++ = ' ';
			for (i = 0; i < len; ++i)
				*str++ = *s++;
			while (len < field_width--)
				*str++ = ' ';
			continue;

		case 'p':
			if (field_width == -1) {
				/* default: zero-padded full pointer width */
				field_width = 2*sizeof(void *);
				flags |= ZEROPAD;
			}
			str = number(str,
				(unsigned long) va_arg(args, void *), 16,
				field_width, precision, flags);
			continue;


		case 'n':
			/* store the count of characters emitted so far */
			if (qualifier == 'l') {
				long * ip = va_arg(args, long *);
				*ip = (str - buf);
			} else if (qualifier == 'Z') {
				size_t * ip = va_arg(args, size_t *);
				*ip = (str - buf);
			} else {
				int * ip = va_arg(args, int *);
				*ip = (str - buf);
			}
			continue;

		case '%':
			*str++ = '%';
			continue;

		/* integer number formats - set up the flags and "break" */
		case 'o':
			base = 8;
			break;

		case 'X':
			flags |= LARGE;
			/* fallthrough */
		case 'x':
			base = 16;
			break;

		case 'd':
		case 'i':
			flags |= SIGN;
			/* fallthrough */
		case 'u':
			break;

		default:
			/* unknown conversion: emit it literally */
			*str++ = '%';
			if (*fmt)
				*str++ = *fmt;
			else
				--fmt;	/* trailing '%': back up so the loop terminates */
			continue;
		}
		/* fetch the integer argument at the width the qualifier demands */
		if (qualifier == 'l') {
			num = va_arg(args, unsigned long);
			if (flags & SIGN)
				num = (signed long) num;
		} else if (qualifier == 'q') {
			num = va_arg(args, unsigned long long);
			if (flags & SIGN)
				num = (signed long long) num;
		} else if (qualifier == 'Z') {
			num = va_arg(args, size_t);
		} else if (qualifier == 'h') {
			num = (unsigned short) va_arg(args, int);
			if (flags & SIGN)
				num = (signed short) num;
		} else {
			num = va_arg(args, unsigned int);
			if (flags & SIGN)
				num = (signed int) num;
		}
		str = number(str, num, base, field_width, precision, flags);
	}
	*str = '\0';
	return str-buf;
}
+
/*
 * sprintf - format into @buf, printf style.
 *
 * Thin varargs wrapper around vsprintf().  Returns the number of
 * characters written, excluding the terminating NUL.
 */
int sprintf(char * buf, const char *fmt, ...)
{
	va_list ap;
	int written;

	va_start(ap, fmt);
	written = vsprintf(buf, fmt, ap);
	va_end(ap);
	return written;
}
index 367d53d..dee8269 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/param.h>
 #ifdef __ELF__
 # include <linux/elf.h>
+# define elfhdr elf64_hdr
+# define elf_phdr elf64_phdr
+# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
 #endif
 
 /* bootfile size must be multiple of BLOCK_SIZE: */
index f61e1a5..4cb4b6d 100644 (file)
@@ -2,6 +2,5 @@
 #define _ALPHA_TYPES_H
 
 #include <asm-generic/int-ll64.h>
-#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
index c509d30..a56e608 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS                    511
+#define NR_SYSCALLS                    514
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index d214a03..aa33bf5 100644 (file)
 #define __NR_sched_setattr             508
 #define __NR_sched_getattr             509
 #define __NR_renameat2                 510
+#define __NR_getrandom                 511
+#define __NR_memfd_create              512
+#define __NR_execveat                  513
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
index 253cf1a..51267ac 100644 (file)
@@ -6,7 +6,6 @@
  *     Error handling code supporting Alpha systems
  */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 
 #include <asm/io.h>
index 7b2be25..51f2c86 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
index e51f578..36dc91a 100644 (file)
@@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
        if (tv) {
                if (get_tv32((struct timeval *)&kts, tv))
                        return -EFAULT;
+               kts.tv_nsec *= 1000;
        }
        if (tz) {
                if (copy_from_user(&ktz, tz, sizeof(*tz)))
                        return -EFAULT;
        }
 
-       kts.tv_nsec *= 1000;
-
        return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
index 1941a07..84d1326 100644 (file)
@@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task)
 }
 
 /*
- * Copy an alpha thread..
+ * Copy architecture-specific thread state
  */
-
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long arg,
+           unsigned long kthread_arg,
            struct task_struct *p)
 {
        extern void ret_from_fork(void);
@@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
-               childstack->r10 = arg;
+               childstack->r10 = kthread_arg;
                childregs->hae = alpha_mv.hae_cache,
                childti->pcb.usp = 0;
                return 0;
index 99ac36d..2f24447 100644 (file)
@@ -63,7 +63,6 @@ static struct {
 enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
-       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
 };
 
@@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier)
        return -EINVAL;
 }
 
-\f
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
@@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs)
                        generic_smp_call_function_interrupt();
                        break;
 
-               case IPI_CALL_FUNC_SINGLE:
-                       generic_smp_call_function_single_interrupt();
-                       break;
-
                case IPI_CPU_STOP:
                        halt();
 
@@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 static void
index 6f01d9a..72b5951 100644 (file)
@@ -237,8 +237,7 @@ srmcons_init(void)
 
        return -ENODEV;
 }
-
-module_init(srmcons_init);
+device_initcall(srmcons_init);
 
 \f
 /*
index f21d61f..24e41bd 100644 (file)
@@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
        irq = intline;
 
-       msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       msi_loc = dev->msi_cap;
        msg_ctl = 0;
        if (msi_loc) 
                pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
index 2478971..9b62e3f 100644 (file)
@@ -529,6 +529,9 @@ sys_call_table:
        .quad sys_sched_setattr
        .quad sys_sched_getattr
        .quad sys_renameat2                     /* 510 */
+       .quad sys_getrandom
+       .quad sys_memfd_create
+       .quad sys_execveat
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 9c4c189..74aceea 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/tty.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/ratelimit.h>
 
index 18aa9b4..086a0d5 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index c32f8a0..c300f5e 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 1c84cc2..02edf59 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 34a57a1..adb1744 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 86217db..992736b 100644 (file)
@@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
        imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
        imx25-karo-tx25.dtb \
        imx25-pdk.dtb
-dtb-$(CONFIG_SOC_IMX31) += \
+dtb-$(CONFIG_SOC_IMX27) += \
        imx27-apf27.dtb \
        imx27-apf27dev.dtb \
        imx27-eukrea-mbimxsd27-baseboard.dtb \
index 5c42d25..901739f 100644 (file)
@@ -80,7 +80,3 @@
                status = "okay";
        };
 };
-
-&rtc {
-       system-power-controller;
-};
index 87fc7a3..156d05e 100644 (file)
        wlcore: wlcore@2 {
                compatible = "ti,wl1271";
                reg = <2>;
-               interrupt-parent = <&gpio1>;
+               interrupt-parent = <&gpio0>;
                interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
                ref-clock-frequency = <38400000>;
        };
index 173ffa4..792394d 100644 (file)
 
                        display-timings {
                                timing-0 {
-                                       clock-frequency = <0>;
+                                       clock-frequency = <57153600>;
                                        hactive = <720>;
                                        vactive = <1280>;
                                        hfront-porch = <5>;
index 6951b66..bc215e4 100644 (file)
 
                        fec: ethernet@1002b000 {
                                compatible = "fsl,imx27-fec";
-                               reg = <0x1002b000 0x4000>;
+                               reg = <0x1002b000 0x1000>;
                                interrupts = <50>;
                                clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
                                         <&clks IMX27_CLK_FEC_AHB_GATE>;
index 134d3f2..921de66 100644 (file)
        nand@0,0 {
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
                nand-bus-width = <16>;
+               gpmc,device-width = <2>;
+               ti,nand-ecc-opt = "sw";
 
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
index a5cd2ed..9ea54b3 100644 (file)
                };
 
                gem0: ethernet@e000b000 {
-                       compatible = "cdns,gem";
+                       compatible = "cdns,zynq-gem";
                        reg = <0xe000b000 0x1000>;
                        status = "disabled";
                        interrupts = <0 22 4>;
                };
 
                gem1: ethernet@e000c000 {
-                       compatible = "cdns,gem";
+                       compatible = "cdns,zynq-gem";
                        reg = <0xe000c000 0x1000>;
                        status = "disabled";
                        interrupts = <0 45 4>;
index 0ca4a3e..fbbb191 100644 (file)
@@ -429,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_ISP1760=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
index f8ccc21..4e7f40c 100644 (file)
@@ -33,7 +33,9 @@ ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq                             @ disable interrupts
-       ldr     r1, [tsk, #TI_FLAGS]
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+       tst     r1, #_TIF_SYSCALL_WORK
+       bne     __sys_trace_return
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
        asm_trace_hardirqs_on
index 213919b..3b8c283 100644 (file)
@@ -304,16 +304,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int of_pmu_irq_cfg(struct platform_device *pdev)
 {
        int i, irq;
-       int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-
-       if (!irqs)
-               return -ENOMEM;
+       int *irqs;
 
        /* Don't bother with PPIs; they're already affine */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0 && irq_is_percpu(irq))
                return 0;
 
+       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+       if (!irqs)
+               return -ENOMEM;
+
        for (i = 0; i < pdev->num_resources; ++i) {
                struct device_node *dn;
                int cpu;
index 4d60005..6d0893a 100644 (file)
@@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void)
        struct device_node *np;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
-       if (WARN_ON(!np ||
-                   !of_find_property(np, "interrupt-controller", NULL)))
-               pr_warn("Outdated DT detected, system is about to crash!!!\n");
+       if (WARN_ON(!np))
+               return;
+
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+               pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+               /* map GPC, so that at least CPUidle and WARs keep working */
+               gpc_base = of_iomap(np, 0);
+       }
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev)
        struct regulator *pu_reg;
        int ret;
 
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
+               return 0;
+
        pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
        if (PTR_ERR(pu_reg) == -ENODEV)
                pu_reg = NULL;
index f1aeb54..2385052 100644 (file)
@@ -107,7 +107,7 @@ static int cplds_probe(struct platform_device *pdev)
        struct resource *res;
        struct cplds *fpga;
        int ret;
-       unsigned int base_irq = 0;
+       int base_irq;
        unsigned long irqflags = 0;
 
        fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
index 4e6ef89..7186382 100644 (file)
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
                        }
 
                        /*
-                        * Find the first non-section-aligned page, and point
+                        * Find the first non-pmd-aligned page, and point
                         * memblock_limit at it. This relies on rounding the
-                        * limit down to be section-aligned, which happens at
-                        * the end of this function.
+                        * limit down to be pmd-aligned, which happens at the
+                        * end of this function.
                         *
                         * With this algorithm, the start or end of almost any
-                        * bank can be non-section-aligned. The only exception
-                        * is that the start of the bank 0 must be section-
+                        * bank can be non-pmd-aligned. The only exception is
+                        * that the start of the bank 0 must be section-
                         * aligned, since otherwise memory would need to be
                         * allocated when mapping the start of bank 0, which
                         * occurs before any free memory is mapped.
                         */
                        if (!memblock_limit) {
-                               if (!IS_ALIGNED(block_start, SECTION_SIZE))
+                               if (!IS_ALIGNED(block_start, PMD_SIZE))
                                        memblock_limit = block_start;
-                               else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+                               else if (!IS_ALIGNED(block_end, PMD_SIZE))
                                        memblock_limit = arm_lowmem_limit;
                        }
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
        high_memory = __va(arm_lowmem_limit - 1) + 1;
 
        /*
-        * Round the memblock limit down to a section size.  This
+        * Round the memblock limit down to a pmd size.  This
         * helps to ensure that we will allocate memory from the
-        * last full section, which should be mapped.
+        * last full pmd, which should be mapped.
         */
        if (memblock_limit)
-               memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+               memblock_limit = round_down(memblock_limit, PMD_SIZE);
        if (!memblock_limit)
                memblock_limit = arm_lowmem_limit;
 
index 224081c..7d0f070 100644 (file)
@@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
 void xen_arch_post_suspend(int suspend_cancelled) { }
 void xen_timer_resume(void) { }
 void xen_arch_resume(void) { }
+void xen_arch_suspend(void) { }
 
 
 /* In the hypervisor.S file. */
index d4e162d..7cc3be9 100644 (file)
@@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_controller *controller = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_controller *controller = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       }
        return 0;
 }
 
index e1fe630..597899a 100644 (file)
@@ -1,6 +1,7 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X specific prom routines
  *
+ *  Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
@@ -25,12 +26,14 @@ void __init prom_init(void)
 {
        fw_init_cmdline();
 
+#ifdef CONFIG_BLK_DEV_INITRD
        /* Read the initrd address from the firmware environment */
        initrd_start = fw_getenvl("initrd_start");
        if (initrd_start) {
                initrd_start = KSEG0ADDR(initrd_start);
                initrd_end = initrd_start + fw_getenvl("initrd_size");
        }
+#endif
 }
 
 void __init prom_free_prom_memory(void)
index 0026806..b2a577e 100644 (file)
@@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m
 CONFIG_USB_C67X00_HCD=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_ISP1760=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=m
 CONFIG_USB_R8A66597_HCD=m
index d2bfbc2..51f57d8 100644 (file)
@@ -29,7 +29,7 @@
 int kgdb_early_setup;
 #endif
 
-static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+static DECLARE_BITMAP(irq_map, NR_IRQS);
 
 int allocate_irqno(void)
 {
index fd528d7..336708a 100644 (file)
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
 static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 {
        memcpy((void *)dst, start, end - start);
-       dma_cache_wback((unsigned long)start, end - start);
+       dma_cache_wback(dst, end - start);
        local_flush_icache_range(dst, dst + (end - start));
        instruction_hazard();
 }
index 7d12c0d..77e6494 100644 (file)
@@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm)
 FEXPORT(__strnlen_\func\()_nocheck_asm)
        move            v0, a0
        PTR_ADDU        a1, a0                  # stop pointer
-1:     beq             v0, a1, 1f              # limit reached?
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+       .set            noat
+       li              AT, 1
+#endif
+       beq             v0, a1, 1f              # limit reached?
 .ifeqs "\func", "kernel"
        EX(lb, t0, (v0), .Lfault\@)
 .else
@@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
 .endif
        .set            noreorder
        bnez            t0, 1b
-1:      PTR_ADDIU      v0, 1
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+        PTR_ADDIU      v0, 1
+#else
+        PTR_ADDU       v0, AT
+       .set            at
+#endif
        .set            reorder
        PTR_SUBU        v0, a0
        jr              ra
index 15c99b6..b2eb468 100644 (file)
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
                    uint64_t nip, uint64_t addr)
 {
        uint64_t srr1;
-       int index = __this_cpu_inc_return(mce_nest_count);
+       int index = __this_cpu_inc_return(mce_nest_count) - 1;
        struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
        /*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
 
-       index = __this_cpu_inc_return(mce_queue_count);
+       index = __this_cpu_inc_return(mce_queue_count) - 1;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                __this_cpu_dec(mce_queue_count);
index f096e72..1db6851 100644 (file)
@@ -213,6 +213,7 @@ SECTIONS
                *(.opd)
        }
 
+       . = ALIGN(256);
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                __toc_start = .;
 #ifndef CONFIG_RELOCATABLE
index 48d3c5d..df81caa 100644 (file)
@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu, *vnext;
        int i;
        int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         */
        if ((threads_per_core > 1) &&
            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+               list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+                                        arch.run_list) {
                        vcpu->arch.ret = -EBUSY;
                        kvmppc_remove_runnable(vc, vcpu);
                        wake_up(&vcpu->arch.cpu_run);
index 0ce968b..3385e3d 100644 (file)
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-       pte_t *ptep;
-       struct page *page;
+       pte_t *ptep, pte;
        unsigned shift;
        unsigned long mask, flags;
+       struct page *page = ERR_PTR(-EINVAL);
+
+       local_irq_save(flags);
+       ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+       if (!ptep)
+               goto no_page;
+       pte = READ_ONCE(*ptep);
        /*
+        * Verify it is a huge page else bail.
         * Transparent hugepages are handled by generic code. We can skip them
         * here.
         */
-       local_irq_save(flags);
-       ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+       if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+               goto no_page;
 
-       /* Verify it is a huge page else bail. */
-       if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-               local_irq_restore(flags);
-               return ERR_PTR(-EINVAL);
+       if (!pte_present(pte)) {
+               page = NULL;
+               goto no_page;
        }
        mask = (1UL << shift) - 1;
-       page = pte_page(*ptep);
+       page = pte_page(pte);
        if (page)
                page += (address & mask) / PAGE_SIZE;
 
+no_page:
        local_irq_restore(flags);
        return page;
 }
index 59daa5e..6bfadf1 100644 (file)
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
         * hash fault look at them.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
+       /*
+        * Serialize against find_linux_pte_or_hugepte which does lock-less
+        * lookup in page tables with local interrupts disabled. For huge pages
+        * it casts pmd_t to pte_t. Since format of pte_t is different from
+        * pmd_t we want to prevent transit from pmd pointing to page table
+        * to pmd pointing to huge page (and back) while interrupts are disabled.
+        * We clear pmd to possibly replace it with page table pointer in
+        * different code paths. So make sure we wait for the parallel
+        * find_linux_pte_or_hugepte to finish.
+        */
+       kick_all_cpus_sync();
        return old_pmd;
 }
 
index 7940dc9..b258110 100644 (file)
 #define GHASH_DIGEST_SIZE      16
 
 struct ghash_ctx {
-       u8 icv[16];
-       u8 key[16];
+       u8 key[GHASH_BLOCK_SIZE];
 };
 
 struct ghash_desc_ctx {
+       u8 icv[GHASH_BLOCK_SIZE];
+       u8 key[GHASH_BLOCK_SIZE];
        u8 buffer[GHASH_BLOCK_SIZE];
        u32 bytes;
 };
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
 static int ghash_init(struct shash_desc *desc)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
        memset(dctx, 0, sizeof(*dctx));
+       memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
 
        return 0;
 }
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
        }
 
        memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-       memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
 
        return 0;
 }
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
                         const u8 *src, unsigned int srclen)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        unsigned int n;
        u8 *buf = dctx->buffer;
        int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
                src += n;
 
                if (!dctx->bytes) {
-                       ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+                       ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
                                              GHASH_BLOCK_SIZE);
                        if (ret != GHASH_BLOCK_SIZE)
                                return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
 
        n = srclen & ~(GHASH_BLOCK_SIZE - 1);
        if (n) {
-               ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+               ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
                if (ret != n)
                        return -EIO;
                src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
        return 0;
 }
 
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
        u8 *buf = dctx->buffer;
        int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 
                memset(pos, 0, dctx->bytes);
 
-               ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+               ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
                if (ret != GHASH_BLOCK_SIZE)
                        return -EIO;
+
+               dctx->bytes = 0;
        }
 
-       dctx->bytes = 0;
        return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        int ret;
 
-       ret = ghash_flush(ctx, dctx);
+       ret = ghash_flush(dctx);
        if (!ret)
-               memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+               memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
        return ret;
 }
 
index 1f374b3..9d5192c 100644 (file)
@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
                /* fill page with urandom bytes */
                get_random_bytes(pg, PAGE_SIZE);
                /* exor page with stckf values */
-               for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
+               for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
                        u64 *p = ((u64 *)pg) + n;
                        *p ^= get_tod_clock_fast();
                }
index fc64239..ef24a21 100644 (file)
@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 }
 
-static inline int pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
        unsigned long origin_mask;
 
index 7690dc8..20c146d 100644 (file)
@@ -443,8 +443,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 
 /*
  * Compile one eBPF instruction into s390x code
+ *
+ * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
+ * stack space for the large switch statement.
  */
-static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
+static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
 {
        struct bpf_insn *insn = &fp->insnsi[i];
        int jmp_off, last, insn_count = 1;
@@ -588,8 +591,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9160000, dst_reg, rc_reg);
                break;
        }
-       case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
-       case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
+       case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
+       case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -602,10 +605,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4_IMM(0xa7090000, REG_W0, 0);
                /* lgr %w1,%dst */
                EMIT4(0xb9040000, REG_W1, dst_reg);
-               /* llgfr %dst,%src (u32 cast) */
-               EMIT4(0xb9160000, dst_reg, src_reg);
                /* dlgr %w0,%dst */
-               EMIT4(0xb9870000, REG_W0, dst_reg);
+               EMIT4(0xb9870000, REG_W0, src_reg);
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
@@ -632,8 +633,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9160000, dst_reg, rc_reg);
                break;
        }
-       case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
-       case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
+       case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
+       case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -649,7 +650,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9040000, REG_W1, dst_reg);
                /* dlg %w0,<d(imm)>(%l) */
                EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
-                             EMIT_CONST_U64((u32) imm));
+                             EMIT_CONST_U64(imm));
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
index a6e424d..a6cfdab 100644 (file)
@@ -24,7 +24,8 @@ typedef struct {
        unsigned int    icache_line_size;
        unsigned int    ecache_size;
        unsigned int    ecache_line_size;
-       int             core_id;
+       unsigned short  sock_id;
+       unsigned short  core_id;
        int             proc_id;
 } cpuinfo_sparc;
 
index dc165eb..2a52c91 100644 (file)
@@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
        "       sllx            %1, 32, %1\n"
        "       or              %0, %1, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       sethi           %%uhi(%4), %1\n"
+       "       sethi           %%hi(%4), %0\n"
+       "       .word           662b\n"
+       "       or              %1, %%ulo(%4), %1\n"
+       "       or              %0, %%lo(%4), %0\n"
+       "       .word           663b\n"
+       "       sllx            %1, 32, %1\n"
+       "       or              %0, %1, %0\n"
+       "       .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
+              _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
+         "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+              _PAGE_CP_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
        "       andn            %0, %4, %0\n"
        "       or              %0, %5, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       andn            %0, %6, %0\n"
+       "       or              %0, %5, %0\n"
+       "       .previous\n"
        : "=r" (val)
        : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
-                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
+                    "i" (_PAGE_CP_4V));
 
        return __pgprot(val);
 }
index ed8f071..d1761df 100644 (file)
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
-#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
+#define topology_core_cpumask(cpu)             (&cpu_core_sib_map[cpu])
 #define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_map[NR_CPUS];
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
         return &cpu_core_map[cpu];
index 6fd4436..ec9c04d 100644 (file)
@@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry {
 };
 extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
        __sun4v_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
+       __sun_m7_2insn_patch_end;
 
 
 #endif /* !(__ASSEMBLY__) */
index 07cc49e..0f67942 100644 (file)
@@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
                             struct sun4v_1insn_patch_entry *);
 void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
                             struct sun4v_2insn_patch_entry *);
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                            struct sun4v_2insn_patch_entry *);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
index 94e392b..814fb17 100644 (file)
@@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
                err = -ENOMEM;
                goto err1;
        }
-       memset(grpci2priv, 0, sizeof(*grpci2priv));
        priv->regs = regs;
        priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
        priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
index 26c80e1..6f80936 100644 (file)
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
        }
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+                                char *srch_val,
+                                void (*func)(struct mdesc_handle *, u64, int),
+                                u64 val, int depth)
 {
-       u64 a;
-
-       mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-               u64 t = mdesc_arc_target(hp, a);
-               const char *name;
-               const u64 *id;
+       u64 arc;
 
-               name = mdesc_node_name(hp, t);
-               if (!strcmp(name, "cpu")) {
-                       id = mdesc_get_property(hp, t, "id", NULL);
-                       if (*id < NR_CPUS)
-                               cpu_data(*id).core_id = core_id;
-               } else {
-                       u64 j;
+       /* Since we have an estimate of recursion depth, do a sanity check. */
+       if (depth == 0)
+               return;
 
-                       mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-                               u64 n = mdesc_arc_target(hp, j);
-                               const char *n_name;
+       mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 n = mdesc_arc_target(hp, arc);
+               const char *name = mdesc_node_name(hp, n);
 
-                               n_name = mdesc_node_name(hp, n);
-                               if (strcmp(n_name, "cpu"))
-                                       continue;
+               if (!strcmp(srch_val, name))
+                       (*func)(hp, n, val);
 
-                               id = mdesc_get_property(hp, n, "id", NULL);
-                               if (*id < NR_CPUS)
-                                       cpu_data(*id).core_id = core_id;
-                       }
-               }
+               find_back_node_value(hp, n, srch_val, func, val, depth-1);
        }
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+                          int core_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+                          int sock_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+                         int core_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+                         int sock_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
        int idx;
        u64 mp;
 
        idx = 1;
+
+       /* Identify unique cores by looking for cpus backpointed to by
+        * level 1 instruction caches.
+        */
        mdesc_for_each_node_by_name(hp, mp, "cache") {
                const u64 *level;
                const char *type;
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
                        continue;
 
                mark_core_ids(hp, mp, idx);
+               idx++;
+       }
+}
+
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+       u64 mp;
+       int idx = 1;
+       int fnd = 0;
+
+       /* Identify unique sockets by looking for cpus backpointed to by
+        * shared level n caches.
+        */
+       mdesc_for_each_node_by_name(hp, mp, "cache") {
+               const u64 *cur_lvl;
+
+               cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+               if (*cur_lvl != level)
+                       continue;
+
+               mark_sock_ids(hp, mp, idx);
+               idx++;
+               fnd = 1;
+       }
+       return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+       int idx = 1;
 
+       mdesc_for_each_node_by_name(hp, mp, "socket") {
+               u64 a;
+
+               mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+                       u64 t = mdesc_arc_target(hp, a);
+                       const char *name;
+                       const u64 *id;
+
+                       name = mdesc_node_name(hp, t);
+                       if (strcmp(name, "cpu"))
+                               continue;
+
+                       id = mdesc_get_property(hp, t, "id", NULL);
+                       if (*id < num_possible_cpus())
+                               cpu_data(*id).sock_id = idx;
+               }
                idx++;
        }
 }
 
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+       u64 mp;
+
+       /* If machine description exposes sockets data use it.
+        * Otherwise fallback to use shared L3 or L2 caches.
+        */
+       mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+       if (mp != MDESC_NODE_NULL)
+               return set_sock_ids_by_socket(hp, mp);
+
+       if (!set_sock_ids_by_cache(hp, 3))
+               set_sock_ids_by_cache(hp, 2);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
        u64 a;
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
                        continue;
 
                mark_proc_ids(hp, mp, idx);
-
                idx++;
        }
 }
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
        set_core_ids(hp);
        set_proc_ids(hp);
+       set_sock_ids(hp);
 
        mdesc_release(hp);
 
index 6f7251f..c928bc6 100644 (file)
@@ -1002,6 +1002,38 @@ static int __init pcibios_init(void)
 subsys_initcall(pcibios_init);
 
 #ifdef CONFIG_SYSFS
+
+#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
+
+static void pcie_bus_slot_names(struct pci_bus *pbus)
+{
+       struct pci_dev *pdev;
+       struct pci_bus *bus;
+
+       list_for_each_entry(pdev, &pbus->devices, bus_list) {
+               char name[SLOT_NAME_SIZE];
+               struct pci_slot *pci_slot;
+               const u32 *slot_num;
+               int len;
+
+               slot_num = of_get_property(pdev->dev.of_node,
+                                          "physical-slot#", &len);
+
+               if (slot_num == NULL || len != 4)
+                       continue;
+
+               snprintf(name, sizeof(name), "%u", slot_num[0]);
+               pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
+
+               if (IS_ERR(pci_slot))
+                       pr_err("PCI: pci_create_slot returned %ld.\n",
+                              PTR_ERR(pci_slot));
+       }
+
+       list_for_each_entry(bus, &pbus->children, node)
+               pcie_bus_slot_names(bus);
+}
+
 static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
 {
        const struct pci_slot_names {
@@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void)
 
        while ((pbus = pci_find_next_bus(pbus)) != NULL) {
                struct device_node *node;
+               struct pci_dev *pdev;
+
+               pdev = list_first_entry(&pbus->devices, struct pci_dev,
+                                       bus_list);
 
-               if (pbus->self) {
-                       /* PCI->PCI bridge */
-                       node = pbus->self->dev.of_node;
+               if (pdev && pci_is_pcie(pdev)) {
+                       pcie_bus_slot_names(pbus);
                } else {
-                       struct pci_pbm_info *pbm = pbus->sysdata;
 
-                       /* Host PCI controller */
-                       node = pbm->op->dev.of_node;
-               }
+                       if (pbus->self) {
+
+                               /* PCI->PCI bridge */
+                               node = pbus->self->dev.of_node;
+
+                       } else {
+                               struct pci_pbm_info *pbm = pbus->sysdata;
 
-               pci_bus_slot_names(node, pbus);
+                               /* Host PCI controller */
+                               node = pbm->op->dev.of_node;
+                       }
+
+                       pci_bus_slot_names(node, pbus);
+               }
        }
 
        return 0;
index c38d19f..f7b2617 100644 (file)
@@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
        }
 }
 
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                            struct sun4v_2insn_patch_entry *end)
+{
+       while (start < end) {
+               unsigned long addr = start->addr;
+
+               *(unsigned int *) (addr +  0) = start->insns[0];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
+
+               *(unsigned int *) (addr +  4) = start->insns[1];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  4));
+
+               start++;
+       }
+}
+
 static void __init sun4v_patch(void)
 {
        extern void sun4v_hvapi_init(void);
@@ -267,6 +285,9 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+               sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
+                                        &__sun_m7_2insn_patch_end);
 
        sun4v_hvapi_init();
 }
index 61139d9..19cd08d 100644 (file)
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
 
 static cpumask_t smp_commenced_mask;
 
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
                }
        }
 
+       for_each_present_cpu(i)  {
+               unsigned int j;
+
+               for_each_present_cpu(j)  {
+                       if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+                               cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+               }
+       }
+
        for_each_present_cpu(i) {
                unsigned int j;
 
index 0924305..f1a2f68 100644 (file)
@@ -138,6 +138,11 @@ SECTIONS
                *(.pause_3insn_patch)
                __pause_3insn_patch_end = .;
        }
+       .sun_m7_2insn_patch : {
+               __sun_m7_2insn_patch = .;
+               *(.sun_m7_2insn_patch)
+               __sun_m7_2insn_patch_end = .;
+       }
        PERCPU_SECTION(SMP_CACHE_BYTES)
 
        . = ALIGN(PAGE_SIZE);
index 4ca0d6b..559cb74 100644 (file)
@@ -54,6 +54,7 @@
 #include "init_64.h"
 
 unsigned long kern_linear_pte_xor[4] __read_mostly;
+static unsigned long page_cache4v_flag;
 
 /* A bitmap, two bits for every 256MB of physical memory.  These two
  * bits determine what page size we use for kernel linear
@@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void)
 
 static void __init sun4v_linear_pte_xor_finalize(void)
 {
+       unsigned long pagecv_flag;
+
+       /* Bit 9 of TTE is no longer CV bit on M7 processor and it instead
+        * enables MCD error. Do not set bit 9 on M7 processor.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               pagecv_flag = 0x00;
+               break;
+       default:
+               pagecv_flag = _PAGE_CV_4V;
+               break;
+       }
 #ifndef CONFIG_DEBUG_PAGEALLOC
        if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
                kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
@@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
                kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
@@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
                kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
@@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void)
        return available;
 }
 
+#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
 /* We need to exclude reserved regions. This exclusion will include
  * vmlinux and initrd. To be more precise the initrd size could be used to
  * compute a new lower limit because it is freed later during initialization.
@@ -2034,6 +2055,25 @@ void __init paging_init(void)
        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 #endif
 
+       /* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
+        * bit on M7 processor. This is a conflicting usage of the same
+        * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
+        * Detection error on all pages and this will lead to problems
+        * later. Kernel does not run with MCD enabled and hence rest
+        * of the required steps to fully configure memory corruption
+        * detection are not taken. We need to ensure TTE.mcde is not
+        * set on M7 processor. Compute the value of cacheability
+        * flag for use later taking this into consideration.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               page_cache4v_flag = _PAGE_CP_4V;
+               break;
+       default:
+               page_cache4v_flag = _PAGE_CACHE_4V;
+               break;
+       }
+
        if (tlb_type == hypervisor)
                sun4v_pgprot_init();
        else
@@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
-#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
-#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
-#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
-#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
-#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
-
 pgprot_t PAGE_KERNEL __read_mostly;
 EXPORT_SYMBOL(PAGE_KERNEL);
 
@@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                    _PAGE_P_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                           _PAGE_CP_4V | _PAGE_CV_4V |
-                           _PAGE_P_4V | _PAGE_W_4V);
+                           page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
 
        pte_base |= _PAGE_PMD_HUGE;
 
@@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void)
        int i;
 
        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
-                               _PAGE_CACHE_4V | _PAGE_P_4V |
+                               page_cache4v_flag | _PAGE_P_4V |
                                __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                                _PAGE_EXEC_4V);
        PAGE_KERNEL_LOCKED = PAGE_KERNEL;
 
        _PAGE_IE = _PAGE_IE_4V;
        _PAGE_E = _PAGE_E_4V;
-       _PAGE_CACHE = _PAGE_CACHE_4V;
+       _PAGE_CACHE = page_cache4v_flag;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
@@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void)
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
                PAGE_OFFSET;
 #endif
-       kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
-                                  _PAGE_P_4V | _PAGE_W_4V);
+       kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
+                                  _PAGE_W_4V);
 
        for (i = 1; i < 4; i++)
                kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
@@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void)
                             _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
                             _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
 
-       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
-       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
+       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
-       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
-       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                         __ACCESS_BITS_4V | _PAGE_EXEC_4V);
 
        page_exec_bit = _PAGE_EXEC_4V;
@@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr)
               _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                      _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+                      page_cache4v_flag | _PAGE_P_4V |
                       _PAGE_EXEC_4V | _PAGE_W_4V);
 
        return val | paddr;
index 6bbb991..e68408f 100644 (file)
@@ -100,7 +100,7 @@ config X86
        select IRQ_FORCED_THREADING
        select HAVE_BPF_JIT if X86_64
        select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
+       select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
        select ARCH_HAS_SG_CHAIN
        select CLKEVT_I8253
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -441,6 +441,7 @@ config X86_UV
        depends on X86_EXTENDED_PLATFORM
        depends on NUMA
        depends on X86_X2APIC
+       depends on PCI
        ---help---
          This option is needed in order to support SGI Ultraviolet systems.
          If you don't have one of these, you should say N here.
@@ -850,11 +851,12 @@ config NR_CPUS
        default "1" if !SMP
        default "8192" if MAXSMP
        default "32" if SMP && X86_BIGSMP
-       default "8" if SMP
+       default "8" if SMP && X86_32
+       default "64" if SMP
        ---help---
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  If CPUMASK_OFFSTACK is enabled, the maximum
-         supported value is 4096, otherwise the maximum value is 512.  The
+         supported value is 8192, otherwise the maximum value is 512.  The
          minimum value which makes sense is 2.
 
          This is purely to save memory - each supported CPU adds
index 72484a6..a5973f8 100644 (file)
@@ -332,4 +332,15 @@ config X86_DEBUG_STATIC_CPU_HAS
 
          If unsure, say N.
 
+config PUNIT_ATOM_DEBUG
+       tristate "ATOM Punit debug driver"
+       select DEBUG_FS
+       select IOSF_MBI
+       ---help---
+         This is a debug driver, which gets the power states
+         of all Punit North Complex devices. The power states of
+         each device is exposed as part of the debugfs interface.
+         The current power state can be read from
+         /sys/kernel/debug/punit_atom/dev_power_state
+
 endmenu
index 34a5b93..a2b9740 100644 (file)
@@ -177,6 +177,7 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * look at pci_iomap().
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);
@@ -338,6 +339,9 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 #define IO_SPACE_LIMIT 0xffff
 
 #ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_index(int handle);
+#define arch_phys_wc_index arch_phys_wc_index
+
 extern int __must_check arch_phys_wc_add(unsigned long base,
                                         unsigned long size);
 extern void arch_phys_wc_del(int handle);
index dea2e7e..f4a555b 100644 (file)
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
+               unsigned smap_andnot_wp:1;
        };
 };
 
@@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
        struct fpu guest_fpu;
+       bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;
@@ -743,6 +745,7 @@ struct kvm_x86_ops {
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+       void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
index f768f62..b94f6f6 100644 (file)
@@ -31,7 +31,7 @@
  * arch_phys_wc_add and arch_phys_wc_del.
  */
 # ifdef CONFIG_MTRR
-extern u8 mtrr_type_lookup(u64 addr, u64 end);
+extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
 extern void mtrr_save_fixed_ranges(void *);
 extern void mtrr_save_state(void);
 extern int mtrr_add(unsigned long base, unsigned long size,
@@ -48,14 +48,13 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
-extern int phys_wc_to_mtrr_index(int handle);
 #  else
-static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
        /*
         * Return no-MTRRs:
         */
-       return 0xff;
+       return MTRR_TYPE_INVALID;
 }
 #define mtrr_save_fixed_ranges(arg) do {} while (0)
 #define mtrr_save_state() do {} while (0)
@@ -84,10 +83,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
-static inline int phys_wc_to_mtrr_index(int handle)
-{
-       return -1;
-}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
@@ -127,4 +122,8 @@ struct mtrr_gentry32 {
                                 _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry32)
 #endif /* CONFIG_COMPAT */
 
+/* Bit fields for enabled in struct mtrr_state_type */
+#define MTRR_STATE_MTRR_FIXED_ENABLED  0x01
+#define MTRR_STATE_MTRR_ENABLED                0x02
+
 #endif /* _ASM_X86_MTRR_H */
index 91bc4ba..cdcff7f 100644 (file)
@@ -4,12 +4,7 @@
 #include <linux/types.h>
 #include <asm/pgtable_types.h>
 
-#ifdef CONFIG_X86_PAT
-extern int pat_enabled;
-#else
-static const int pat_enabled;
-#endif
-
+bool pat_enabled(void);
 extern void pat_init(void);
 void pat_init_cache_modes(void);
 
index aeb4666..2270e41 100644 (file)
@@ -215,6 +215,44 @@ static inline void clwb(volatile void *__p)
                : [pax] "a" (p));
 }
 
+/**
+ * pcommit_sfence() - persistent commit and fence
+ *
+ * The PCOMMIT instruction ensures that data that has been flushed from the
+ * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
+ * memory and is durable on the DIMM.  The primary use case for this is
+ * persistent memory.
+ *
+ * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
+ * with appropriate fencing.
+ *
+ * Example:
+ * void flush_and_commit_buffer(void *vaddr, unsigned int size)
+ * {
+ *         unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+ *         void *vend = vaddr + size;
+ *         void *p;
+ *
+ *         for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+ *              p < vend; p += boot_cpu_data.x86_clflush_size)
+ *                 clwb(p);
+ *
+ *         // SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
+ *         // MFENCE via mb() also works
+ *         wmb();
+ *
+ *         // PCOMMIT and the required SFENCE for ordering
+ *         pcommit_sfence();
+ * }
+ *
+ * After this function completes the data pointed to by 'vaddr' has been
+ * accepted to memory and will be durable if the 'vaddr' points to persistent
+ * memory.
+ *
+ * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
+ * things we include both the PCOMMIT and the required SFENCE in the
+ * alternatives generated by pcommit_sfence().
+ */
 static inline void pcommit_sfence(void)
 {
        alternative(ASM_NOP7,
index c469490..3c6bb34 100644 (file)
 #define MSR_CORE_C3_RESIDENCY          0x000003fc
 #define MSR_CORE_C6_RESIDENCY          0x000003fd
 #define MSR_CORE_C7_RESIDENCY          0x000003fe
+#define MSR_KNL_CORE_C6_RESIDENCY      0x000003ff
 #define MSR_PKG_C2_RESIDENCY           0x0000060d
 #define MSR_PKG_C8_RESIDENCY           0x00000630
 #define MSR_PKG_C9_RESIDENCY           0x00000631
index d0acb65..7528dcf 100644 (file)
@@ -103,7 +103,7 @@ struct mtrr_state_type {
 #define MTRRIOC_GET_PAGE_ENTRY   _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
 #define MTRRIOC_KILL_PAGE_ENTRY  _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry)
 
-/*  These are the region types  */
+/* MTRR memory types, which are defined in SDM */
 #define MTRR_TYPE_UNCACHABLE 0
 #define MTRR_TYPE_WRCOMB     1
 /*#define MTRR_TYPE_         2*/
@@ -113,5 +113,11 @@ struct mtrr_state_type {
 #define MTRR_TYPE_WRBACK     6
 #define MTRR_NUM_TYPES       7
 
+/*
+ * Invalid MTRR memory type.  mtrr_type_lookup() returns this value when
+ * MTRRs are disabled.  Note, this value is allocated from the reserved
+ * values (0x7-0xff) of the MTRR memory types.
+ */
+#define MTRR_TYPE_INVALID    0xff
 
 #endif /* _UAPI_ASM_X86_MTRR_H */
index e535533..20190bd 100644 (file)
@@ -708,6 +708,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
        int i, ret = 0;
+       char *tmp;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
@@ -716,9 +717,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                        if (quirk_no_way_out)
                                quirk_no_way_out(i, m, regs);
                }
-               if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
-                   MCE_PANIC_SEVERITY)
+
+               if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       *msg = tmp;
                        ret = 1;
+               }
        }
        return ret;
 }
index 5f90b85..70d7c93 100644 (file)
@@ -98,7 +98,8 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                        continue;
                base = range_state[i].base_pfn;
                if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
-                   (mtrr_state.enabled & 1)) {
+                   (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+                   (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
                        /* Var MTRR contains UC entry below 1M? Skip it: */
                        printk(BIOS_BUG_MSG, i);
                        if (base + size <= (1<<(20-PAGE_SHIFT)))
index 7d74f7b..3b533cf 100644 (file)
@@ -102,59 +102,76 @@ static int check_type_overlap(u8 *prev, u8 *curr)
        return 0;
 }
 
-/*
- * Error/Semi-error returns:
- * 0xFF - when MTRR is not enabled
- * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
- *             corresponds only to [start:*partial_end].
- *             Caller has to lookup again for [*partial_end:end].
+/**
+ * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
+ *
+ * Return the MTRR fixed memory type of 'start'.
+ *
+ * MTRR fixed entries are divided into the following ways:
+ *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
+ *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
+ *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
+ *
+ * Return Values:
+ * MTRR_TYPE_(type)  - Matched memory type
+ * MTRR_TYPE_INVALID - Unmatched
+ */
+static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
+{
+       int idx;
+
+       if (start >= 0x100000)
+               return MTRR_TYPE_INVALID;
+
+       /* 0x0 - 0x7FFFF */
+       if (start < 0x80000) {
+               idx = 0;
+               idx += (start >> 16);
+               return mtrr_state.fixed_ranges[idx];
+       /* 0x80000 - 0xBFFFF */
+       } else if (start < 0xC0000) {
+               idx = 1 * 8;
+               idx += ((start - 0x80000) >> 14);
+               return mtrr_state.fixed_ranges[idx];
+       }
+
+       /* 0xC0000 - 0xFFFFF */
+       idx = 3 * 8;
+       idx += ((start - 0xC0000) >> 12);
+       return mtrr_state.fixed_ranges[idx];
+}
+
+/**
+ * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
+ *
+ * Return Value:
+ * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
+ *
+ * Output Arguments:
+ * repeat - Set to 1 when [start:end] spanned across MTRR range and type
+ *         returned corresponds only to [start:*partial_end].  Caller has
+ *         to lookup again for [*partial_end:end].
+ *
+ * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
+ *          region is fully covered by a single MTRR entry or the default
+ *          type.
  */
-static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
+static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+                                   int *repeat, u8 *uniform)
 {
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;
 
        *repeat = 0;
-       if (!mtrr_state_set)
-               return 0xFF;
-
-       if (!mtrr_state.enabled)
-               return 0xFF;
+       *uniform = 1;
 
-       /* Make end inclusive end, instead of exclusive */
+       /* Make end inclusive instead of exclusive */
        end--;
 
-       /* Look in fixed ranges. Just return the type as per start */
-       if (mtrr_state.have_fixed && (start < 0x100000)) {
-               int idx;
-
-               if (start < 0x80000) {
-                       idx = 0;
-                       idx += (start >> 16);
-                       return mtrr_state.fixed_ranges[idx];
-               } else if (start < 0xC0000) {
-                       idx = 1 * 8;
-                       idx += ((start - 0x80000) >> 14);
-                       return mtrr_state.fixed_ranges[idx];
-               } else if (start < 0x1000000) {
-                       idx = 3 * 8;
-                       idx += ((start - 0xC0000) >> 12);
-                       return mtrr_state.fixed_ranges[idx];
-               }
-       }
-
-       /*
-        * Look in variable ranges
-        * Look of multiple ranges matching this address and pick type
-        * as per MTRR precedence
-        */
-       if (!(mtrr_state.enabled & 2))
-               return mtrr_state.def_type;
-
-       prev_match = 0xFF;
+       prev_match = MTRR_TYPE_INVALID;
        for (i = 0; i < num_var_ranges; ++i) {
-               unsigned short start_state, end_state;
+               unsigned short start_state, end_state, inclusive;
 
                if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
                        continue;
@@ -166,20 +183,29 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 
                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
+               inclusive = ((start < base) && (end > base));
 
-               if (start_state != end_state) {
+               if ((start_state != end_state) || inclusive) {
                        /*
                         * We have start:end spanning across an MTRR.
-                        * We split the region into
-                        * either
-                        * (start:mtrr_end) (mtrr_end:end)
-                        * or
-                        * (start:mtrr_start) (mtrr_start:end)
+                        * We split the region into either
+                        *
+                        * - start_state:1
+                        * (start:mtrr_end)(mtrr_end:end)
+                        * - end_state:1
+                        * (start:mtrr_start)(mtrr_start:end)
+                        * - inclusive:1
+                        * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
+                        *
                         * depending on kind of overlap.
-                        * Return the type for first region and a pointer to
-                        * the start of second region so that caller will
-                        * lookup again on the second region.
-                        * Note: This way we handle multiple overlaps as well.
+                        *
+                        * Return the type of the first region and a pointer
+                        * to the start of next region so that caller will be
+                        * advised to lookup again after having adjusted start
+                        * and end.
+                        *
+                        * Note: This way we handle overlaps with multiple
+                        * entries and the default type properly.
                         */
                        if (start_state)
                                *partial_end = base + get_mtrr_size(mask);
@@ -193,59 +219,94 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 
                        end = *partial_end - 1; /* end is inclusive */
                        *repeat = 1;
+                       *uniform = 0;
                }
 
                if ((start & mask) != (base & mask))
                        continue;
 
                curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
-               if (prev_match == 0xFF) {
+               if (prev_match == MTRR_TYPE_INVALID) {
                        prev_match = curr_match;
                        continue;
                }
 
+               *uniform = 0;
                if (check_type_overlap(&prev_match, &curr_match))
                        return curr_match;
        }
 
-       if (mtrr_tom2) {
-               if (start >= (1ULL<<32) && (end < mtrr_tom2))
-                       return MTRR_TYPE_WRBACK;
-       }
-
-       if (prev_match != 0xFF)
+       if (prev_match != MTRR_TYPE_INVALID)
                return prev_match;
 
        return mtrr_state.def_type;
 }
 
-/*
- * Returns the effective MTRR type for the region
- * Error return:
- * 0xFF - when MTRR is not enabled
+/**
+ * mtrr_type_lookup - look up memory type in MTRR
+ *
+ * Return Values:
+ * MTRR_TYPE_(type)  - The effective MTRR type for the region
+ * MTRR_TYPE_INVALID - MTRR is disabled
+ *
+ * Output Argument:
+ * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
+ *          region is fully covered by a single MTRR entry or the default
+ *          type.
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
 {
-       u8 type, prev_type;
+       u8 type, prev_type, is_uniform = 1, dummy;
        int repeat;
        u64 partial_end;
 
-       type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+       if (!mtrr_state_set)
+               return MTRR_TYPE_INVALID;
+
+       if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
+               return MTRR_TYPE_INVALID;
+
+       /*
+        * Look up the fixed ranges first, which take priority over
+        * the variable ranges.
+        */
+       if ((start < 0x100000) &&
+           (mtrr_state.have_fixed) &&
+           (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
+               is_uniform = 0;
+               type = mtrr_type_lookup_fixed(start, end);
+               goto out;
+       }
+
+       /*
+        * Look up the variable ranges.  Look of multiple ranges matching
+        * this address and pick type as per MTRR precedence.
+        */
+       type = mtrr_type_lookup_variable(start, end, &partial_end,
+                                        &repeat, &is_uniform);
 
        /*
         * Common path is with repeat = 0.
         * However, we can have cases where [start:end] spans across some
-        * MTRR range. Do repeated lookups for that case here.
+        * MTRR ranges and/or the default type.  Do repeated lookups for
+        * that case here.
         */
        while (repeat) {
                prev_type = type;
                start = partial_end;
-               type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+               is_uniform = 0;
+               type = mtrr_type_lookup_variable(start, end, &partial_end,
+                                                &repeat, &dummy);
 
                if (check_type_overlap(&prev_type, &type))
-                       return type;
+                       goto out;
        }
 
+       if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
+               type = MTRR_TYPE_WRBACK;
+
+out:
+       *uniform = is_uniform;
        return type;
 }
 
@@ -347,7 +408,9 @@ static void __init print_mtrr_state(void)
                 mtrr_attrib_to_str(mtrr_state.def_type));
        if (mtrr_state.have_fixed) {
                pr_debug("MTRR fixed ranges %sabled:\n",
-                        mtrr_state.enabled & 1 ? "en" : "dis");
+                       ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+                        (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
+                        "en" : "dis");
                print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                for (i = 0; i < 2; ++i)
                        print_fixed(0x80000 + i * 0x20000, 0x04000,
@@ -360,7 +423,7 @@ static void __init print_mtrr_state(void)
                print_fixed_last();
        }
        pr_debug("MTRR variable ranges %sabled:\n",
-                mtrr_state.enabled & 2 ? "en" : "dis");
+                mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
        high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
 
        for (i = 0; i < num_var_ranges; ++i) {
@@ -382,7 +445,7 @@ static void __init print_mtrr_state(void)
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+bool __init get_mtrr_state(void)
 {
        struct mtrr_var_range *vrs;
        unsigned long flags;
@@ -426,6 +489,8 @@ void __init get_mtrr_state(void)
 
        post_set();
        local_irq_restore(flags);
+
+       return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
 }
 
 /* Some BIOS's are messed up and don't set all MTRRs the same! */
index ea5f363..e7ed0d8 100644 (file)
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
 u32 num_var_ranges;
+static bool __mtrr_enabled;
+
+static bool mtrr_enabled(void)
+{
+       return __mtrr_enabled;
+}
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
@@ -286,7 +292,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        int i, replace, error;
        mtrr_type ltype;
 
-       if (!mtrr_if)
+       if (!mtrr_enabled())
                return -ENXIO;
 
        error = mtrr_if->validate_add_page(base, size, type);
@@ -435,6 +441,8 @@ static int mtrr_check(unsigned long base, unsigned long size)
 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
             bool increment)
 {
+       if (!mtrr_enabled())
+               return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
@@ -463,8 +471,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
        unsigned long lbase, lsize;
        int error = -EINVAL;
 
-       if (!mtrr_if)
-               return -ENXIO;
+       if (!mtrr_enabled())
+               return -ENODEV;
 
        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
@@ -523,6 +531,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  */
 int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
+       if (!mtrr_enabled())
+               return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
@@ -538,6 +548,9 @@ EXPORT_SYMBOL(mtrr_del);
  * attempts to add a WC MTRR covering size bytes starting at base and
  * logs an error if this fails.
  *
+ * The called should provide a power of two size on an equivalent
+ * power of two boundary.
+ *
  * Drivers must store the return value to pass to mtrr_del_wc_if_needed,
  * but drivers should not try to interpret that return value.
  */
@@ -545,7 +558,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
 {
        int ret;
 
-       if (pat_enabled)
+       if (pat_enabled() || !mtrr_enabled())
                return 0;  /* Success!  (We don't need to do anything.) */
 
        ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -577,7 +590,7 @@ void arch_phys_wc_del(int handle)
 EXPORT_SYMBOL(arch_phys_wc_del);
 
 /*
- * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * arch_phys_wc_index - translates arch_phys_wc_add's return value
  * @handle: Return value from arch_phys_wc_add
  *
  * This will turn the return value from arch_phys_wc_add into an mtrr
@@ -587,14 +600,14 @@ EXPORT_SYMBOL(arch_phys_wc_del);
  * in printk line.  Alas there is an illegitimate use in some ancient
  * drm ioctls.
  */
-int phys_wc_to_mtrr_index(int handle)
+int arch_phys_wc_index(int handle)
 {
        if (handle < MTRR_TO_PHYS_WC_OFFSET)
                return -1;
        else
                return handle - MTRR_TO_PHYS_WC_OFFSET;
 }
-EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+EXPORT_SYMBOL_GPL(arch_phys_wc_index);
 
 /*
  * HACK ALERT!
@@ -734,10 +747,12 @@ void __init mtrr_bp_init(void)
        }
 
        if (mtrr_if) {
+               __mtrr_enabled = true;
                set_num_var_ranges();
                init_table();
                if (use_intel()) {
-                       get_mtrr_state();
+                       /* BIOS may override */
+                       __mtrr_enabled = get_mtrr_state();
 
                        if (mtrr_cleanup(phys_addr)) {
                                changed_by_mtrr_cleanup = 1;
@@ -745,10 +760,16 @@ void __init mtrr_bp_init(void)
                        }
                }
        }
+
+       if (!mtrr_enabled())
+               pr_info("MTRR: Disabled\n");
 }
 
 void mtrr_ap_init(void)
 {
+       if (!mtrr_enabled())
+               return;
+
        if (!use_intel() || mtrr_aps_delayed_init)
                return;
        /*
@@ -774,6 +795,9 @@ void mtrr_save_state(void)
 {
        int first_cpu;
 
+       if (!mtrr_enabled())
+               return;
+
        get_online_cpus();
        first_cpu = cpumask_first(cpu_online_mask);
        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
@@ -782,6 +806,8 @@ void mtrr_save_state(void)
 
 void set_mtrr_aps_delayed_init(void)
 {
+       if (!mtrr_enabled())
+               return;
        if (!use_intel())
                return;
 
@@ -793,7 +819,7 @@ void set_mtrr_aps_delayed_init(void)
  */
 void mtrr_aps_init(void)
 {
-       if (!use_intel())
+       if (!use_intel() || !mtrr_enabled())
                return;
 
        /*
@@ -810,7 +836,7 @@ void mtrr_aps_init(void)
 
 void mtrr_bp_restore(void)
 {
-       if (!use_intel())
+       if (!use_intel() || !mtrr_enabled())
                return;
 
        mtrr_if->set_all();
@@ -818,7 +844,7 @@ void mtrr_bp_restore(void)
 
 static int __init mtrr_init_finialize(void)
 {
-       if (!mtrr_if)
+       if (!mtrr_enabled())
                return 0;
 
        if (use_intel()) {
index df5e41f..951884d 100644 (file)
@@ -51,7 +51,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
 
 void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
-void get_mtrr_state(void);
+bool get_mtrr_state(void);
 
 extern void set_mtrr_ops(const struct mtrr_ops *ops);
 
index 0091832..6185d31 100644 (file)
@@ -173,6 +173,21 @@ static void init_thread_xstate(void)
                xstate_size = sizeof(struct i387_fxsave_struct);
        else
                xstate_size = sizeof(struct i387_fsave_struct);
+
+       /*
+        * Quirk: we don't yet handle the XSAVES* instructions
+        * correctly, as we don't correctly convert between
+        * standard and compacted format when interfacing
+        * with user-space - so disable it for now.
+        *
+        * The difference is small: with recent CPUs the
+        * compacted format is only marginally smaller than
+        * the standard FPU state format.
+        *
+        * ( This is easy to backport while we are fixing
+        *   XSAVES* support. )
+        */
+       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 }
 
 /*
index 59b69f6..1d08ad3 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/module.h>
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
+#include <asm/i387.h> /* For use_eager_fpu.  Ugh! */
+#include <asm/fpu-internal.h> /* For use_eager_fpu.  Ugh! */
 #include <asm/user.h>
 #include <asm/xsave.h>
 #include "cpuid.h"
@@ -95,6 +97,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
+       vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
+
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
         * address checks; exit if it is ever changed.
index c3b1ad9..496b369 100644 (file)
@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif
index d43867c..44a7d25 100644 (file)
@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
        }
 }
 
-void update_permission_bitmask(struct kvm_vcpu *vcpu,
-               struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+                                     struct kvm_mmu *mmu, bool ept)
 {
        unsigned bit, byte, pfec;
        u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
        bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+       bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
        struct kvm_mmu *context = &vcpu->arch.mmu;
 
        MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
        context->base_role.cr0_wp  = is_write_protection(vcpu);
        context->base_role.smep_andnot_wp
                = smep && !is_write_protection(vcpu);
+       context->base_role.smap_andnot_wp
+               = smap && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
-       union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
        bool remote_flush, local_flush, zap_page;
+       union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
+               .cr0_wp = 1,
+               .cr4_pae = 1,
+               .nxe = 1,
+               .smep_andnot_wp = 1,
+               .smap_andnot_wp = 1,
+       };
 
        /*
         * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
                if (detect_write_misaligned(sp, gpa, bytes) ||
                      detect_write_flooding(sp)) {
index c7d6563..0ada65e 100644 (file)
@@ -71,8 +71,6 @@ enum {
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-               bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 
+       WARN_ON(pfec & PFERR_RSVD_MASK);
+
        return (mmu->permissions[index] >> pte_access) & 1;
 }
 
index fd49c86..6e6d115 100644 (file)
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                                              mmu_is_nested(vcpu));
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;
+
+               /*
+                * page fault with PFEC.RSVD  = 1 is caused by shadow
+                * page fault, should not be used to walk guest page
+                * table.
+                */
+               error_code &= ~PFERR_RSVD_MASK;
        };
 
        r = mmu_topup_memory_caches(vcpu);
index ce741b8..9afa233 100644 (file)
@@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
+       .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,
 
        .tlb_flush = svm_flush_tlb,
index f7b6168..2d73807 100644 (file)
@@ -10185,6 +10185,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
+       .fpu_activate = vmx_fpu_activate,
        .fpu_deactivate = vmx_fpu_deactivate,
 
        .tlb_flush = vmx_flush_tlb,
index c73efcd..ea306ad 100644 (file)
@@ -702,8 +702,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
-       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
-                                  X86_CR4_PAE | X86_CR4_SMEP;
+       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                  X86_CR4_SMEP | X86_CR4_SMAP;
+
        if (cr4 & CR4_RESERVED_BITS)
                return 1;
 
@@ -744,9 +745,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
                kvm_mmu_reset_context(vcpu);
 
-       if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
-               update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
                kvm_update_cpuid(vcpu);
 
@@ -6197,6 +6195,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
                return;
 
        page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (is_error_page(page))
+               return;
        kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
 
        /*
@@ -7060,7 +7060,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        fpu_save_init(&vcpu->arch.guest_fpu);
        __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
-       kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+       if (!vcpu->arch.eager_fpu)
+               kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+
        trace_kvm_fpu(0);
 }
 
@@ -7076,11 +7078,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                                unsigned int id)
 {
+       struct kvm_vcpu *vcpu;
+
        if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
                printk_once(KERN_WARNING
                "kvm: SMP vm created on host with unstable TSC; "
                "guest TSC will not be reliable\n");
-       return kvm_x86_ops->vcpu_create(kvm, id);
+
+       vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+       /*
+        * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
+        * deactivated soon if it doesn't.
+        */
+       kvm_x86_ops->fpu_activate(vcpu);
+       return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
index 9ca35fc..3a2ec87 100644 (file)
@@ -82,7 +82,7 @@ iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
         * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
         * user, which is "WC if the MTRR is WC, UC if you can't do that."
         */
-       if (!pat_enabled && pgprot_val(prot) ==
+       if (!pat_enabled() && pgprot_val(prot) ==
            (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
                prot = __pgprot(__PAGE_KERNEL |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
index 70e7444..b0da358 100644 (file)
@@ -234,10 +234,11 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
        /*
         * Ideally, this should be:
-        *      pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
+        *      pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
-        * UC MINUS.
+        * UC MINUS. Drivers that are certain they need or can already
+        * be converted over to strong UC can use ioremap_uc().
         */
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -247,6 +248,39 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 EXPORT_SYMBOL(ioremap_nocache);
 
 /**
+ * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
+ * @phys_addr:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap_uc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked with a strong
+ * preference as completely uncachable on the CPU when possible. For non-PAT
+ * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
+ * systems this will set the PAT entry for the pages as strong UC.  This call
+ * will honor existing caching rules from things like the PCI bus. Note that
+ * there are other caches and buffers on many busses. In particular driver
+ * authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable:
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
+{
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;
+
+       return __ioremap_caller(phys_addr, size, pcm,
+                               __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(ioremap_uc);
+
+/**
  * ioremap_wc  -       map memory into CPU space write combined
  * @phys_addr: bus address of the memory
  * @size:      size of the resource to map
@@ -258,7 +292,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-       if (pat_enabled)
+       if (pat_enabled())
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                        __builtin_return_address(0));
        else
@@ -331,7 +365,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int arch_ioremap_pud_supported(void)
+int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
        return cpu_has_gbpages;
@@ -340,7 +374,7 @@ int arch_ioremap_pud_supported(void)
 #endif
 }
 
-int arch_ioremap_pmd_supported(void)
+int __init arch_ioremap_pmd_supported(void)
 {
        return cpu_has_pse;
 }
index 89af288..70d221f 100644 (file)
@@ -129,16 +129,15 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
-       void *vend = vaddr + size - 1;
+       unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+       void *vend = vaddr + size;
+       void *p;
 
        mb();
 
-       for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-               clflushopt(vaddr);
-       /*
-        * Flush any possible final partial cacheline:
-        */
-       clflushopt(vend);
+       for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+            p < vend; p += boot_cpu_data.x86_clflush_size)
+               clflushopt(p);
 
        mb();
 }
@@ -418,13 +417,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
        phys_addr_t phys_addr;
        unsigned long offset;
        enum pg_level level;
-       unsigned long psize;
        unsigned long pmask;
        pte_t *pte;
 
        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
-       psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
        phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
@@ -1468,6 +1465,9 @@ int _set_memory_uc(unsigned long addr, int numpages)
 {
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
+        * If you really need strong UC use ioremap_uc(), but note
+        * that you cannot override IO areas with set_memory_*() as
+        * these helpers cannot work with IO memory.
         */
        return change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
@@ -1571,7 +1571,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return set_memory_uc(addr, numpages);
 
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
index 35af677..a1c9654 100644 (file)
 #include "pat_internal.h"
 #include "mm_internal.h"
 
-#ifdef CONFIG_X86_PAT
-int __read_mostly pat_enabled = 1;
+#undef pr_fmt
+#define pr_fmt(fmt) "" fmt
+
+static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
 
 static inline void pat_disable(const char *reason)
 {
-       pat_enabled = 0;
-       printk(KERN_INFO "%s\n", reason);
+       __pat_enabled = 0;
+       pr_info("x86/PAT: %s\n", reason);
 }
 
 static int __init nopat(char *str)
@@ -48,13 +50,12 @@ static int __init nopat(char *str)
        return 0;
 }
 early_param("nopat", nopat);
-#else
-static inline void pat_disable(const char *reason)
+
+bool pat_enabled(void)
 {
-       (void)reason;
+       return !!__pat_enabled;
 }
-#endif
-
+EXPORT_SYMBOL_GPL(pat_enabled);
 
 int pat_debug_enable;
 
@@ -188,7 +189,7 @@ void pat_init_cache_modes(void)
                                           pat_msg + 4 * i);
                update_cache_mode_entry(i, cache);
        }
-       pr_info("PAT configuration [0-7]: %s\n", pat_msg);
+       pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
 }
 
 #define PAT(x, y)      ((u64)PAT_ ## y << ((x)*8))
@@ -198,7 +199,7 @@ void pat_init(void)
        u64 pat;
        bool boot_cpu = !boot_pat_state;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return;
 
        if (!cpu_has_pat) {
@@ -211,8 +212,7 @@ void pat_init(void)
                         * switched to PAT on the boot CPU. We have no way to
                         * undo PAT.
                         */
-                       printk(KERN_ERR "PAT enabled, "
-                              "but not supported by secondary CPU\n");
+                       pr_err("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
                        BUG();
                }
        }
@@ -267,9 +267,9 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_MODE_WB) {
-               u8 mtrr_type;
+               u8 mtrr_type, uniform;
 
-               mtrr_type = mtrr_type_lookup(start, end);
+               mtrr_type = mtrr_type_lookup(start, end, &uniform);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -347,7 +347,7 @@ static int reserve_ram_pages_type(u64 start, u64 end,
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
                if (type != -1) {
-                       pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
+                       pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
@@ -400,7 +400,7 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        BUG_ON(start >= end); /* end is exclusive */
 
-       if (!pat_enabled) {
+       if (!pat_enabled()) {
                /* This is identical to page table setting without PAT */
                if (new_type) {
                        if (req_type == _PAGE_CACHE_MODE_WC)
@@ -451,9 +451,9 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
-               printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
-                      start, end - 1,
-                      cattr_name(new->type), cattr_name(req_type));
+               pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+                       start, end - 1,
+                       cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
 
@@ -475,7 +475,7 @@ int free_memtype(u64 start, u64 end)
        int is_range_ram;
        struct memtype *entry;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /* Low ISA region is always mapped WB. No need to track */
@@ -497,8 +497,8 @@ int free_memtype(u64 start, u64 end)
        spin_unlock(&memtype_lock);
 
        if (!entry) {
-               printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-                      current->comm, current->pid, start, end - 1);
+               pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+                       current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }
 
@@ -623,13 +623,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        u64 to = from + size;
        u64 cursor = from;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 1;
 
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
-                              current->comm, from, to - 1);
+                       pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
+                               current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
@@ -659,7 +659,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
-       if (!pat_enabled &&
+       if (!pat_enabled() &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
@@ -698,8 +698,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
                                size;
 
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
-               printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
-                       "for [mem %#010Lx-%#010Lx]\n",
+               pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
@@ -729,12 +728,12 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
         * the type requested matches the type of first page in the range.
         */
        if (is_ram) {
-               if (!pat_enabled)
+               if (!pat_enabled())
                        return 0;
 
                pcm = lookup_memtype(paddr);
                if (want_pcm != pcm) {
-                       printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+                       pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_pcm),
                                (unsigned long long)paddr,
@@ -755,13 +754,12 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                        free_memtype(paddr, paddr + size);
-                       printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-                               " for [mem %#010Lx-%#010Lx], got %s\n",
-                               current->comm, current->pid,
-                               cattr_name(want_pcm),
-                               (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size - 1),
-                               cattr_name(pcm));
+                       pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
+                              current->comm, current->pid,
+                              cattr_name(want_pcm),
+                              (unsigned long long)paddr,
+                              (unsigned long long)(paddr + size - 1),
+                              cattr_name(pcm));
                        return -EINVAL;
                }
                /*
@@ -844,7 +842,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                return ret;
        }
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /*
@@ -872,7 +870,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 {
        enum page_cache_mode pcm;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /* Set prot based on lookup */
@@ -913,7 +911,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
-       if (pat_enabled)
+       if (pat_enabled())
                return __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
        else
@@ -996,7 +994,7 @@ static const struct file_operations memtype_fops = {
 
 static int __init pat_memtype_list_init(void)
 {
-       if (pat_enabled) {
+       if (pat_enabled()) {
                debugfs_create_file("pat_memtype_list", S_IRUSR,
                                    arch_debugfs_dir, NULL, &memtype_fops);
        }
index f641162..a739bfc 100644 (file)
@@ -4,7 +4,7 @@
 extern int pat_debug_enable;
 
 #define dprintk(fmt, arg...) \
-       do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+       do { if (pat_debug_enable) pr_info("x86/PAT: " fmt, ##arg); } while (0)
 
 struct memtype {
        u64                     start;
index 6582adc..6393108 100644 (file)
@@ -160,9 +160,9 @@ success:
        return 0;
 
 failure:
-       printk(KERN_INFO "%s:%d conflicting memory types "
-               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
-               end, cattr_name(found_type), cattr_name(match->type));
+       pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+               current->comm, current->pid, start, end,
+               cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
 }
 
index 0b97d2c..fb0a9dd 100644 (file)
@@ -563,16 +563,31 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
 }
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+/**
+ * pud_set_huge - setup kernel PUD mapping
+ *
+ * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
+ * function sets up a huge page only if any of the following conditions are met:
+ *
+ * - MTRRs are disabled, or
+ *
+ * - MTRRs are enabled and the range is completely covered by a single MTRR, or
+ *
+ * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
+ *   has no effect on the requested PAT memory type.
+ *
+ * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
+ * page mapping attempt fails.
+ *
+ * Returns 1 on success and 0 on failure.
+ */
 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 {
-       u8 mtrr;
+       u8 mtrr, uniform;
 
-       /*
-        * Do not use a huge page when the range is covered by non-WB type
-        * of MTRRs.
-        */
-       mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
-       if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+       mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
+       if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
+           (mtrr != MTRR_TYPE_WRBACK))
                return 0;
 
        prot = pgprot_4k_2_large(prot);
@@ -584,17 +599,24 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
 
+/**
+ * pmd_set_huge - setup kernel PMD mapping
+ *
+ * See text over pud_set_huge() above.
+ *
+ * Returns 1 on success and 0 on failure.
+ */
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 {
-       u8 mtrr;
+       u8 mtrr, uniform;
 
-       /*
-        * Do not use a huge page when the range is covered by non-WB type
-        * of MTRRs.
-        */
-       mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
-       if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+       mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
+       if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
+           (mtrr != MTRR_TYPE_WRBACK)) {
+               pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
+                            __func__, addr, addr + PMD_SIZE);
                return 0;
+       }
 
        prot = pgprot_4k_2_large(prot);
 
@@ -605,6 +627,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
 
+/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
 int pud_clear_huge(pud_t *pud)
 {
        if (pud_large(*pud)) {
@@ -615,6 +642,11 @@ int pud_clear_huge(pud_t *pud)
        return 0;
 }
 
+/**
+ * pmd_clear_huge - clear kernel PMD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PMD map is found).
+ */
 int pmd_clear_huge(pmd_t *pmd)
 {
        if (pmd_large(*pmd)) {
index 99f7610..ddeff48 100644 (file)
@@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        }
        ctx.cleanup_addr = proglen;
 
-       for (pass = 0; pass < 10; pass++) {
+       /* JITed image shrinks with every pass and the loop iterates
+        * until the image stops shrinking. Very large bpf programs
+        * may converge on the last pass. In such case do one more
+        * pass to emit the final image
+        */
+       for (pass = 0; pass < 10 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
                        image = NULL;
index d939633..14a63ed 100644 (file)
@@ -482,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_sysdata *sd = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_sysdata *sd = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       }
        return 0;
 }
 
index 349c0d3..0a9f2ca 100644 (file)
@@ -429,12 +429,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
         * Caller can followup with UC MINUS request and add a WC mtrr if there
         * is a free mtrr slot.
         */
-       if (!pat_enabled && write_combine)
+       if (!pat_enabled() && write_combine)
                return -EINVAL;
 
-       if (pat_enabled && write_combine)
+       if (pat_enabled() && write_combine)
                prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
-       else if (pat_enabled || boot_cpu_data.x86 > 3)
+       else if (pat_enabled() || boot_cpu_data.x86 > 3)
                /*
                 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
                 * To avoid attribute conflicts, request UC MINUS here
index a62e0be..f1a6c8e 100644 (file)
@@ -1,4 +1,5 @@
 # Platform specific code goes here
+obj-y  += atom/
 obj-y  += ce4100/
 obj-y  += efi/
 obj-y  += geode/
diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile
new file mode 100644 (file)
index 0000000..0a3a40c
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
new file mode 100644 (file)
index 0000000..5ca8ead
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Intel SOC Punit device state debug driver
+ * Punit controls power management for North Complex devices (Graphics
+ * blocks, Image Signal Processing, video processing, display, DSP etc.)
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+/* Side band Interface port */
+#define PUNIT_PORT             0x04
+/* Power gate status reg */
+#define PWRGT_STATUS           0x61
+/* Subsystem config/status Video processor */
+#define VED_SS_PM0             0x32
+/* Subsystem config/status ISP (Image Signal Processor) */
+#define ISP_SS_PM0             0x39
+/* Subsystem config/status Input/output controller */
+#define MIO_SS_PM              0x3B
+/* Shift bits for getting status for video, isp and i/o */
+#define SSS_SHIFT              24
+/* Shift bits for getting status for graphics rendering */
+#define RENDER_POS             0
+/* Shift bits for getting status for media control */
+#define MEDIA_POS              2
+/* Shift bits for getting status for Valley View/Baytrail display */
+#define VLV_DISPLAY_POS                6
+/* Subsystem config/status display for Cherry Trail SOC */
+#define CHT_DSP_SSS            0x36
+/* Shift bits for getting status for display */
+#define CHT_DSP_SSS_POS                16
+
+struct punit_device {
+       char *name;
+       int reg;
+       int sss_pos;
+};
+
+static const struct punit_device punit_device_byt[] = {
+       { "GFX RENDER", PWRGT_STATUS,   RENDER_POS },
+       { "GFX MEDIA",  PWRGT_STATUS,   MEDIA_POS },
+       { "DISPLAY",    PWRGT_STATUS,   VLV_DISPLAY_POS },
+       { "VED",        VED_SS_PM0,     SSS_SHIFT },
+       { "ISP",        ISP_SS_PM0,     SSS_SHIFT },
+       { "MIO",        MIO_SS_PM,      SSS_SHIFT },
+       { NULL }
+};
+
+static const struct punit_device punit_device_cht[] = {
+       { "GFX RENDER", PWRGT_STATUS,   RENDER_POS },
+       { "GFX MEDIA",  PWRGT_STATUS,   MEDIA_POS },
+       { "DISPLAY",    CHT_DSP_SSS,    CHT_DSP_SSS_POS },
+       { "VED",        VED_SS_PM0,     SSS_SHIFT },
+       { "ISP",        ISP_SS_PM0,     SSS_SHIFT },
+       { "MIO",        MIO_SS_PM,      SSS_SHIFT },
+       { NULL }
+};
+
+static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
+{
+       u32 punit_pwr_status;
+       struct punit_device *punit_devp = seq_file->private;
+       int index;
+       int status;
+
+       seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
+       while (punit_devp->name) {
+               status = iosf_mbi_read(PUNIT_PORT, BT_MBI_PMC_READ,
+                                      punit_devp->reg,
+                                      &punit_pwr_status);
+               if (status) {
+                       seq_printf(seq_file, "%9s : Read Failed\n",
+                                  punit_devp->name);
+               } else  {
+                       index = (punit_pwr_status >> punit_devp->sss_pos) & 3;
+                       seq_printf(seq_file, "%9s : %s\n", punit_devp->name,
+                                  dstates[index]);
+               }
+               punit_devp++;
+       }
+
+       return 0;
+}
+
+static int punit_dev_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, punit_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations punit_dev_state_ops = {
+       .open           = punit_dev_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static struct dentry *punit_dbg_file;
+
+static int punit_dbgfs_register(struct punit_device *punit_device)
+{
+       static struct dentry *dev_state;
+
+       punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
+       if (!punit_dbg_file)
+               return -ENXIO;
+
+       dev_state = debugfs_create_file("dev_power_state", S_IFREG | S_IRUGO,
+                                       punit_dbg_file, punit_device,
+                                       &punit_dev_state_ops);
+       if (!dev_state) {
+               pr_err("punit_dev_state register failed\n");
+               debugfs_remove(punit_dbg_file);
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static void punit_dbgfs_unregister(void)
+{
+       debugfs_remove_recursive(punit_dbg_file);
+}
+
+#define ICPU(model, drv_data) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT,\
+         (kernel_ulong_t)&drv_data }
+
+static const struct x86_cpu_id intel_punit_cpu_ids[] = {
+       ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
+       ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+       {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
+
+static int __init punit_atom_debug_init(void)
+{
+       const struct x86_cpu_id *id;
+       int ret;
+
+       id = x86_match_cpu(intel_punit_cpu_ids);
+       if (!id)
+               return -ENODEV;
+
+       ret = punit_dbgfs_register((struct punit_device *)id->driver_data);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void __exit punit_atom_debug_exit(void)
+{
+       punit_dbgfs_unregister();
+}
+
+module_init(punit_atom_debug_init);
+module_exit(punit_atom_debug_exit);
+
+MODULE_AUTHOR("Kumar P, Mahesh <mahesh.kumar.p@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Driver for Punit devices states debugging");
+MODULE_LICENSE("GPL v2");
index 172a02a..ba78ccf 100644 (file)
@@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
        return -EINVAL;
 }
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       return NULL;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *vaddr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
index 7871603..03b5f8d 100644 (file)
@@ -734,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
+static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
@@ -1578,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        blk_rq_bio_prep(req->q, req, bio);
 }
 
-void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
@@ -1686,7 +1688,6 @@ out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
 }
-EXPORT_SYMBOL_GPL(blk_queue_bio);      /* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location
index 8aaf298..362905e 100644 (file)
@@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG
          This option enables the user-spaces interface for random
          number generator algorithms.
 
-config CRYPTO_USER_API_AEAD
-       tristate "User-space interface for AEAD cipher algorithms"
-       depends on NET
-       select CRYPTO_AEAD
-       select CRYPTO_USER_API
-       help
-         This option enables the user-spaces interface for AEAD
-         cipher algorithms.
-
 config CRYPTO_HASH_INFO
        bool
 
index 00a6fe1..69abada 100644 (file)
@@ -33,7 +33,7 @@ struct aead_ctx {
        /*
         * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
         * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-        * bytes
+        * pages
         */
 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES
        struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                if (err < 0)
                        goto unlock;
                usedpages += err;
-               /* chain the new scatterlist with initial list */
+               /* chain the new scatterlist with previous one */
                if (cnt)
-                       scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-                                       ctx->rsgl[cnt].sg, 1,
-                                       sg_nents(ctx->rsgl[cnt-1].sg));
+                       af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+
                /* we do not need more iovecs as we have sufficient memory */
                if (outlen <= usedpages)
                        break;
index 88f13c5..44f2514 100644 (file)
@@ -2257,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        page_code = GET_INQ_PAGE_CODE(cmd);
        alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-       inq_response = kmalloc(alloc_len, GFP_KERNEL);
+       inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
+                               GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
index 288547a..8c81af6 100644 (file)
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3007) },
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x04CA, 0x300b) },
+       { USB_DEVICE(0x04CA, 0x300f) },
        { USB_DEVICE(0x04CA, 0x3010) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0930, 0x0220) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0cf3, 0xe003) },
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0CF3, 0xE005) },
+       { USB_DEVICE(0x0CF3, 0xE006) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x13d3, 0x3393) },
@@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
index d21f3b4..3c10d4d 100644 (file)
@@ -186,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@@ -218,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
        /* QCA ROME chipset */
+       { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
 
index 5bd792c..ab3bde1 100644 (file)
@@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
 
        /* Look for a specific device type */
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                if (type == dev_type)
                        return cdmm + drb * CDMM_DRB_SIZE;
@@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
        bus->discovered = true;
        pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
                rev  = (acsr & CDMM_ACSR_DEVREV)  >> CDMM_ACSR_DEVREV_SHIFT;
index 44ea107..30335d3 100644 (file)
@@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
        if (!pdata)
                return -ENOMEM;
 
-       pdata->clk_xtal = of_clk_get(np, 0);
-       if (!IS_ERR(pdata->clk_xtal))
-               clk_put(pdata->clk_xtal);
-       pdata->clk_clkin = of_clk_get(np, 1);
-       if (!IS_ERR(pdata->clk_clkin))
-               clk_put(pdata->clk_clkin);
-
        /*
         * property silabs,pll-source : <num src>, [<..>]
         * allow to selectively set pll source
@@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
        i2c_set_clientdata(client, drvdata);
        drvdata->client = client;
        drvdata->variant = variant;
-       drvdata->pxtal = pdata->clk_xtal;
-       drvdata->pclkin = pdata->clk_clkin;
+       drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+       drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
+
+       if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+           PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+
+       /*
+        * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
+        *   VARIANT_C can have CLKIN instead.
+        */
+       if (IS_ERR(drvdata->pxtal) &&
+           (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
+               dev_err(&client->dev, "missing parent clock\n");
+               return -EINVAL;
+       }
 
        drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
        if (IS_ERR(drvdata->regmap)) {
@@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
                }
        }
 
+       if (!IS_ERR(drvdata->pxtal))
+               clk_prepare_enable(drvdata->pxtal);
+       if (!IS_ERR(drvdata->pclkin))
+               clk_prepare_enable(drvdata->pclkin);
+
        /* register xtal input clock gate */
        memset(&init, 0, sizeof(init));
        init.name = si5351_input_names[0];
@@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->xtal);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return PTR_ERR(clk);
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register clkin input clock gate */
@@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return PTR_ERR(clk);
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
        }
 
@@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return -EINVAL;
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register PLLB or VXCO (Si5351B) */
@@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return -EINVAL;
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register clk multisync and clk out divider */
@@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
                num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
 
        if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
-                   !drvdata->onecell.clks))
-               return -ENOMEM;
+                   !drvdata->onecell.clks)) {
+               ret = -ENOMEM;
+               goto err_clk;
+       }
 
        for (n = 0; n < num_clocks; n++) {
                drvdata->msynth[n].num = n;
@@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return -EINVAL;
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
        }
 
@@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return -EINVAL;
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
                drvdata->onecell.clks[n] = clk;
 
@@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
                                  &drvdata->onecell);
        if (ret) {
                dev_err(&client->dev, "unable to add clk provider\n");
-               return ret;
+               goto err_clk;
        }
 
        return 0;
+
+err_clk:
+       if (!IS_ERR(drvdata->pxtal))
+               clk_disable_unprepare(drvdata->pxtal);
+       if (!IS_ERR(drvdata->pclkin))
+               clk_disable_unprepare(drvdata->pclkin);
+       return ret;
 }
 
 static const struct i2c_device_id si5351_i2c_ids[] = {
index 459ce9d..5b0f418 100644 (file)
@@ -1475,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
         */
        if (clk->prepare_count) {
                clk_core_prepare(parent);
+               flags = clk_enable_lock();
                clk_core_enable(parent);
                clk_core_enable(clk);
+               clk_enable_unlock(flags);
        }
 
        /* update the clk tree topology */
@@ -1491,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
                                   struct clk_core *parent,
                                   struct clk_core *old_parent)
 {
+       unsigned long flags;
+
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
        if (core->prepare_count) {
+               flags = clk_enable_lock();
                clk_core_disable(core);
                clk_core_disable(old_parent);
+               clk_enable_unlock(flags);
                clk_core_unprepare(old_parent);
        }
 }
@@ -1525,8 +1531,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
                clk_enable_unlock(flags);
 
                if (clk->prepare_count) {
+                       flags = clk_enable_lock();
                        clk_core_disable(clk);
                        clk_core_disable(parent);
+                       clk_enable_unlock(flags);
                        clk_core_unprepare(parent);
                }
                return ret;
index d345847..c66f7bc 100644 (file)
@@ -71,8 +71,8 @@ static const char *gcc_xo_gpll0_bimc[] = {
 static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
        { P_XO, 0 },
        { P_GPLL0_AUX, 3 },
-       { P_GPLL2_AUX, 2 },
        { P_GPLL1, 1 },
+       { P_GPLL2_AUX, 2 },
 };
 
 static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
@@ -1115,7 +1115,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
 static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
        F(100000000, P_GPLL0, 8, 0, 0),
        F(160000000, P_GPLL0, 5, 0, 0),
-       F(228570000, P_GPLL0, 5, 0, 0),
+       F(228570000, P_GPLL0, 3.5, 0, 0),
        { }
 };
 
index 17e9af7..a17683b 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250)  += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260)   += clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410)   += clk-exynos5410.o
 obj-$(CONFIG_SOC_EXYNOS5420)   += clk-exynos5420.o
-obj-$(CONFIG_ARCH_EXYNOS5433)  += clk-exynos5433.o
+obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos5433.o
 obj-$(CONFIG_SOC_EXYNOS5440)   += clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-audss.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-clkout.o
index 07d666c..bea4a17 100644 (file)
@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
        { .offset = SRC_MASK_PERIC0,            .value = 0x11111110, },
        { .offset = SRC_MASK_PERIC1,            .value = 0x11111100, },
        { .offset = SRC_MASK_ISP,               .value = 0x11111000, },
+       { .offset = GATE_BUS_TOP,               .value = 0xffffffff, },
        { .offset = GATE_BUS_DISP1,             .value = 0xffffffff, },
        { .offset = GATE_IP_PERIC,              .value = 0xffffffff, },
 };
index 387e3e3..9e04ae2 100644 (file)
@@ -748,7 +748,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
        PLL_35XX_RATE(825000000U,  275, 4,  1),
        PLL_35XX_RATE(800000000U,  400, 6,  1),
        PLL_35XX_RATE(733000000U,  733, 12, 1),
-       PLL_35XX_RATE(700000000U,  360, 6,  1),
+       PLL_35XX_RATE(700000000U,  175, 3,  1),
        PLL_35XX_RATE(667000000U,  222, 4,  1),
        PLL_35XX_RATE(633000000U,  211, 4,  1),
        PLL_35XX_RATE(600000000U,  500, 5,  2),
@@ -760,14 +760,14 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
        PLL_35XX_RATE(444000000U,  370, 5,  2),
        PLL_35XX_RATE(420000000U,  350, 5,  2),
        PLL_35XX_RATE(400000000U,  400, 6,  2),
-       PLL_35XX_RATE(350000000U,  360, 6,  2),
+       PLL_35XX_RATE(350000000U,  350, 6,  2),
        PLL_35XX_RATE(333000000U,  222, 4,  2),
        PLL_35XX_RATE(300000000U,  500, 5,  3),
        PLL_35XX_RATE(266000000U,  532, 6,  3),
        PLL_35XX_RATE(200000000U,  400, 6,  3),
        PLL_35XX_RATE(166000000U,  332, 6,  3),
        PLL_35XX_RATE(160000000U,  320, 6,  3),
-       PLL_35XX_RATE(133000000U,  552, 6,  4),
+       PLL_35XX_RATE(133000000U,  532, 6,  4),
        PLL_35XX_RATE(100000000U,  400, 6,  4),
        { /* sentinel */ }
 };
@@ -1490,7 +1490,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
 
        /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
        GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
-                       ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0),
+                       ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
 
        /* ENABLE_PCLK_MIF_SECURE_RTC */
        GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
@@ -3665,7 +3665,7 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
                        ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
                        ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
-       GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll",
+       GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
                        ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
 };
 
@@ -3927,7 +3927,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
 #define ENABLE_PCLK_MSCL                               0x0900
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0                0x0904
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1                0x0908
-#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG              0x000c
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG              0x090c
 #define ENABLE_SCLK_MSCL                               0x0a00
 #define ENABLE_IP_MSCL0                                        0x0b00
 #define ENABLE_IP_MSCL1                                        0x0b04
index 6b8115f..83f281d 100644 (file)
@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                = container_of(chip, struct kempld_gpio_data, chip);
        struct kempld_device_data *pld = gpio->pld;
 
-       return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+       return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
 }
 
 static int kempld_gpio_pincount(struct kempld_device_data *pld)
index 59eaa23..6bc612b 100644 (file)
@@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock);
 static LIST_HEAD(gpio_lookup_list);
 LIST_HEAD(gpio_chips);
 
+
+static void gpiochip_free_hogs(struct gpio_chip *chip);
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+
+
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
        d->label = label;
@@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip)
 
 err_remove_chip:
        acpi_gpiochip_remove(chip);
+       gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&chip->list);
@@ -313,10 +319,6 @@ err_free_descs:
 }
 EXPORT_SYMBOL_GPL(gpiochip_add);
 
-/* Forward-declaration */
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
-static void gpiochip_free_hogs(struct gpio_chip *chip);
-
 /**
  * gpiochip_remove() - unregister a gpio_chip
  * @chip: the chip to unregister
index 266dcd6..0a95782 100644 (file)
@@ -36,9 +36,6 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
-#ifdef CONFIG_X86
-#include <asm/mtrr.h>
-#endif
 
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
@@ -197,16 +194,7 @@ static int drm_getmap(struct drm_device *dev, void *data,
        map->type = r_list->map->type;
        map->flags = r_list->map->flags;
        map->handle = (void *)(unsigned long) r_list->user_token;
-
-#ifdef CONFIG_X86
-       /*
-        * There appears to be exactly one user of the mtrr index: dritest.
-        * It's easy enough to keep it working on non-PAT systems.
-        */
-       map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
-#else
-       map->mtrr = -1;
-#endif
+       map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
 
        mutex_unlock(&dev->struct_mutex);
 
index 40c1db9..2f0ed11 100644 (file)
@@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                if (!crtc[i])
                        continue;
 
+               if (crtc[i]->cursor == plane)
+                       continue;
+
                /* There's no other way to figure out whether the crtc is running. */
                ret = drm_crtc_vblank_get(crtc[i]);
                if (ret == 0) {
index 1f7e33f..6714e5b 100644 (file)
@@ -91,7 +91,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 
 static void decon_clear_channel(struct decon_context *ctx)
 {
-       int win, ch_enabled = 0;
+       unsigned int win, ch_enabled = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -710,7 +710,7 @@ static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
        }
 }
 
-static struct exynos_drm_crtc_ops decon_crtc_ops = {
+static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .dpms = decon_dpms,
        .mode_fixup = decon_mode_fixup,
        .commit = decon_commit,
index 1dbfba5..30feb7d 100644 (file)
@@ -32,7 +32,6 @@
 #include <drm/bridge/ptn3460.h>
 
 #include "exynos_dp_core.h"
-#include "exynos_drm_fimd.h"
 
 #define ctx_from_connector(c)  container_of(c, struct exynos_dp_device, \
                                        connector)
@@ -196,7 +195,7 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
                }
        }
 
-       dev_err(dp->dev, "EDID Read success!\n");
+       dev_dbg(dp->dev, "EDID Read success!\n");
        return 0;
 }
 
@@ -1066,6 +1065,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweron(struct exynos_dp_device *dp)
 {
+       struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
        if (dp->dpms_mode == DRM_MODE_DPMS_ON)
                return;
 
@@ -1076,7 +1077,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
                }
        }
 
-       fimd_dp_clock_enable(dp_to_crtc(dp), true);
+       if (crtc->ops->clock_enable)
+               crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
        clk_prepare_enable(dp->clock);
        exynos_dp_phy_init(dp);
@@ -1087,6 +1089,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweroff(struct exynos_dp_device *dp)
 {
+       struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
        if (dp->dpms_mode != DRM_MODE_DPMS_ON)
                return;
 
@@ -1102,7 +1106,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
        exynos_dp_phy_exit(dp);
        clk_disable_unprepare(dp->clock);
 
-       fimd_dp_clock_enable(dp_to_crtc(dp), false);
+       if (crtc->ops->clock_enable)
+               crtc->ops->clock_enable(dp_to_crtc(dp), false);
 
        if (dp->panel) {
                if (drm_panel_unprepare(dp->panel))
index eb49195..9006b94 100644 (file)
@@ -238,11 +238,11 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                                              struct drm_plane *plane,
-                                              int pipe,
-                                              enum exynos_drm_output_type type,
-                                              struct exynos_drm_crtc_ops *ops,
-                                              void *ctx)
+                                       struct drm_plane *plane,
+                                       int pipe,
+                                       enum exynos_drm_output_type type,
+                                       const struct exynos_drm_crtc_ops *ops,
+                                       void *ctx)
 {
        struct exynos_drm_crtc *exynos_crtc;
        struct exynos_drm_private *private = drm_dev->dev_private;
index 0ecd8fc..0f3aa70 100644 (file)
 #include "exynos_drm_drv.h"
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                                              struct drm_plane *plane,
-                                              int pipe,
-                                              enum exynos_drm_output_type type,
-                                              struct exynos_drm_crtc_ops *ops,
-                                              void *context);
+                                       struct drm_plane *plane,
+                                       int pipe,
+                                       enum exynos_drm_output_type type,
+                                       const struct exynos_drm_crtc_ops *ops,
+                                       void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
index e12ecb5..29e3fb7 100644 (file)
@@ -71,13 +71,6 @@ enum exynos_drm_output_type {
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *           allocated for a overlay.
  * @zpos: order of overlay layer(z position).
- * @index_color: if using color key feature then this value would be used
- *                     as index color.
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
- * @local_path: in case of lcd type, local path mode on or off.
- * @transparency: transparency on or off.
- * @activated: activated or not.
  * @enabled: enabled or not.
  * @resume: to resume or not.
  *
@@ -108,13 +101,7 @@ struct exynos_drm_plane {
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
        unsigned int zpos;
-       unsigned int index_color;
 
-       bool default_win:1;
-       bool color_key:1;
-       bool local_path:1;
-       bool transparency:1;
-       bool activated:1;
        bool enabled:1;
        bool resume:1;
 };
@@ -181,6 +168,10 @@ struct exynos_drm_display {
  * @win_disable: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *     synchronization signal if there is a page flip request.
+ * @clock_enable: optional function enabling/disabling display domain clock,
+ *     called from exynos-dp driver before powering up (with
+ *     'enable' argument as true) and after powering down (with
+ *     'enable' as false).
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
@@ -195,6 +186,7 @@ struct exynos_drm_crtc_ops {
        void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
        void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
        void (*te_handler)(struct exynos_drm_crtc *crtc);
+       void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
 };
 
 /*
@@ -221,7 +213,7 @@ struct exynos_drm_crtc {
        unsigned int                    dpms;
        wait_queue_head_t               pending_flip_queue;
        struct drm_pending_vblank_event *event;
-       struct exynos_drm_crtc_ops      *ops;
+       const struct exynos_drm_crtc_ops        *ops;
        void                            *ctx;
 };
 
index 929cb03..142eb4e 100644 (file)
@@ -171,43 +171,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
        return &exynos_fb->fb;
 }
 
-static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       unsigned int cnt = 0;
-
-       if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
-               return drm_format_num_planes(mode_cmd->pixel_format);
-
-       while (cnt != MAX_FB_BUFFER) {
-               if (!mode_cmd->handles[cnt])
-                       break;
-               cnt++;
-       }
-
-       /*
-        * check if NV12 or NV12M.
-        *
-        * NV12
-        * handles[0] = base1, offsets[0] = 0
-        * handles[1] = base1, offsets[1] = Y_size
-        *
-        * NV12M
-        * handles[0] = base1, offsets[0] = 0
-        * handles[1] = base2, offsets[1] = 0
-        */
-       if (cnt == 2) {
-               /*
-                * in case of NV12 format, offsets[1] is not 0 and
-                * handles[0] is same as handles[1].
-                */
-               if (mode_cmd->offsets[1] &&
-                       mode_cmd->handles[0] == mode_cmd->handles[1])
-                       cnt = 1;
-       }
-
-       return cnt;
-}
-
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
@@ -230,7 +193,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
        drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
        exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-       exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+       exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
 
        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
index 9819fa6..a0edab8 100644 (file)
@@ -33,7 +33,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
-#include "exynos_drm_fimd.h"
 
 /*
  * FIMD stands for Fully Interactive Mobile Display and
@@ -216,7 +215,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
                DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
                                        bool enable)
 {
        u32 val = readl(ctx->regs + WINCON(win));
@@ -229,7 +228,8 @@ static void fimd_enable_video_output(struct fimd_context *ctx, int win,
        writel(val, ctx->regs + WINCON(win));
 }
 
-static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
+                                               unsigned int win,
                                                bool enable)
 {
        u32 val = readl(ctx->regs + SHADOWCON);
@@ -244,7 +244,7 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
 
 static void fimd_clear_channel(struct fimd_context *ctx)
 {
-       int win, ch_enabled = 0;
+       unsigned int win, ch_enabled = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -946,7 +946,24 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
                drm_handle_vblank(ctx->drm_dev, ctx->pipe);
 }
 
-static struct exynos_drm_crtc_ops fimd_crtc_ops = {
+static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+       struct fimd_context *ctx = crtc->ctx;
+       u32 val;
+
+       /*
+        * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
+        * clock. On these SoCs the bootloader may enable it but any
+        * power domain off/on will reset it to disable state.
+        */
+       if (ctx->driver_data != &exynos5_fimd_driver_data)
+               return;
+
+       val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+}
+
+static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .dpms = fimd_dpms,
        .mode_fixup = fimd_mode_fixup,
        .commit = fimd_commit,
@@ -956,6 +973,7 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .win_commit = fimd_win_commit,
        .win_disable = fimd_win_disable,
        .te_handler = fimd_te_handler,
+       .clock_enable = fimd_dp_clock_enable,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -1025,12 +1043,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
        if (ctx->display)
                exynos_drm_create_enc_conn(drm_dev, ctx->display);
 
-       ret = fimd_iommu_attach_devices(ctx, drm_dev);
-       if (ret)
-               return ret;
-
-       return 0;
-
+       return fimd_iommu_attach_devices(ctx, drm_dev);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1192,24 +1205,6 @@ static int fimd_remove(struct platform_device *pdev)
        return 0;
 }
 
-void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
-{
-       struct fimd_context *ctx = crtc->ctx;
-       u32 val;
-
-       /*
-        * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
-        * clock. On these SoCs the bootloader may enable it but any
-        * power domain off/on will reset it to disable state.
-        */
-       if (ctx->driver_data != &exynos5_fimd_driver_data)
-               return;
-
-       val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
-}
-EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
-
 struct platform_driver fimd_driver = {
        .probe          = fimd_probe,
        .remove         = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
deleted file mode 100644 (file)
index b4fcaa5..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_FIMD_H_
-#define _EXYNOS_DRM_FIMD_H_
-
-extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
-
-#endif /* _EXYNOS_DRM_FIMD_H_ */
index 13ea334..b1180fb 100644 (file)
@@ -76,7 +76,7 @@ int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
                        return -EFAULT;
                }
 
-               exynos_plane->dma_addr[i] = buffer->dma_addr;
+               exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
 
                DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
                                i, (unsigned long)exynos_plane->dma_addr[i]);
index 27e84ec..1b3479a 100644 (file)
@@ -217,7 +217,7 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
        return 0;
 }
 
-static struct exynos_drm_crtc_ops vidi_crtc_ops = {
+static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .dpms = vidi_dpms,
        .enable_vblank = vidi_enable_vblank,
        .disable_vblank = vidi_disable_vblank,
index fbec750..8874c1f 100644 (file)
 #define MIXER_WIN_NR           3
 #define MIXER_DEFAULT_WIN      0
 
+/* The pixelformats that are natively supported by the mixer. */
+#define MXR_FORMAT_RGB565      4
+#define MXR_FORMAT_ARGB1555    5
+#define MXR_FORMAT_ARGB4444    6
+#define MXR_FORMAT_ARGB8888    7
+
 struct mixer_resources {
        int                     irq;
        void __iomem            *mixer_regs;
@@ -327,7 +333,8 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
        mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
 }
 
-static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
+                               bool enable)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        u32 val = enable ? ~0 : 0;
@@ -359,8 +366,6 @@ static void mixer_run(struct mixer_context *ctx)
        struct mixer_resources *res = &ctx->mixer_res;
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
-
-       mixer_regs_dump(ctx);
 }
 
 static void mixer_stop(struct mixer_context *ctx)
@@ -373,16 +378,13 @@ static void mixer_stop(struct mixer_context *ctx)
        while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
                        --timeout)
                usleep_range(10000, 12000);
-
-       mixer_regs_dump(ctx);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, int win)
+static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        unsigned long flags;
        struct exynos_drm_plane *plane;
-       unsigned int buf_num = 1;
        dma_addr_t luma_addr[2], chroma_addr[2];
        bool tiled_mode = false;
        bool crcb_mode = false;
@@ -393,27 +395,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        switch (plane->pixel_format) {
        case DRM_FORMAT_NV12:
                crcb_mode = false;
-               buf_num = 2;
                break;
-       /* TODO: single buffer format NV12, NV21 */
+       case DRM_FORMAT_NV21:
+               crcb_mode = true;
+               break;
        default:
-               /* ignore pixel format at disable time */
-               if (!plane->dma_addr[0])
-                       break;
-
                DRM_ERROR("pixel format for vp is wrong [%d].\n",
                                plane->pixel_format);
                return;
        }
 
-       if (buf_num == 2) {
-               luma_addr[0] = plane->dma_addr[0];
-               chroma_addr[0] = plane->dma_addr[1];
-       } else {
-               luma_addr[0] = plane->dma_addr[0];
-               chroma_addr[0] = plane->dma_addr[0]
-                       + (plane->pitch * plane->fb_height);
-       }
+       luma_addr[0] = plane->dma_addr[0];
+       chroma_addr[0] = plane->dma_addr[1];
 
        if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
                ctx->interlace = true;
@@ -484,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
 
+       mixer_regs_dump(ctx);
        vp_regs_dump(ctx);
 }
 
@@ -518,7 +512,7 @@ fail:
        return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        unsigned long flags;
@@ -531,20 +525,27 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 
        plane = &ctx->planes[win];
 
-       #define RGB565 4
-       #define ARGB1555 5
-       #define ARGB4444 6
-       #define ARGB8888 7
+       switch (plane->pixel_format) {
+       case DRM_FORMAT_XRGB4444:
+               fmt = MXR_FORMAT_ARGB4444;
+               break;
 
-       switch (plane->bpp) {
-       case 16:
-               fmt = ARGB4444;
+       case DRM_FORMAT_XRGB1555:
+               fmt = MXR_FORMAT_ARGB1555;
                break;
-       case 32:
-               fmt = ARGB8888;
+
+       case DRM_FORMAT_RGB565:
+               fmt = MXR_FORMAT_RGB565;
+               break;
+
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fmt = MXR_FORMAT_ARGB8888;
                break;
+
        default:
-               fmt = ARGB8888;
+               DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
+               return;
        }
 
        /* check if mixer supports requested scaling setup */
@@ -617,6 +618,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       mixer_regs_dump(ctx);
 }
 
 static void vp_win_reset(struct mixer_context *ctx)
@@ -1070,6 +1073,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
        mutex_unlock(&ctx->mixer_mutex);
 
        mixer_stop(ctx);
+       mixer_regs_dump(ctx);
        mixer_window_suspend(ctx);
 
        ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -1126,7 +1130,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
        return -EINVAL;
 }
 
-static struct exynos_drm_crtc_ops mixer_crtc_ops = {
+static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
        .dpms                   = mixer_dpms,
        .enable_vblank          = mixer_enable_vblank,
        .disable_vblank         = mixer_disable_vblank,
@@ -1156,7 +1160,7 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
        .has_sclk = 1,
 };
 
-static struct platform_device_id mixer_driver_types[] = {
+static const struct platform_device_id mixer_driver_types[] = {
        {
                .name           = "s5p-mixer",
                .driver_data    = (unsigned long)&exynos4210_mxr_drv_data,
index fa4ccb3..555b896 100644 (file)
@@ -2045,22 +2045,20 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
        p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
        p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 
-       if (crtc->primary->state->fb) {
-               p->pri.enabled = true;
+       if (crtc->primary->state->fb)
                p->pri.bytes_per_pixel =
                        crtc->primary->state->fb->bits_per_pixel / 8;
-       } else {
-               p->pri.enabled = false;
-               p->pri.bytes_per_pixel = 0;
-       }
+       else
+               p->pri.bytes_per_pixel = 4;
+
+       p->cur.bytes_per_pixel = 4;
+       /*
+        * TODO: for now, assume primary and cursor planes are always enabled.
+        * Setting them to false makes the screen flicker.
+        */
+       p->pri.enabled = true;
+       p->cur.enabled = true;
 
-       if (crtc->cursor->state->fb) {
-               p->cur.enabled = true;
-               p->cur.bytes_per_pixel = 4;
-       } else {
-               p->cur.enabled = false;
-               p->cur.bytes_per_pixel = 0;
-       }
        p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
        p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
index 94a5bee..bbdcab0 100644 (file)
@@ -384,7 +384,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-               drm_gem_object_unreference(gpu->memptrs_bo);
+               drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
        }
        release_firmware(gpu->pm4);
        release_firmware(gpu->pfp);
index 28d1f95..ad50b80 100644 (file)
@@ -177,6 +177,11 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
+       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+               encoders[i]->bridge = msm_dsi->bridge;
+               msm_dsi->encoders[i] = encoders[i];
+       }
+
        msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
        if (IS_ERR(msm_dsi->connector)) {
                ret = PTR_ERR(msm_dsi->connector);
@@ -185,11 +190,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
-       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-               encoders[i]->bridge = msm_dsi->bridge;
-               msm_dsi->encoders[i] = encoders[i];
-       }
-
        priv->bridges[priv->num_bridges++]       = msm_dsi->bridge;
        priv->connectors[priv->num_connectors++] = msm_dsi->connector;
 
index 956b224..649d20d 100644 (file)
@@ -1023,7 +1023,7 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
                *data = buf[1]; /* strip out dcs type */
                return 1;
        } else {
-               pr_err("%s: read data does not match with rx_buf len %d\n",
+               pr_err("%s: read data does not match with rx_buf len %zu\n",
                        __func__, msg->rx_len);
                return -EINVAL;
        }
@@ -1040,7 +1040,7 @@ static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
                data[1] = buf[2];
                return 2;
        } else {
-               pr_err("%s: read data does not match with rx_buf len %d\n",
+               pr_err("%s: read data does not match with rx_buf len %zu\n",
                        __func__, msg->rx_len);
                return -EINVAL;
        }
@@ -1093,7 +1093,6 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
 {
        u32 *lp, *temp, data;
        int i, j = 0, cnt;
-       bool ack_error = false;
        u32 read_cnt;
        u8 reg[16];
        int repeated_bytes = 0;
@@ -1105,15 +1104,10 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
        if (cnt > 4)
                cnt = 4; /* 4 x 32 bits registers only */
 
-       /* Calculate real read data count */
-       read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
-
-       ack_error = (rx_byte == 4) ?
-               (read_cnt == 8) : /* short pkt + 4-byte error pkt */
-               (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
-
-       if (ack_error)
-               read_cnt -= 4; /* Remove 4 byte error pkt */
+       if (rx_byte == 4)
+               read_cnt = 4;
+       else
+               read_cnt = pkt_size + 6;
 
        /*
         * In case of multiple reads from the panel, after the first read, there
@@ -1215,7 +1209,7 @@ static void dsi_err_worker(struct work_struct *work)
                container_of(work, struct msm_dsi_host, err_work);
        u32 status = msm_host->err_work_state;
 
-       pr_err("%s: status=%x\n", __func__, status);
+       pr_err_ratelimited("%s: status=%x\n", __func__, status);
        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
                dsi_sw_reset_restore(msm_host);
 
@@ -1797,6 +1791,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
                pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
                ret = 0;
+               break;
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
                ret = dsi_short_read1_resp(buf, msg);
index ee3ebca..0a40f3c 100644 (file)
@@ -462,7 +462,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
        struct drm_connector *connector = NULL;
        struct dsi_connector *dsi_connector;
-       int ret;
+       int ret, i;
 
        dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
                                sizeof(*dsi_connector), GFP_KERNEL);
@@ -495,6 +495,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        if (ret)
                goto fail;
 
+       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
+               drm_mode_connector_attach_encoder(connector,
+                                               msm_dsi->encoders[i]);
+
        return connector;
 
 fail:
index 5f5a84f..208f9d4 100644 (file)
@@ -132,7 +132,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
        /* msg sanity check */
        if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
                (msg->size > AUX_CMD_I2C_MAX)) {
-               pr_err("%s: invalid msg: size(%d), request(%x)\n",
+               pr_err("%s: invalid msg: size(%zu), request(%x)\n",
                        __func__, msg->size, msg->request);
                return -EINVAL;
        }
@@ -155,7 +155,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
                 */
                edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
                msm_edp_aux_ctrl(aux, 1);
-               pr_err("%s: aux timeout, %d\n", __func__, ret);
+               pr_err("%s: aux timeout, %zd\n", __func__, ret);
                goto unlock_exit;
        }
        DBG("completion");
index d8812e8..b4d1b46 100644 (file)
@@ -151,6 +151,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
        if (ret)
                goto fail;
 
+       drm_mode_connector_attach_encoder(connector, edp->encoder);
+
        return connector;
 
 fail:
index 0ec5abd..29e52d7 100644 (file)
@@ -1149,12 +1149,13 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
        ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
        if (!ctrl->aux || !ctrl->drm_aux) {
                pr_err("%s:failed to init aux\n", __func__);
-               return ret;
+               return -ENOMEM;
        }
 
        ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
        if (!ctrl->phy) {
                pr_err("%s:failed to init phy\n", __func__);
+               ret = -ENOMEM;
                goto err_destory_aux;
        }
 
index e001e6b..8b9a793 100644 (file)
@@ -72,14 +72,13 @@ const struct mdp5_cfg_hw msm8x74_config = {
                .base = { 0x12d00, 0x12e00, 0x12f00 },
        },
        .intf = {
-               .count = 4,
                .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
-       },
-       .intfs = {
-               [0] = INTF_eDP,
-               [1] = INTF_DSI,
-               [2] = INTF_DSI,
-               [3] = INTF_HDMI,
+               .connect = {
+                       [0] = INTF_eDP,
+                       [1] = INTF_DSI,
+                       [2] = INTF_DSI,
+                       [3] = INTF_HDMI,
+               },
        },
        .max_clk = 200000000,
 };
@@ -142,14 +141,13 @@ const struct mdp5_cfg_hw apq8084_config = {
                .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
        },
        .intf = {
-               .count = 5,
                .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
-       },
-       .intfs = {
-               [0] = INTF_eDP,
-               [1] = INTF_DSI,
-               [2] = INTF_DSI,
-               [3] = INTF_HDMI,
+               .connect = {
+                       [0] = INTF_eDP,
+                       [1] = INTF_DSI,
+                       [2] = INTF_DSI,
+                       [3] = INTF_HDMI,
+               },
        },
        .max_clk = 320000000,
 };
@@ -196,10 +194,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
 
        },
        .intf = {
-               .count = 1, /* INTF_1 */
-               .base = { 0x6B800 },
+               .base = { 0x00000, 0x6b800 },
+               .connect = {
+                       [0] = INTF_DISABLED,
+                       [1] = INTF_DSI,
+               },
        },
-       /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
        .max_clk = 320000000,
 };
 
index 3a551b0..69349ab 100644 (file)
@@ -59,6 +59,11 @@ struct mdp5_smp_block {
 
 #define MDP5_INTF_NUM_MAX      5
 
+struct mdp5_intf_block {
+       uint32_t base[MAX_BASES];
+       u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+};
+
 struct mdp5_cfg_hw {
        char  *name;
 
@@ -72,9 +77,7 @@ struct mdp5_cfg_hw {
        struct mdp5_sub_block dspp;
        struct mdp5_sub_block ad;
        struct mdp5_sub_block pp;
-       struct mdp5_sub_block intf;
-
-       u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+       struct mdp5_intf_block intf;
 
        uint32_t max_clk;
 };
index dfa8beb..bbacf9d 100644 (file)
@@ -206,8 +206,8 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
 {
-       const int intf_cnt = hw_cfg->intf.count;
-       const u32 *intfs = hw_cfg->intfs;
+       const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
+       const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
        int id = 0, i;
 
        for (i = 0; i < intf_cnt; i++) {
@@ -228,7 +228,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
        struct msm_drm_private *priv = dev->dev_private;
        const struct mdp5_cfg_hw *hw_cfg =
                                        mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-       enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
+       enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
        struct drm_encoder *encoder;
        int ret = 0;
 
@@ -365,7 +365,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
        /* Construct encoders and modeset initialize connector devices
         * for each external display interface.
         */
-       for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
+       for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
                ret = modeset_init_intf(mdp5_kms, i);
                if (ret)
                        goto fail;
@@ -514,8 +514,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
         */
        mdp5_enable(mdp5_kms);
        for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
-               if (!config->hw->intf.base[i] ||
-                               mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
+               if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+                               !config->hw->intf.base[i])
                        continue;
                mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
        }
index 18a3d20..57b8f56 100644 (file)
@@ -273,7 +273,7 @@ static void set_scanout_locked(struct drm_plane *plane,
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
                        msm_framebuffer_iova(fb, mdp5_kms->id, 2));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 4));
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 3));
 
        plane->fb = fb;
 }
index 47f4dd4..c80a6be 100644 (file)
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
+#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_hotplug_event(priv->fbdev);
+#endif
 }
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -94,7 +96,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
        }
 
        if (reglog)
-               printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
+               printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
 
        return ptr;
 }
@@ -102,7 +104,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 void msm_writel(u32 data, void __iomem *addr)
 {
        if (reglog)
-               printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+               printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
        writel(data, addr);
 }
 
@@ -110,7 +112,7 @@ u32 msm_readl(const void __iomem *addr)
 {
        u32 val = readl(addr);
        if (reglog)
-               printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
+               printk(KERN_ERR "IO:R %p %08x\n", addr, val);
        return val;
 }
 
@@ -143,8 +145,8 @@ static int msm_unload(struct drm_device *dev)
        if (gpu) {
                mutex_lock(&dev->struct_mutex);
                gpu->funcs->pm_suspend(gpu);
-               gpu->funcs->destroy(gpu);
                mutex_unlock(&dev->struct_mutex);
+               gpu->funcs->destroy(gpu);
        }
 
        if (priv->vram.paddr) {
@@ -177,7 +179,7 @@ static int get_mdp_ver(struct platform_device *pdev)
        const struct of_device_id *match;
        match = of_match_node(match_types, dev->of_node);
        if (match)
-               return (int)match->data;
+               return (int)(unsigned long)match->data;
 #endif
        return 4;
 }
@@ -216,7 +218,7 @@ static int msm_init_vram(struct drm_device *dev)
                if (ret)
                        return ret;
                size = r.end - r.start;
-               DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+               DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
        } else
 #endif
 
@@ -283,10 +285,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
 
-       ret = msm_init_vram(dev);
-       if (ret)
-               goto fail;
-
        platform_set_drvdata(pdev, dev);
 
        /* Bind all our sub-components: */
@@ -294,6 +292,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                return ret;
 
+       ret = msm_init_vram(dev);
+       if (ret)
+               goto fail;
+
        switch (get_mdp_ver(pdev)) {
        case 4:
                kms = mdp4_kms_init(dev);
@@ -419,9 +421,11 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
 
 static void msm_lastclose(struct drm_device *dev)
 {
+#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+#endif
 }
 
 static irqreturn_t msm_irq(int irq, void *arg)
index 6b573e6..1217132 100644 (file)
@@ -172,8 +172,8 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
-       struct msm_framebuffer *msm_fb;
-       struct drm_framebuffer *fb = NULL;
+       struct msm_framebuffer *msm_fb = NULL;
+       struct drm_framebuffer *fb;
        const struct msm_format *format;
        int ret, i, n;
        unsigned int hsub, vsub;
@@ -239,8 +239,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
        return fb;
 
 fail:
-       if (fb)
-               msm_framebuffer_destroy(fb);
+       kfree(msm_fb);
 
        return ERR_PTR(ret);
 }
index 479d8af..5283976 100644 (file)
@@ -483,7 +483,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        msm_obj->read_fence, msm_obj->write_fence,
                        obj->name, obj->refcount.refcount.counter,
index 7acdaa5..7ac2f19 100644 (file)
@@ -60,7 +60,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;
 
-               VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
 
                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
@@ -99,7 +99,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
                if (unmapped < bytes)
                        return unmapped;
 
-               VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
 
                BUG_ON(!PAGE_ALIGNED(bytes));
 
index 8171537..1f14b90 100644 (file)
@@ -56,6 +56,6 @@ fail:
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
        if (ring->bo)
-               drm_gem_object_unreference(ring->bo);
+               drm_gem_object_unreference_unlocked(ring->bo);
        kfree(ring);
 }
index 0b5af0f..64f8b2f 100644 (file)
@@ -14,7 +14,7 @@
 
 #define FERMI_TWOD_A                                                 0x0000902d
 
-#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x0000903d
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x00009039
 
 #define KEPLER_INLINE_TO_MEMORY_A                                    0x0000a040
 #define KEPLER_INLINE_TO_MEMORY_B                                    0x0000a140
index 2f5eadd..fdb1dcf 100644 (file)
@@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object)
        nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
 
        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-       printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
                for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
                        nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
                nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
index e8778c6..c61102f 100644 (file)
@@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit)
        return disable;
 }
 
-static int
+int
 gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass, void *data, u32 size,
                   struct nvkm_object **pobject)
 {
+       struct nvkm_devinit_impl *impl = (void *)oclass;
        struct nv50_devinit_priv *priv;
+       u64 disable;
        int ret;
 
        ret = nvkm_devinit_create(parent, engine, oclass, &priv);
@@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       if (nv_rd32(priv, 0x022500) & 0x00000001)
+       disable = impl->disable(&priv->base);
+       if (disable & (1ULL << NVDEV_ENGINE_DISP))
                priv->base.post = true;
 
        return 0;
index b345a53..87ca0ec 100644 (file)
@@ -48,7 +48,7 @@ struct nvkm_oclass *
 gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index 535172c..1076fcf 100644 (file)
@@ -161,7 +161,7 @@ struct nvkm_oclass *
 gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index b882b65..9243521 100644 (file)
@@ -15,6 +15,9 @@ int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
+int  gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
+                       struct nvkm_oclass *, void *, u32,
+                       struct nvkm_object **);
 int  gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 u64  gm107_devinit_disable(struct nvkm_devinit *);
index 42b2ea3..e597ffc 100644 (file)
@@ -1798,7 +1798,9 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
                        if ((crtc->mode.clock == test_crtc->mode.clock) &&
                            (adjusted_clock == test_adjusted_clock) &&
                            (radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
-                           (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
+                           (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID) &&
+                           (drm_detect_monitor_audio(radeon_connector_edid(test_radeon_crtc->connector)) ==
+                            drm_detect_monitor_audio(radeon_connector_edid(radeon_crtc->connector))))
                                return test_radeon_crtc->pll_id;
                }
        }
index 3e3290c..b435c85 100644 (file)
@@ -421,19 +421,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret;
+       int ret, i;
 
-       ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-                              DP_DPCD_SIZE);
-       if (ret > 0) {
-               memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+       for (i = 0; i < 7; i++) {
+               ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+                                      DP_DPCD_SIZE);
+               if (ret == DP_DPCD_SIZE) {
+                       memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-                             dig_connector->dpcd);
+                       DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                                     dig_connector->dpcd);
 
-               radeon_dp_probe_oui(radeon_connector);
+                       radeon_dp_probe_oui(radeon_connector);
 
-               return true;
+                       return true;
+               }
        }
        dig_connector->dpcd[0] = 0;
        return false;
index a0c35bb..ba50f3c 100644 (file)
@@ -5822,7 +5822,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(4));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index 05e6d6e..f848acf 100644 (file)
@@ -2485,7 +2485,7 @@ static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index 0926739..9953356 100644 (file)
@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
        if (enable) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                        WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
                               HDMI_AVI_INFO_SEND | /* enable AVI info frames */
                               HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
        if (!dig || !dig->afmt)
                return;
 
-       if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+       if (enable && connector &&
+           drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                struct radeon_connector_atom_dig *dig_connector;
index aba2f42..64d3a77 100644 (file)
@@ -1282,7 +1282,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(6));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index 25b4ac9..8f6d862 100644 (file)
@@ -1112,7 +1112,7 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index dcb7796..25191f1 100644 (file)
@@ -460,9 +460,6 @@ void radeon_audio_detect(struct drm_connector *connector,
        if (!connector || !connector->encoder)
                return;
 
-       if (!radeon_encoder_is_digital(connector->encoder))
-               return;
-
        rdev = connector->encoder->dev->dev_private;
 
        if (!radeon_audio_chipset_supported(rdev))
@@ -471,26 +468,26 @@ void radeon_audio_detect(struct drm_connector *connector,
        radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
-       if (!dig->afmt)
-               return;
-
        if (status == connector_status_connected) {
-               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector *radeon_connector;
+               int sink_type;
+
+               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       radeon_encoder->audio = NULL;
+                       return;
+               }
+
+               radeon_connector = to_radeon_connector(connector);
+               sink_type = radeon_dp_getsinktype(radeon_connector);
 
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-                   radeon_dp_getsinktype(radeon_connector) ==
-                   CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                       sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
                        radeon_encoder->audio = rdev->audio.dp_funcs;
                else
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
                dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-               } else {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0);
-                       dig->afmt->pin = NULL;
-               }
+               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
        } else {
                radeon_audio_enable(rdev, dig->afmt->pin, 0);
                dig->afmt->pin = NULL;
index d17d251..cebb65e 100644 (file)
@@ -1379,10 +1379,8 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index bf1fecc..fcbd60b 100644 (file)
@@ -30,8 +30,6 @@
                            AUX_SW_RX_HPD_DISCON |           \
                            AUX_SW_RX_PARTIAL_BYTE |         \
                            AUX_SW_NON_AUX_MODE |            \
-                           AUX_SW_RX_MIN_COUNT_VIOL |       \
-                           AUX_SW_RX_INVALID_STOP |         \
                            AUX_SW_RX_SYNC_INVALID_L |       \
                            AUX_SW_RX_SYNC_INVALID_H |       \
                            AUX_SW_RX_INVALID_START |        \
index c54d631..01ee96a 100644 (file)
@@ -921,7 +921,7 @@ static int rv770_pcie_gart_enable(struct radeon_device *rdev)
        WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
        WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
                                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
index 5326f75..4c679b8 100644 (file)
@@ -4303,7 +4303,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
               L2_CACHE_BIGK_FRAGMENT_SIZE(4));
        /* setup context0 */
        WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
-       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end >> 12) - 1);
+       WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
        WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
                        (u32)(rdev->dummy_page.addr >> 12));
index 1055cb7..3f4c7b8 100644 (file)
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o vgem_dma_buf.o
+vgem-y := vgem_drv.o
 
 obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
deleted file mode 100644 (file)
index 0254438..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2012 Intel Corporation
- * Copyright © 2014 The Chromium OS Authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Widawsky <ben@bwidawsk.net>
- *
- */
-
-#include <linux/dma-buf.h>
-#include "vgem_drv.h"
-
-struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-int vgem_gem_prime_pin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       return vgem_gem_get_pages(obj);
-}
-
-void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       vgem_gem_put_pages(obj);
-}
-
-void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-}
-
-void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-       vunmap(vaddr);
-}
-
-struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                            struct dma_buf *dma_buf)
-{
-       struct drm_vgem_gem_object *obj = NULL;
-       int ret;
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (obj == NULL) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
-       if (ret) {
-               ret = -ENOMEM;
-               goto fail_free;
-       }
-
-       get_dma_buf(dma_buf);
-
-       obj->base.dma_buf = dma_buf;
-       obj->use_dma_buf = true;
-
-       return &obj->base;
-
-fail_free:
-       kfree(obj);
-fail:
-       return ERR_PTR(ret);
-}
index cb3b435..7a207ca 100644 (file)
@@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = {
 };
 
 static struct drm_driver vgem_driver = {
-       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
+       .driver_features                = DRIVER_GEM,
        .gem_free_object                = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .fops                           = &vgem_driver_fops,
        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,
-       .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
-       .gem_prime_export               = drm_gem_prime_export,
-       .gem_prime_import               = vgem_gem_prime_import,
-       .gem_prime_pin                  = vgem_gem_prime_pin,
-       .gem_prime_unpin                = vgem_gem_prime_unpin,
-       .gem_prime_get_sg_table         = vgem_gem_prime_get_sg_table,
-       .gem_prime_vmap                 = vgem_gem_prime_vmap,
-       .gem_prime_vunmap               = vgem_gem_prime_vunmap,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
index 57ab4d8..e9f92f7 100644 (file)
@@ -43,15 +43,4 @@ struct drm_vgem_gem_object {
 extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
 extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
 
-/* vgem_dma_buf.c */
-extern struct sg_table *vgem_gem_prime_get_sg_table(
-                       struct drm_gem_object *gobj);
-extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
-extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                                   struct dma_buf *dma_buf);
-
-
 #endif
index 41f167e..7ce93d9 100644 (file)
 #define USB_DEVICE_ID_ATEN_2PORTKVM    0x2204
 #define USB_DEVICE_ID_ATEN_4PORTKVM    0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC   0x2208
+#define USB_DEVICE_ID_ATEN_CS682       0x2213
 
 #define USB_VENDOR_ID_ATMEL            0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
index b3cf6fd..5fd530a 100644 (file)
@@ -44,7 +44,6 @@ MODULE_PARM_DESC(disable_raw_mode,
 /* bits 1..20 are reserved for classes */
 #define HIDPP_QUIRK_DELAYED_INIT               BIT(21)
 #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS       BIT(22)
-#define HIDPP_QUIRK_MULTI_INPUT                        BIT(23)
 
 /*
  * There are two hidpp protocols in use, the first version hidpp10 is known
@@ -706,12 +705,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-
-       if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
-           (field->application == HID_GD_KEYBOARD))
-               return 0;
-
        return -1;
 }
 
@@ -720,10 +713,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
 {
        struct wtp_data *wd = hidpp->private_data;
 
-       if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
-               /* this is the generic hid-input call */
-               return;
-
        __set_bit(EV_ABS, input_dev->evbit);
        __set_bit(EV_KEY, input_dev->evbit);
        __clear_bit(EV_REL, input_dev->evbit);
@@ -1245,10 +1234,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
                connect_mask &= ~HID_CONNECT_HIDINPUT;
 
-       /* Re-enable hidinput for multi-input devices */
-       if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
-               connect_mask |= HID_CONNECT_HIDINPUT;
-
        ret = hid_hw_start(hdev, connect_mask);
        if (ret) {
                hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
@@ -1296,11 +1281,6 @@ static const struct hid_device_id hidpp_devices[] = {
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_T651),
          .driver_data = HIDPP_QUIRK_CLASS_WTP },
-       { /* Keyboard TK820 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4102),
-         .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
-                        HIDPP_QUIRK_CLASS_WTP },
 
        { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
                USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
index c3f6f1e..090a1ba 100644 (file)
@@ -294,7 +294,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
        if (!report)
                return -EINVAL;
 
-       mutex_lock(&hsdev->mutex);
+       mutex_lock(hsdev->mutex_ptr);
        if (flag == SENSOR_HUB_SYNC) {
                memset(&hsdev->pending, 0, sizeof(hsdev->pending));
                init_completion(&hsdev->pending.ready);
@@ -328,7 +328,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                kfree(hsdev->pending.raw_data);
                hsdev->pending.status = false;
        }
-       mutex_unlock(&hsdev->mutex);
+       mutex_unlock(hsdev->mutex_ptr);
 
        return ret_val;
 }
@@ -667,7 +667,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        hsdev->vendor_id = hdev->vendor;
                        hsdev->product_id = hdev->product;
                        hsdev->usage = collection->usage;
-                       mutex_init(&hsdev->mutex);
+                       hsdev->mutex_ptr = devm_kzalloc(&hdev->dev,
+                                                       sizeof(struct mutex),
+                                                       GFP_KERNEL);
+                       if (!hsdev->mutex_ptr) {
+                               ret = -ENOMEM;
+                               goto err_stop_hw;
+                       }
+                       mutex_init(hsdev->mutex_ptr);
                        hsdev->start_collection_index = i;
                        if (last_hsdev)
                                last_hsdev->end_collection_index = i;
index ab4dd95..92d6cdf 100644 (file)
@@ -862,6 +862,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        union acpi_object *obj;
        struct acpi_device *adev;
        acpi_handle handle;
+       int ret;
 
        handle = ACPI_HANDLE(&client->dev);
        if (!handle || acpi_bus_get_device(handle, &adev))
@@ -877,7 +878,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        pdata->hid_descriptor_address = obj->integer.value;
        ACPI_FREE(obj);
 
-       return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+       /* GPIOs are optional */
+       ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+       return ret < 0 && ret != -ENXIO ? ret : 0;
 }
 
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
index a775143..4696895 100644 (file)
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
index fa54d32..adf959d 100644 (file)
@@ -1072,6 +1072,9 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
        int count = 0;
        int i;
 
+       if (!touch_max)
+               return 0;
+
        /* non-HID_GENERIC single touch input doesn't call this routine */
        if ((touch_max == 1) && (wacom->features.type == HID_GENERIC))
                return wacom->hid_data.tipswitch &&
index f3830db..37f0170 100644 (file)
@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
index 4fcb481..bd1c99d 100644 (file)
@@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
index 112e4d4..6880011 100644 (file)
@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
 ntc_thermistor_parse_dt(struct platform_device *pdev)
 {
        struct iio_channel *chan;
+       enum iio_chan_type type;
        struct device_node *np = pdev->dev.of_node;
        struct ntc_thermistor_platform_data *pdata;
+       int ret;
 
        if (!np)
                return NULL;
@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        if (IS_ERR(chan))
                return ERR_CAST(chan);
 
+       ret = iio_get_channel_type(chan, &type);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       if (type != IIO_VOLTAGE)
+               return ERR_PTR(-EINVAL);
+
        if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
                return ERR_PTR(-ENODEV);
        if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
index 99664eb..ccf4cff 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/sysfs.h>
 
 /* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
        0x4e, 0x4f, I2C_CLIENT_END };
 
 enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
index 0c14191..0271608 100644 (file)
@@ -861,6 +861,7 @@ retest:
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
+       case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
@@ -879,7 +880,6 @@ retest:
                                       NULL, 0, NULL, 0);
                }
                break;
-       case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
index 06441a4..38ffe09 100644 (file)
@@ -845,18 +845,26 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
        listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
        ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
        ib->sib_family = listen_ib->sib_family;
-       ib->sib_pkey = path->pkey;
-       ib->sib_flowinfo = path->flow_label;
-       memcpy(&ib->sib_addr, &path->sgid, 16);
+       if (path) {
+               ib->sib_pkey = path->pkey;
+               ib->sib_flowinfo = path->flow_label;
+               memcpy(&ib->sib_addr, &path->sgid, 16);
+       } else {
+               ib->sib_pkey = listen_ib->sib_pkey;
+               ib->sib_flowinfo = listen_ib->sib_flowinfo;
+               ib->sib_addr = listen_ib->sib_addr;
+       }
        ib->sib_sid = listen_ib->sib_sid;
        ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
        ib->sib_scope_id = listen_ib->sib_scope_id;
 
-       ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
-       ib->sib_family = listen_ib->sib_family;
-       ib->sib_pkey = path->pkey;
-       ib->sib_flowinfo = path->flow_label;
-       memcpy(&ib->sib_addr, &path->dgid, 16);
+       if (path) {
+               ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
+               ib->sib_family = listen_ib->sib_family;
+               ib->sib_pkey = path->pkey;
+               ib->sib_flowinfo = path->flow_label;
+               memcpy(&ib->sib_addr, &path->dgid, 16);
+       }
 }
 
 static __be16 ss_get_port(const struct sockaddr_storage *ss)
@@ -905,9 +913,11 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
        struct cma_hdr *hdr;
 
-       if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
-           (ib_event->event == IB_CM_REQ_RECEIVED)) {
-               cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+       if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+               if (ib_event->event == IB_CM_REQ_RECEIVED)
+                       cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+               else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+                       cma_save_ib_info(id, listen_id, NULL);
                return 0;
        }
 
index c9780d9..b396344 100644 (file)
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
        if (rdma_is_multicast_addr(&in6))
                rdma_get_mcast_mac(&in6, mac_addr);
+       else if (rdma_link_local_addr(&in6))
+               rdma_get_ll_mac(&in6, mac_addr);
        else
                memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
        return 0;
index d812904..f5a5ea8 100644 (file)
@@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        vlan_tag = attr->vlan_id;
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
-       if (vlan_tag && (vlan_tag < 0x1000)) {
+       if (vlan_tag || dev->pfc_state) {
+               if (!vlan_tag) {
+                       pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+                               dev->id);
+                       pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+                               dev->id);
+               }
                eth.eth_type = cpu_to_be16(0x8100);
                eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
@@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                goto av_conf_err;
        }
 
-       if (pd->uctx) {
+       if ((pd->uctx) &&
+           (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
+           (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
                status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
                                         attr->dmac, &attr->vlan_id);
                if (status) {
index 0c9e959..47615ff 100644 (file)
@@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
        struct ocrdma_eqe eqe;
        struct ocrdma_eqe *ptr;
        u16 cq_id;
+       u8 mcode;
        int budget = eq->cq_cnt;
 
        do {
                ptr = ocrdma_get_eqe(eq);
                eqe = *ptr;
                ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+               mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
+                               >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
+               if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
+                       pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
+                              eq->q.id, eqe.id_valid);
                if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
                        break;
 
@@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
        struct ocrdma_alloc_pd_range_rsp *rsp;
 
        /* Pre allocate the DPP PDs */
-       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
-       if (!cmd)
-               return -ENOMEM;
-       cmd->pd_count = dev->attr.max_dpp_pds;
-       cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
-       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-       if (status)
-               goto mbx_err;
-       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-
-       if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
-               dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
-                               OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
-               dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
-                               OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
-               dev->pd_mgr->max_dpp_pd = rsp->pd_count;
-               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
-               dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
-                                                    GFP_KERNEL);
+       if (dev->attr.max_dpp_pds) {
+               cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
+                                         sizeof(*cmd));
+               if (!cmd)
+                       return -ENOMEM;
+               cmd->pd_count = dev->attr.max_dpp_pds;
+               cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+               status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+               rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+               if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
+                   rsp->pd_count) {
+                       dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                                       OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+                       dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                                       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+                       dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+                       pd_bitmap_size =
+                               BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+                       dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                            GFP_KERNEL);
+               }
+               kfree(cmd);
        }
-       kfree(cmd);
 
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
        if (!cmd)
@@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
 
        cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-       if (status)
-               goto mbx_err;
        rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-       if (rsp->pd_count) {
+       if (!status && rsp->pd_count) {
                dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
                                        OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
                dev->pd_mgr->max_normal_pd = rsp->pd_count;
@@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
                dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
                                                      GFP_KERNEL);
        }
+       kfree(cmd);
 
        if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
                /* Enable PD resource manager */
                dev->pd_mgr->pd_prealloc_valid = true;
-       } else {
-               return -ENOMEM;
+               return 0;
        }
-mbx_err:
-       kfree(cmd);
        return status;
 }
 
@@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
        struct ocrdma_query_qp *cmd;
        struct ocrdma_query_qp_rsp *rsp;
 
-       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
        if (!cmd)
                return status;
        cmd->qp_id = qp->id;
@@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        int status;
        struct ib_ah_attr *ah_attr = &attrs->ah_attr;
        union ib_gid sgid, zgid;
-       u32 vlan_id;
+       u32 vlan_id = 0xFFFF;
        u8 mac_addr[6];
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
@@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
        if (attr_mask & IB_QP_VID) {
                vlan_id = attrs->vlan_id;
+       } else if (dev->pfc_state) {
+               vlan_id = 0;
+               pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
+                       dev->id);
+               pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
+                       dev->id);
+       }
+
+       if (vlan_id < 0x1000) {
                cmd->params.vlan_dmac_b4_to_b5 |=
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                cmd->params.rnt_rc_sl_fl |=
                        (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
+
        return 0;
 }
 
@@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
        }
        if (attr_mask & IB_QP_PATH_MTU) {
-               if (attrs->path_mtu < IB_MTU_256 ||
+               if (attrs->path_mtu < IB_MTU_512 ||
                    attrs->path_mtu > IB_MTU_4096) {
+                       pr_err("ocrdma%d: IB MTU %d is not supported\n",
+                              dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
        ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);
 
-       /* cleanup the eqs */
-       ocrdma_destroy_eqs(dev);
-
        /* cleanup the control path */
        ocrdma_destroy_mq(dev);
+
+       /* cleanup the eqs */
+       ocrdma_destroy_eqs(dev);
 }
index 243c87c..02ad0ae 100644 (file)
@@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
        struct ocrdma_qp_params params;
+       u32 dpp_credits_cqid;
+       u32 rbq_id;
 };
 
 enum {
@@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
 enum {
        OCRDMA_EQE_VALID_SHIFT          = 0,
        OCRDMA_EQE_VALID_MASK           = BIT(0),
+       OCRDMA_EQE_MAJOR_CODE_MASK      = 0x0E,
+       OCRDMA_EQE_MAJOR_CODE_SHIFT     = 0x01,
        OCRDMA_EQE_FOR_CQE_MASK         = 0xFFFE,
        OCRDMA_EQE_RESOURCE_ID_SHIFT    = 16,
        OCRDMA_EQE_RESOURCE_ID_MASK     = 0xFFFF <<
                                OCRDMA_EQE_RESOURCE_ID_SHIFT,
 };
 
+enum major_code {
+       OCRDMA_MAJOR_CODE_COMPLETION    = 0x00,
+       OCRDMA_MAJOR_CODE_SENTINAL      = 0x01
+};
+
 struct ocrdma_eqe {
        u32 id_valid;
 };
index 8771755..9dcb660 100644 (file)
@@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       if (udata && uctx) {
+       if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
@@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        struct ib_qp_attr attrs;
-       int attr_mask = IB_QP_STATE;
+       int attr_mask;
        unsigned long flags;
 
        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
 
-       attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;
 
        /* change the QP state to ERROR */
-       _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
-
+       if (qp->state != OCRDMA_QPS_RST) {
+               attrs.qp_state = IB_QPS_ERR;
+               attr_mask = IB_QP_STATE;
+               _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
+       }
        /* ensure that CQEs for newly created QP (whose id may be same with
         * one which just getting destroyed are same), dont get
         * discarded until the old CQEs are discarded.
index 327529e..3f40319 100644 (file)
@@ -547,11 +547,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
        return 0;
 
 err_prot_mr:
-       ib_dereg_mr(desc->pi_ctx->prot_mr);
+       ib_dereg_mr(pi_ctx->prot_mr);
 err_prot_frpl:
-       ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+       ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
 err_pi_ctx:
-       kfree(desc->pi_ctx);
+       kfree(pi_ctx);
 
        return ret;
 }
index f362883..1d247bc 100644 (file)
@@ -747,6 +747,63 @@ static void joydev_cleanup(struct joydev *joydev)
                input_close_device(handle);
 }
 
+static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
+{
+       DECLARE_BITMAP(jd_scratch, KEY_CNT);
+
+       BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT);
+
+       /*
+        * Virtualization (VMware, etc) and remote management (HP
+        * ILO2) solutions use absolute coordinates for their virtual
+        * pointing devices so that there is one-to-one relationship
+        * between pointer position on the host screen and virtual
+        * guest screen, and so their mice use ABS_X, ABS_Y and 3
+        * primary button events. This clashes with what joydev
+        * considers to be joysticks (a device with at minimum ABS_X
+        * axis).
+        *
+        * Here we are trying to separate absolute mice from
+        * joysticks. A device is, for joystick detection purposes,
+        * considered to be an absolute mouse if the following is
+        * true:
+        *
+        * 1) Event types are exactly EV_ABS, EV_KEY and EV_SYN.
+        * 2) Absolute events are exactly ABS_X and ABS_Y.
+        * 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE.
+        * 4) Device is not on "Amiga" bus.
+        */
+
+       bitmap_zero(jd_scratch, EV_CNT);
+       __set_bit(EV_ABS, jd_scratch);
+       __set_bit(EV_KEY, jd_scratch);
+       __set_bit(EV_SYN, jd_scratch);
+       if (!bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
+               return false;
+
+       bitmap_zero(jd_scratch, ABS_CNT);
+       __set_bit(ABS_X, jd_scratch);
+       __set_bit(ABS_Y, jd_scratch);
+       if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT))
+               return false;
+
+       bitmap_zero(jd_scratch, KEY_CNT);
+       __set_bit(BTN_LEFT, jd_scratch);
+       __set_bit(BTN_RIGHT, jd_scratch);
+       __set_bit(BTN_MIDDLE, jd_scratch);
+
+       if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT))
+               return false;
+
+       /*
+        * Amiga joystick (amijoy) historically uses left/middle/right
+        * button events.
+        */
+       if (dev->id.bustype == BUS_AMIGA)
+               return false;
+
+       return true;
+}
 
 static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
 {
@@ -758,6 +815,10 @@ static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
        if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
                return false;
 
+       /* Avoid absolute mice */
+       if (joydev_dev_is_absolute_mouse(dev))
+               return false;
+
        return true;
 }
 
index 7462d2f..d7820d1 100644 (file)
@@ -156,7 +156,7 @@ config MOUSE_PS2_VMMOUSE
          Say Y here if you are running under control of VMware hypervisor
          (ESXi, Workstation or Fusion). Also make sure that when you enable
          this option, you remove the xf86-input-vmmouse user-space driver
-         or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't
+         or upgrade it to at least xf86-input-vmmouse 13.1.0, which doesn't
          load in the presence of an in-kernel vmmouse driver.
 
          If unsure, say N.
index e6708f6..7752bd5 100644 (file)
@@ -941,6 +941,11 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
        case V7_PACKET_ID_TWO:
                mt[1].x &= ~0x000F;
                mt[1].y |= 0x000F;
+               /* Detect false-positive touches where x & y report max value */
+               if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
+                       mt[1].x = 0;
+                       /* y gets set to 0 at the end of this function */
+               }
                break;
 
        case V7_PACKET_ID_MULTI:
index 991dc6b..79363b6 100644 (file)
@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
                                         unsigned int x2, unsigned int y2)
 {
        elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
-       elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
+       elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
 }
 
 /*
index 2d5ff86..e4c3125 100644 (file)
@@ -164,7 +164,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
                        STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
 
        /* start polling for touch_det to detect release */
-       schedule_delayed_work(&ts->work, HZ / 50);
+       schedule_delayed_work(&ts->work, msecs_to_jiffies(50));
 
        return IRQ_HANDLED;
 }
index aecb9ad..642f4a5 100644 (file)
@@ -187,7 +187,7 @@ static int sx8654_probe(struct i2c_client *client,
                return -ENOMEM;
 
        input = devm_input_allocate_device(&client->dev);
-       if (!sx8654)
+       if (!input)
                return -ENOMEM;
 
        input->name = "SX8654 I2C Touchscreen";
index 9687f8a..1b7e155 100644 (file)
@@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its)
                        u64 typer = readq_relaxed(its->base + GITS_TYPER);
                        u32 ids = GITS_TYPER_DEVBITS(typer);
 
-                       order = get_order((1UL << ids) * entry_size);
+                       /*
+                        * 'order' was initialized earlier to the default page
+                        * granule of the ITS.  We can't have an allocation
+                        * smaller than that.  If the requested allocation
+                        * is smaller, round up to the default page granule.
+                        */
+                       order = max(get_order((1UL << ids) * entry_size),
+                                   order);
                        if (order >= MAX_ORDER) {
                                order = MAX_ORDER - 1;
                                pr_warn("%s: Device Table too large, reduce its page order to %u\n",
index 7dc93aa..312ffd3 100644 (file)
@@ -173,7 +173,7 @@ static void unmap_switcher(void)
 bool lguest_address_ok(const struct lguest *lg,
                       unsigned long addr, unsigned long len)
 {
-       return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+       return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
 }
 
 /*
index 2bc56e2..135a090 100644 (file)
@@ -177,11 +177,16 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
         * nr_pending is 0 and In_sync is clear, the entries we return will
         * still be in the same position on the list when we re-enter
         * list_for_each_entry_continue_rcu.
+        *
+        * Note that if entered with 'rdev == NULL' to start at the
+        * beginning, we temporarily assign 'rdev' to an address which
+        * isn't really an rdev, but which can be used by
+        * list_for_each_entry_continue_rcu() to find the first entry.
         */
        rcu_read_lock();
        if (rdev == NULL)
                /* start at the beginning */
-               rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
+               rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
        else {
                /* release the previous rdev and start from there. */
                rdev_dec_pending(rdev, mddev);
index 6395347..eff7bdd 100644 (file)
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                /* blk-mq request-based interface */
                *__clone = blk_get_request(bdev_get_queue(bdev),
                                           rq_data_dir(rq), GFP_ATOMIC);
-               if (IS_ERR(*__clone))
+               if (IS_ERR(*__clone)) {
                        /* ENOMEM, requeue */
+                       clear_mapinfo(m, map_context);
                        return r;
+               }
                (*__clone)->bio = (*__clone)->biotail = NULL;
                (*__clone)->rq_disk = bdev->bd_disk;
                (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
index d9b00b8..16ba55a 100644 (file)
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+       return (table_type == DM_TYPE_REQUEST_BASED ||
+               table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
        unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
-               if (live_md_type == DM_TYPE_REQUEST_BASED ||
-                   live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+               if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
                        }
                t->type = DM_TYPE_MQ_REQUEST_BASED;
 
-       } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+       } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
                /* inherit live MD type */
                t->type = live_md_type;
 
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-       unsigned table_type = dm_table_get_type(t);
-
-       return (table_type == DM_TYPE_REQUEST_BASED ||
-               table_type == DM_TYPE_MQ_REQUEST_BASED);
+       return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
index a930b72..2caf492 100644 (file)
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
 
-       WARN_ON_ONCE(must_be_mapped && !clone->q);
-
        blk_rq_unprep_clone(clone);
 
        if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       free_rq_clone(clone, true);
+       free_rq_clone(clone);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
        }
 
        if (clone)
-               free_rq_clone(clone, false);
+               free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
+       blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors;
-       int max_size = 0;
+       sector_t max_sectors, max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
                          (sector_t) queue_max_sectors(q));
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-       if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-               max_size = 0;
+
+       /*
+        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+        * to the targets' merge function since it holds sectors not bytes).
+        * Just doing this as an interim fix for stable@ because the more
+        * comprehensive cleanup of switching to sector_t will impact every
+        * DM target that implements a ->merge hook.
+        */
+       if (max_size > INT_MAX)
+               max_size = INT_MAX;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
        /*
         * If the target doesn't support merge method and some of the devices
         * provided their merge_bvec method (we know this by looking for the
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
-               if (IS_ERR(clone))
-                       return DM_MAPIO_REQUEUE;
+               if (r != DM_MAPIO_REMAPPED)
+                       return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
                /* clone request is allocated at the end of the pdu */
                tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-               if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+               (void) clone_rq(rq, md, tio, GFP_ATOMIC);
                queue_kthread_work(&md->kworker, &tio->work);
        } else {
                /* Direct call is fine since .queue_rq allows allocations */
-               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-                       dm_requeue_unmapped_original_request(md, rq);
+               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+                       /* Undo dm_start_request() before requeuing */
+                       rq_completed(md, rq_data_dir(rq), false);
+                       return BLK_MQ_RQ_QUEUE_BUSY;
+               }
        }
 
        return BLK_MQ_RQ_QUEUE_OK;
index 593a024..2750630 100644 (file)
@@ -4211,12 +4211,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
        if (!mddev->pers || !mddev->pers->sync_request)
                return -EINVAL;
 
-       if (cmd_match(page, "frozen"))
-               set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       else
-               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
+               if (cmd_match(page, "frozen"))
+                       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               else
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                flush_workqueue(md_misc_wq);
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
@@ -4229,16 +4229,17 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
        else if (cmd_match(page, "resync"))
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        else if (cmd_match(page, "recover")) {
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev_lock(mddev);
                if (!err) {
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                        err = mddev->pers->start_reshape(mddev);
                        mddev_unlock(mddev);
                }
@@ -4250,6 +4251,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else if (!cmd_match(page, "repair"))
                        return -EINVAL;
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
index 6a68ef5..efb654e 100644 (file)
@@ -524,6 +524,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                         ? (sector & (chunk_sects-1))
                         : sector_div(sector, chunk_sects));
 
+               /* Restore due to sector_div */
+               sector = bio->bi_iter.bi_sector;
+
                if (sectors < bio_sectors(bio)) {
                        split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);
@@ -531,7 +534,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
-               sector = bio->bi_iter.bi_sector;
                zone = find_zone(mddev->private, &sector);
                tmp_dev = map_sector(mddev, zone, sector, &sector);
                split->bi_bdev = tmp_dev->bdev;
index 1ba97fd..553d54b 100644 (file)
@@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 static bool stripe_can_batch(struct stripe_head *sh)
 {
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+               !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
                is_full_stripe_write(sh);
 }
 
@@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                    < IO_THRESHOLD)
                        md_wakeup_thread(conf->mddev->thread);
 
+       if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
+               int seq = sh->bm_seq;
+               if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
+                   sh->batch_head->bm_seq > seq)
+                       seq = sh->batch_head->bm_seq;
+               set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
+               sh->batch_head->bm_seq = seq;
+       }
+
        atomic_inc(&sh->count);
 unlock_out:
        unlock_two_stripes(head, sh);
@@ -1822,7 +1832,7 @@ again:
        } else
                init_async_submit(&submit, 0, tx, NULL, NULL,
                                  to_addr_conv(sh, percpu, j));
-       async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
+       tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
        if (!last_stripe) {
                j++;
                sh = list_first_entry(&sh->batch_list, struct stripe_head,
@@ -2987,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
-       spin_unlock_irq(&sh->stripe_lock);
 
        if (conf->mddev->bitmap && firstwrite) {
+               /* Cannot hold spinlock over bitmap_startwrite,
+                * but must ensure this isn't added to a batch until
+                * we have added to the bitmap and set bm_seq.
+                * So set STRIPE_BITMAP_PENDING to prevent
+                * batching.
+                * If multiple add_stripe_bio() calls race here they
+                * must all set STRIPE_BITMAP_PENDING.  So only the first one
+                * to complete "bitmap_startwrite" gets to set
+                * STRIPE_BIT_DELAY.  This is important as once a stripe
+                * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+                * any more.
+                */
+               set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               spin_unlock_irq(&sh->stripe_lock);
                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
                                  STRIPE_SECTORS, 0);
-               sh->bm_seq = conf->seq_flush+1;
-               set_bit(STRIPE_BIT_DELAY, &sh->state);
+               spin_lock_irq(&sh->stripe_lock);
+               clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               if (!sh->batch_head) {
+                       sh->bm_seq = conf->seq_flush+1;
+                       set_bit(STRIPE_BIT_DELAY, &sh->state);
+               }
        }
+       spin_unlock_irq(&sh->stripe_lock);
 
        if (stripe_can_batch(sh))
                stripe_add_to_batch_list(conf, sh);
@@ -3392,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh,
        set_bit(STRIPE_HANDLE, &sh->state);
 }
 
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags);
 /* handle_stripe_clean_event
  * any written block on an uptodate or failed drive can be returned.
  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
@@ -3405,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf,
        int discard_pending = 0;
        struct stripe_head *head_sh = sh;
        bool do_endio = false;
-       int wakeup_nr = 0;
 
        for (i = disks; i--; )
                if (sh->dev[i].written) {
@@ -3494,44 +3523,8 @@ unhash:
                if (atomic_dec_and_test(&conf->pending_full_writes))
                        md_wakeup_thread(conf->mddev->thread);
 
-       if (!head_sh->batch_head || !do_endio)
-               return;
-       for (i = 0; i < head_sh->disks; i++) {
-               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
-                       wakeup_nr++;
-       }
-       while (!list_empty(&head_sh->batch_list)) {
-               int i;
-               sh = list_first_entry(&head_sh->batch_list,
-                                     struct stripe_head, batch_list);
-               list_del_init(&sh->batch_list);
-
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
-               sh->check_state = head_sh->check_state;
-               sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++) {
-                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-                               wakeup_nr++;
-                       sh->dev[i].flags = head_sh->dev[i].flags;
-               }
-
-               spin_lock_irq(&sh->stripe_lock);
-               sh->batch_head = NULL;
-               spin_unlock_irq(&sh->stripe_lock);
-               if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
-                       set_bit(STRIPE_HANDLE, &sh->state);
-               release_stripe(sh);
-       }
-
-       spin_lock_irq(&head_sh->stripe_lock);
-       head_sh->batch_head = NULL;
-       spin_unlock_irq(&head_sh->stripe_lock);
-       wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
-       if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
-               set_bit(STRIPE_HANDLE, &head_sh->state);
+       if (head_sh->batch_head && do_endio)
+               break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
 static void handle_stripe_dirtying(struct r5conf *conf,
@@ -4172,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 static int clear_batch_ready(struct stripe_head *sh)
 {
+       /* Return '1' if this is a member of batch, or
+        * '0' if it is a lone stripe or a head which can now be
+        * handled.
+        */
        struct stripe_head *tmp;
        if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
-               return 0;
+               return (sh->batch_head && sh->batch_head != sh);
        spin_lock(&sh->stripe_lock);
        if (!sh->batch_head) {
                spin_unlock(&sh->stripe_lock);
@@ -4202,38 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh)
        return 0;
 }
 
-static void check_break_stripe_batch_list(struct stripe_head *sh)
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags)
 {
-       struct stripe_head *head_sh, *next;
+       struct stripe_head *sh, *next;
        int i;
-
-       if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
-               return;
-
-       head_sh = sh;
+       int do_wakeup = 0;
 
        list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
 
                list_del_init(&sh->batch_list);
 
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                (1 << STRIPE_DEGRADED) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
+               WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                         (1 << STRIPE_SYNCING) |
+                                         (1 << STRIPE_REPLACED) |
+                                         (1 << STRIPE_PREREAD_ACTIVE) |
+                                         (1 << STRIPE_DELAYED) |
+                                         (1 << STRIPE_BIT_DELAY) |
+                                         (1 << STRIPE_FULL_WRITE) |
+                                         (1 << STRIPE_BIOFILL_RUN) |
+                                         (1 << STRIPE_COMPUTE_RUN)  |
+                                         (1 << STRIPE_OPS_REQ_PENDING) |
+                                         (1 << STRIPE_DISCARD) |
+                                         (1 << STRIPE_BATCH_READY) |
+                                         (1 << STRIPE_BATCH_ERR) |
+                                         (1 << STRIPE_BITMAP_PENDING)));
+               WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                             (1 << STRIPE_REPLACED)));
+
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_DEGRADED)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+
                sh->check_state = head_sh->check_state;
                sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++)
+               for (i = 0; i < sh->disks; i++) {
+                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+                               do_wakeup = 1;
                        sh->dev[i].flags = head_sh->dev[i].flags &
                                (~((1 << R5_WriteError) | (1 << R5_Overlap)));
-
+               }
                spin_lock_irq(&sh->stripe_lock);
                sh->batch_head = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-
-               set_bit(STRIPE_HANDLE, &sh->state);
+               if (handle_flags == 0 ||
+                   sh->state & handle_flags)
+                       set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
        }
+       spin_lock_irq(&head_sh->stripe_lock);
+       head_sh->batch_head = NULL;
+       spin_unlock_irq(&head_sh->stripe_lock);
+       for (i = 0; i < head_sh->disks; i++)
+               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+                       do_wakeup = 1;
+       if (head_sh->state & handle_flags)
+               set_bit(STRIPE_HANDLE, &head_sh->state);
+
+       if (do_wakeup)
+               wake_up(&head_sh->raid_conf->wait_for_overlap);
 }
 
 static void handle_stripe(struct stripe_head *sh)
@@ -4258,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh)
                return;
        }
 
-       check_break_stripe_batch_list(sh);
+       if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+               break_stripe_batch_list(sh, 0);
 
        if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
                spin_lock(&sh->stripe_lock);
@@ -4312,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh)
        if (s.failed > conf->max_degraded) {
                sh->check_state = 0;
                sh->reconstruct_state = 0;
+               break_stripe_batch_list(sh, 0);
                if (s.to_read+s.to_write+s.written)
                        handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
                if (s.syncing + s.replacing)
index 7dc0dd8..896d603 100644 (file)
@@ -337,9 +337,12 @@ enum {
        STRIPE_ON_RELEASE_LIST,
        STRIPE_BATCH_READY,
        STRIPE_BATCH_ERR,
+       STRIPE_BITMAP_PENDING,  /* Being added to bitmap, don't add
+                                * to batch yet.
+                                */
 };
 
-#define STRIPE_EXPAND_SYNC_FLAG \
+#define STRIPE_EXPAND_SYNC_FLAGS \
        ((1 << STRIPE_EXPAND_SOURCE) |\
        (1 << STRIPE_EXPAND_READY) |\
        (1 << STRIPE_EXPANDING) |\
index ae498b5..46e3840 100644 (file)
@@ -433,6 +433,10 @@ EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
 static const struct mfd_cell da9052_subdev_info[] = {
        {
                .name = "da9052-regulator",
+               .id = 0,
+       },
+       {
+               .name = "da9052-regulator",
                .id = 1,
        },
        {
@@ -484,10 +488,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
                .id = 13,
        },
        {
-               .name = "da9052-regulator",
-               .id = 14,
-       },
-       {
                .name = "da9052-onkey",
        },
        {
index 03d7c75..9a39e0b 100644 (file)
@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        if (ios->clock) {
                unsigned int clock_min = ~0U;
-               u32 clkdiv;
+               int clkdiv;
 
                spin_lock_bh(&host->lock);
                if (!host->mode_reg) {
@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* Calculate clock divider */
                if (host->caps.has_odd_clk_div) {
                        clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
-                       if (clkdiv > 511) {
+                       if (clkdiv < 0) {
+                               dev_warn(&mmc->class_dev,
+                                        "clock %u too fast; using %lu\n",
+                                        clock_min, host->bus_hz / 2);
+                               clkdiv = 0;
+                       } else if (clkdiv > 511) {
                                dev_warn(&mmc->class_dev,
                                         "clock %u too slow; using %lu\n",
                                         clock_min, host->bus_hz / (511 + 2));
index 7c8b169..3af137f 100644 (file)
@@ -223,7 +223,7 @@ static int m25p_probe(struct spi_device *spi)
         */
        if (data && data->type)
                flash_name = data->type;
-       else if (!strcmp(spi->modalias, "nor-jedec"))
+       else if (!strcmp(spi->modalias, "spi-nor"))
                flash_name = NULL; /* auto-detect */
        else
                flash_name = spi->modalias;
@@ -255,7 +255,7 @@ static int m25p_remove(struct spi_device *spi)
  * since most of these flash are compatible to some extent, and their
  * differences can often be differentiated by the JEDEC read-ID command, we
  * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "nor-jedec").
+ * against a generic string here (e.g., "jedec,spi-nor").
  *
  * Many flash names are kept here in this list (as well as in spi-nor.c) to
  * keep them available as module aliases for existing platforms.
@@ -305,7 +305,7 @@ static const struct spi_device_id m25p_ids[] = {
         * Generic support for SPI NOR that can be identified by the JEDEC READ
         * ID opcode (0x9F). Use this, if possible.
         */
-       {"nor-jedec"},
+       {"spi-nor"},
        { },
 };
 MODULE_DEVICE_TABLE(spi, m25p_ids);
index a3196b7..58df07a 100644 (file)
@@ -191,9 +191,11 @@ static int __init mtd_readtest_init(void)
                                err = ret;
                }
 
-               err = mtdtest_relax();
-               if (err)
+               ret = mtdtest_relax();
+               if (ret) {
+                       err = ret;
                        goto out;
+               }
        }
 
        if (err)
index 4df2894..e8d3c1d 100644 (file)
@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
 out:
        if (ret)
                bond_opt_error_interpret(bond, opt, ret, val);
-       else
+       else if (bond->dev->reg_state == NETREG_REGISTERED)
                call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
 
        return ret;
index a3b0f7a..1f82a04 100644 (file)
@@ -1774,7 +1774,7 @@ struct bnx2x {
        int                     stats_state;
 
        /* used for synchronization of concurrent threads statistics handling */
-       struct mutex            stats_lock;
+       struct semaphore        stats_lock;
 
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
index fd52ce9..33501bc 100644 (file)
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
-       mutex_init(&bp->stats_lock);
+       sema_init(&bp->stats_lock, 1);
        bp->drv_info_mng_owner = false;
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       mutex_lock(&bp->stats_lock);
-       bp->stats_state = STATS_STATE_DISABLED;
-       mutex_unlock(&bp->stats_lock);
+       if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+               bp->stats_state = STATS_STATE_DISABLED;
+               up(&bp->stats_lock);
+       }
 
        bnx2x_save_statistics(bp);
 
index 266b055..69d699f 100644 (file)
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
         * that context in case someone is in the middle of a transition.
         * For other events, wait a bit until lock is taken.
         */
-       if (!mutex_trylock(&bp->stats_lock)) {
+       if (down_trylock(&bp->stats_lock)) {
                if (event == STATS_EVENT_UPDATE)
                        return;
 
                DP(BNX2X_MSG_STATS,
                   "Unlikely stats' lock contention [event %d]\n", event);
-               mutex_lock(&bp->stats_lock);
+               if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+                       BNX2X_ERR("Failed to take stats lock [event %d]\n",
+                                 event);
+                       return;
+               }
        }
 
        bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
-       mutex_unlock(&bp->stats_lock);
+       up(&bp->stats_lock);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
        /* Wait for statistics to end [while blocking further requests],
         * then run supplied function 'safely'.
         */
-       mutex_lock(&bp->stats_lock);
+       rc = down_timeout(&bp->stats_lock, HZ / 10);
+       if (unlikely(rc)) {
+               BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+               goto out_no_lock;
+       }
 
        bnx2x_stats_comp(bp);
        while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
        /* No need to restart statistics - if they're enabled, the timer
         * will restart the statistics.
         */
-       mutex_unlock(&bp->stats_lock);
-
+       up(&bp->stats_lock);
+out_no_lock:
        return rc;
 }
index 594a2ab..68f3c13 100644 (file)
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
        if (status == BFA_STATUS_OK)
                bfa_ioc_lpu_start(ioc);
        else
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 
        return status;
 }
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
        }
 
        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
        } else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                mod_timer(&ioc->iocpf_timer, jiffies +
index 37072a8..caae6cb 100644 (file)
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
        setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
                                ((unsigned long)bnad));
 
-       /* Now start the timer before calling IOC */
-       mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
-                 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
-
        /*
         * Start the chip
         * If the call back comes with error, we bail out.
index ebf462d..badea36 100644 (file)
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                        u32 *bfi_image_size, char *fw_name)
 {
        const struct firmware *fw;
+       u32 n;
 
        if (request_firmware(&fw, fw_name, &pdev->dev)) {
                pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
        *bfi_image_size = fw->size/sizeof(u32);
        bfi_fw = fw;
 
+       /* Convert loaded firmware to host order as it is stored in file
+        * as sequence of LE32 integers.
+        */
+       for (n = 0; n < *bfi_image_size; n++)
+               le32_to_cpus(*bfi_image + n);
+
        return *bfi_image;
 error:
        return NULL;
index 61aa570..fc646a4 100644 (file)
@@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev)
        else
                phydev->supported &= PHY_BASIC_FEATURES;
 
+       if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
+               phydev->supported &= ~SUPPORTED_1000baseT_Half;
+
        phydev->advertising = phydev->supported;
 
        bp->link = 0;
@@ -1037,6 +1040,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */
 
+               /* There is a hardware issue under heavy load where DMA can
+                * stop, this causes endless "used buffer descriptor read"
+                * interrupts but it can be cleared by re-enabling RX. See
+                * the at91 manual, section 41.3.1 or the Zynq manual
+                * section 16.7.4 for details.
+                */
                if (status & MACB_BIT(RXUBR)) {
                        ctrl = macb_readl(bp, NCR);
                        macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
@@ -2693,6 +2702,14 @@ static const struct macb_config emac_config = {
        .init = at91ether_init,
 };
 
+static const struct macb_config zynq_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+               MACB_CAPS_NO_GIGABIT_HALF,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,at32ap7000-macb" },
        { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
@@ -2703,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
+       { .compatible = "cdns,zynq-gem", .data = &zynq_config },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
index eb7d76f..24b1d9b 100644 (file)
 #define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x00000001
 #define MACB_CAPS_USRIO_HAS_CLKEN              0x00000002
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII         0x00000004
+#define MACB_CAPS_NO_GIGABIT_HALF              0x00000008
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
index a6dcbf8..6f9ffb9 100644 (file)
@@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                                    adapter->cfg_num_qs);
 
        for_all_evt_queues(adapter, eqo, i) {
+               int numa_node = dev_to_node(&adapter->pdev->dev);
                if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
                        return -ENOMEM;
-               cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
-                                           eqo->affinity_mask);
-
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
                napi_hash_add(&eqo->napi);
index de79193..b9df0cb 100644 (file)
@@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
 
 static int emac_get_regs_len(struct emac_instance *dev)
 {
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4))
-               return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC4_ETHTOOL_REGS_SIZE(dev);
-       else
                return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC_ETHTOOL_REGS_SIZE(dev);
+                       sizeof(struct emac_regs);
 }
 
 static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
        struct emac_ethtool_regs_subhdr *hdr = buf;
 
        hdr->index = dev->cell_index;
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+       if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+               hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
+       } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
        }
+       memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
+       return (void *)(hdr + 1) + sizeof(struct emac_regs);
 }
 
 static void emac_ethtool_get_regs(struct net_device *ndev,
index 67f342a..28df374 100644 (file)
@@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
 };
 
 #define EMAC_ETHTOOL_REGS_VER          0
-#define EMAC_ETHTOOL_REGS_SIZE(dev)    ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
-#define EMAC4_ETHTOOL_REGS_VER         1
-#define EMAC4_ETHTOOL_REGS_SIZE(dev)   ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER         1
+#define EMAC4SYNC_ETHTOOL_REGS_VER     2
 
 #endif /* __IBM_NEWEMAC_CORE_H */
index 4f7dc04..529ef05 100644 (file)
@@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
-               err = -EIO;
-               goto out_reset;
+               if (op == MLX4_CMD_NOP) {
+                       err = -EBUSY;
+                       goto out;
+               } else {
+                       err = -EIO;
+                       goto out_reset;
+               }
        }
 
        err = context->result;
index 32f5ec7..cf467a9 100644 (file)
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 {
        struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
        int numa_node = priv->mdev->dev->numa_node;
-       int ret = 0;
 
        if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
-                                         ring->affinity_mask);
-       if (ret)
-               free_cpumask_var(ring->affinity_mask);
-
-       return ret;
+       cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
+                       ring->affinity_mask);
+       return 0;
 }
 
 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
index f7bf312..7bed3a8 100644 (file)
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->queue_index = queue_index;
 
        if (queue_index < priv->num_tx_rings_p_up)
-               cpumask_set_cpu_local_first(queue_index,
-                                           priv->mdev->dev->numa_node,
-                                           &ring->affinity_mask);
+               cpumask_set_cpu(cpumask_local_spread(queue_index,
+                                                    priv->mdev->dev->numa_node),
+                               &ring->affinity_mask);
 
        *pring = ring;
        return 0;
index 92fce1b..bafe218 100644 (file)
@@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
        int cqn = vhcr->in_modifier;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
-       struct res_cq *cq;
+       struct res_cq *cq = NULL;
        struct res_mtt *mtt;
 
        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
@@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        int err;
        int cqn = vhcr->in_modifier;
-       struct res_cq *cq;
+       struct res_cq *cq = NULL;
 
        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
        if (err)
@@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        int srqn = vhcr->in_modifier;
        struct res_mtt *mtt;
-       struct res_srq *srq;
+       struct res_srq *srq = NULL;
        struct mlx4_srq_context *srqc = inbox->buf;
        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
 
@@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        int err;
        int srqn = vhcr->in_modifier;
-       struct res_srq *srq;
+       struct res_srq *srq = NULL;
 
        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
        if (err)
index e0c31e3..6409a06 100644 (file)
@@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
        u8 dw, rows, cols, banks, ranks;
        u32 val;
 
-       if (size != sizeof(struct netxen_dimm_cfg)) {
+       if (size < attr->size) {
                netdev_err(netdev, "Invalid size\n");
-               return -1;
+               return -EINVAL;
        }
 
        memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@@ -3137,7 +3137,7 @@ out:
 
 static struct bin_attribute bin_attr_dimm = {
        .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
-       .size = 0,
+       .size = sizeof(struct netxen_dimm_cfg),
        .read = netxen_sysfs_read_dimm,
 };
 
index ec25153..cf98cc9 100644 (file)
@@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
        struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
        int err = 0;
 
-       if (!n)
+       if (!n) {
                n = neigh_create(&arp_tbl, &ip_addr, dev);
-       if (!n)
-               return -ENOMEM;
+               if (IS_ERR(n))
+                       return IS_ERR(n);
+       }
 
        /* If the neigh is already resolved, then go ahead and
         * install the entry, otherwise start the ARP process to
@@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
        else
                neigh_event_send(n, NULL);
 
+       neigh_release(n);
        return err;
 }
 
index c0ad95d..809ea46 100644 (file)
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
        }
 }
 
-static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int num_bufs)
 {
-       if (rx_buf->page) {
-               put_page(rx_buf->page);
-               rx_buf->page = NULL;
-       }
+       do {
+               if (rx_buf->page) {
+                       put_page(rx_buf->page);
+                       rx_buf->page = NULL;
+               }
+               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+       } while (--num_bufs);
 }
 
 /* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
-               efx_free_rx_buffer(rx_buf);
+               efx_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
 }
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
 
        efx_recycle_rx_pages(channel, rx_buf, n_frags);
 
-       do {
-               efx_free_rx_buffer(rx_buf);
-               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
-       } while (--n_frags);
+       efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 }
 
 /**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
-               while (n_frags--) {
-                       put_page(rx_buf->page);
-                       rx_buf->page = NULL;
-                       rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
-               }
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
 
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
-               efx_free_rx_buffer(rx_buf);
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
+               struct efx_rx_queue *rx_queue;
+
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
-               efx_free_rx_buffer(rx_buf);
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf,
+                                   channel->rx_pkt_n_frags);
                goto out;
        }
 
index 2ac9552..73bab98 100644 (file)
@@ -117,6 +117,12 @@ struct stmmac_priv {
        int use_riwt;
        int irq_wake;
        spinlock_t ptp_lock;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dbgfs_dir;
+       struct dentry *dbgfs_rings_status;
+       struct dentry *dbgfs_dma_cap;
+#endif
 };
 
 int stmmac_mdio_unregister(struct net_device *ndev);
index 05c146f..2c5ce2b 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
 static int stmmac_init_fs(struct net_device *dev);
-static void stmmac_exit_fs(void);
+static void stmmac_exit_fs(struct net_device *dev);
 #endif
 
 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev)
        netif_carrier_off(dev);
 
 #ifdef CONFIG_DEBUG_FS
-       stmmac_exit_fs();
+       stmmac_exit_fs(dev);
 #endif
 
        stmmac_release_ptp(priv);
@@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
-static struct dentry *stmmac_rings_status;
-static struct dentry *stmmac_dma_cap;
 
 static void sysfs_display_ring(void *head, int size, int extend_desc,
                               struct seq_file *seq)
@@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
 
 static int stmmac_init_fs(struct net_device *dev)
 {
-       /* Create debugfs entries */
-       stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       /* Create per netdev entries */
+       priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
 
-       if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
-               pr_err("ERROR %s, debugfs create directory failed\n",
-                      STMMAC_RESOURCE_NAME);
+       if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
+               pr_err("ERROR %s/%s, debugfs create directory failed\n",
+                      STMMAC_RESOURCE_NAME, dev->name);
 
                return -ENOMEM;
        }
 
        /* Entry to report DMA RX/TX rings */
-       stmmac_rings_status = debugfs_create_file("descriptors_status",
-                                                 S_IRUGO, stmmac_fs_dir, dev,
-                                                 &stmmac_rings_status_fops);
+       priv->dbgfs_rings_status =
+               debugfs_create_file("descriptors_status", S_IRUGO,
+                                   priv->dbgfs_dir, dev,
+                                   &stmmac_rings_status_fops);
 
-       if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+       if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
                pr_info("ERROR creating stmmac ring debugfs file\n");
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
 
        /* Entry to report the DMA HW features */
-       stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
-                                            dev, &stmmac_dma_cap_fops);
+       priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
+                                           priv->dbgfs_dir,
+                                           dev, &stmmac_dma_cap_fops);
 
-       if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+       if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
                pr_info("ERROR creating stmmac MMC debugfs file\n");
-               debugfs_remove(stmmac_rings_status);
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
@@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev)
        return 0;
 }
 
-static void stmmac_exit_fs(void)
+static void stmmac_exit_fs(struct net_device *dev)
 {
-       debugfs_remove(stmmac_rings_status);
-       debugfs_remove(stmmac_dma_cap);
-       debugfs_remove(stmmac_fs_dir);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
 
@@ -3149,6 +3150,35 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif /* MODULE */
 
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       /* Create debugfs main directory if it doesn't exist yet */
+       if (!stmmac_fs_dir) {
+               stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+               if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+                       pr_err("ERROR %s, debugfs create directory failed\n",
+                              STMMAC_RESOURCE_NAME);
+
+                       return -ENOMEM;
+               }
+       }
+#endif
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
index fb276f6..34a75cb 100644 (file)
@@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
        return ret;
 }
 
+static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_10000baseKR_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_10000)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_2500baseX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_2500)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_1000baseKX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_1000)
+                       return true;
+       }
+
+       return false;
+}
+
 static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
                               bool restart)
 {
@@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
        /* Set initial mode - call the mode setting routines
         * directly to insure we are properly configured
         */
-       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
+       if (amd_xgbe_phy_use_xgmii_mode(phydev))
                ret = amd_xgbe_phy_xgmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
+       else if (amd_xgbe_phy_use_gmii_mode(phydev))
                ret = amd_xgbe_phy_gmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_2500baseX_Full)
+       else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
                ret = amd_xgbe_phy_gmii_2500_mode(phydev);
        else
                ret = -EINVAL;
index 64c74c6..b5dc59d 100644 (file)
@@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .name           = "Broadcom BCM7425",
        .features       = PHY_GBIT_FEATURES |
                          SUPPORTED_Pause | SUPPORTED_Asym_Pause,
-       .flags          = 0,
+       .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm7xxx_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
index 496e02f..00cb41e 100644 (file)
@@ -47,7 +47,7 @@
 #define PSF_TX         0x1000
 #define EXT_EVENT      1
 #define CAL_EVENT      7
-#define CAL_TRIGGER    7
+#define CAL_TRIGGER    1
 #define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
@@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                        else
                                evnt |= EVNT_RISE;
                }
+               mutex_lock(&clock->extreg_lock);
                ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
+               mutex_unlock(&clock->extreg_lock);
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
@@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
 
 static void enable_status_frames(struct phy_device *phydev, bool on)
 {
+       struct dp83640_private *dp83640 = phydev->priv;
+       struct dp83640_clock *clock = dp83640->clock;
        u16 cfg0 = 0, ver;
 
        if (on)
@@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
 
        ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
 
+       mutex_lock(&clock->extreg_lock);
+
        ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
        ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
 
+       mutex_unlock(&clock->extreg_lock);
+
        if (!phydev->attached_dev) {
                pr_warn("expected to find an attached netdevice\n");
                return;
@@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
        list_del_init(&rxts->list);
        phy2rxts(phy_rxts, rxts);
 
-       spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+       spin_lock(&dp83640->rx_queue.lock);
        skb_queue_walk(&dp83640->rx_queue, skb) {
                struct dp83640_skb_info *skb_info;
 
@@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
                        break;
                }
        }
-       spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+       spin_unlock(&dp83640->rx_queue.lock);
 
        if (!shhwtstamps)
                list_add_tail(&rxts->list, &dp83640->rxts);
@@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
 
        if (clock->chosen && !list_empty(&clock->phylist))
                recalibrate(clock);
-       else
+       else {
+               mutex_lock(&clock->extreg_lock);
                enable_broadcast(phydev, clock->page, 1);
+               mutex_unlock(&clock->extreg_lock);
+       }
 
        enable_status_frames(phydev, true);
+
+       mutex_lock(&clock->extreg_lock);
        ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+       mutex_unlock(&clock->extreg_lock);
+
        return 0;
 }
 
index 52cd8db..47cd578 100644 (file)
@@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
+       bool do_resume = false;
+       int err = 0;
+
        mutex_lock(&phydev->lock);
 
        switch (phydev->state) {
@@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
+               /* make sure interrupts are re-enabled for the PHY */
+               err = phy_enable_interrupts(phydev);
+               if (err < 0)
+                       break;
+
                phydev->state = PHY_RESUMING;
+               do_resume = true;
+               break;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);
+
+       /* if phy was suspended, bring the physical link up again */
+       if (do_resume)
+               phy_resume(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
@@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       bool needs_aneg = false, do_suspend = false, do_resume = false;
+       bool needs_aneg = false, do_suspend = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
                }
                break;
        case PHY_RESUMING:
-               err = phy_clear_interrupt(phydev);
-               if (err)
-                       break;
-
-               err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-               if (err)
-                       break;
-
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        err = phy_aneg_done(phydev);
                        if (err < 0)
@@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
-               do_resume = true;
                break;
        }
 
@@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
                err = phy_start_aneg(phydev);
        else if (do_suspend)
                phy_suspend(phydev);
-       else if (do_resume)
-               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
@@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 {
        /* According to 802.3az,the EEE is supported only in full duplex-mode.
         * Also EEE feature is active when core is operating with MII, GMII
-        * or RGMII. Internal PHYs are also allowed to proceed and should
-        * return an error if they do not support EEE.
+        * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
+        * should return an error if they do not support EEE.
         */
        if ((phydev->duplex == DUPLEX_FULL) &&
            ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
            (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+           (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+            phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
             phy_is_internal(phydev))) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
index c3e4da9..8067b8f 100644 (file)
@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * payload data instead.
         */
        usbnet_set_skb_tx_stats(skb_out, n,
-                               ctx->tx_curr_frame_payload - skb_out->len);
+                               (long)ctx->tx_curr_frame_payload - skb_out->len);
 
        return skb_out;
 
index 27a5f95..21a0fbf 100644 (file)
@@ -2961,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
                 * to the list by the previous loop.
                 */
                if (!net_eq(dev_net(vxlan->dev), net))
-                       unregister_netdevice_queue(dev, &list);
+                       unregister_netdevice_queue(vxlan->dev, &list);
        }
 
        unregister_netdevice_many(&list);
index 4ec9811..65efb14 100644 (file)
@@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
                                     msgbuf->rx_pktids,
                                     msgbuf->ioctl_resp_pktid);
        if (msgbuf->ioctl_resp_ret_len != 0) {
-               if (!skb) {
-                       brcmf_err("Invalid packet id idx recv'd %d\n",
-                                 msgbuf->ioctl_resp_pktid);
+               if (!skb)
                        return -EBADF;
-               }
+
                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
                                       len : msgbuf->ioctl_resp_ret_len);
        }
@@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->tx_pktids, idx);
-       if (!skb) {
-               brcmf_err("Invalid packet id idx recv'd %d\n", idx);
+       if (!skb)
                return;
-       }
 
        set_bit(flowid, msgbuf->txstatus_done_map);
        commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids, idx);
+       if (!skb)
+               return;
 
        if (data_offset)
                skb_pull(skb, data_offset);
index ab019b4..f89f446 100644 (file)
@@ -21,6 +21,7 @@ config IWLWIFI
                Intel 7260 Wi-Fi Adapter
                Intel 3160 Wi-Fi Adapter
                Intel 7265 Wi-Fi Adapter
+               Intel 3165 Wi-Fi Adapter
 
 
          This driver uses the kernel's mac80211 subsystem.
index 36e786f..74ad278 100644 (file)
 
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX  13
-#define IWL3160_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
-#define IWL3160_UCODE_API_OK   12
+#define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  10
-#define IWL3160_UCODE_API_MIN  10
+#define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
-#define IWL3165_FW_PRE "iwlwifi-3165-"
-#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
-
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3165",
-       .fw_name_pre = IWL3165_FW_PRE,
+       .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
+       /* sparse doesn't like the re-assignment but it is safe */
+#ifndef __CHECKER__
+       .ucode_api_ok = IWL3165_UCODE_API_OK,
+       .ucode_api_min = IWL3165_UCODE_API_MIN,
+#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 41ff85d..21302b6 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                return;
        }
 
+       if (data->sku_cap_mimo_disabled)
+               rx_chains = 1;
+
        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
 
index 5234a0b..750c8c9 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
        bool sku_cap_11ac_enable;
        bool sku_cap_amt_enable;
        bool sku_cap_ipan_enable;
+       bool sku_cap_mimo_disabled;
 
        u16 radio_cfg_type;
        u8 radio_cfg_step;
index 83903a5..8e604a3 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
 
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
-       NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
-       NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
-       NVM_SKU_CAP_11N_ENABLE  = BIT(2),
-       NVM_SKU_CAP_11AC_ENABLE = BIT(3),
+       NVM_SKU_CAP_BAND_24GHZ          = BIT(0),
+       NVM_SKU_CAP_BAND_52GHZ          = BIT(1),
+       NVM_SKU_CAP_11N_ENABLE          = BIT(2),
+       NVM_SKU_CAP_11AC_ENABLE         = BIT(3),
+       NVM_SKU_CAP_MIMO_DISABLE        = BIT(5),
 };
 
 /*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
        if (cfg->ht_params->ldpc)
                vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
 
+       if (data->sku_cap_mimo_disabled) {
+               num_rx_ants = 1;
+               num_tx_ants = 1;
+       }
+
        if (num_tx_ants > 1)
                vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
        else
@@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
-       return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+       return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
 
 }
 
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        const u8 *hw_addr;
 
        if (mac_override) {
+               static const u8 reserved_mac[] = {
+                       0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+               };
+
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
 
-               if (is_valid_ether_addr(data->hw_addr))
+               /*
+                * Force the use of the OTP MAC address in case of reserved MAC
+                * address in the NVM, or if address is given but invalid.
+                */
+               if (is_valid_ether_addr(data->hw_addr) &&
+                   memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
                        return;
 
                IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                data->sku_cap_11n_enable = false;
        data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
                                    (sku & NVM_SKU_CAP_11AC_ENABLE);
+       data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
index d954591..6ac6de2 100644 (file)
@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        struct iwl_host_cmd cmd = {
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .dataflags = { IWL_HCMD_DFL_DUP, },
                .flags = CMD_ASYNC,
        };
        struct iwl_mvm_sta *mvmsta;
index 1b1b2bf..4310cf1 100644 (file)
@@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        int i, j, n_matches, ret;
 
        fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
-       if (!IS_ERR_OR_NULL(fw_status))
+       if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
+               kfree(fw_status);
+       }
 
        if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
                wakeup.rfkill_release = true;
@@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
-               goto out_unlock;
+               goto err;
 
        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
        if (ret)
-               goto out_unlock;
+               goto err;
 
        if (d3_status != IWL_D3_STATUS_ALIVE) {
                IWL_INFO(mvm, "Device was reset during suspend\n");
-               goto out_unlock;
+               goto err;
        }
 
        /* query SRAM first in case we want event logging */
@@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto out_iterate;
        }
 
- out_unlock:
+err:
+       iwl_mvm_free_nd(mvm);
        mutex_unlock(&mvm->mutex);
 
 out_iterate:
@@ -1915,6 +1918,14 @@ out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+
+       /* We always return 1, which causes mac80211 to do a reconfig
+        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
+        * reconfig calls iwl_mvm_restart_complete(), where we unref
+        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
+        * reference here.
+        */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        return 1;
 }
 
@@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
        iwl_abort_notification_waits(&mvm->notif_wait);
-       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        ieee80211_restart_hw(mvm->hw);
 
        /* wait for restart and disconnect all interfaces */
index 40265b9..dda9f7b 100644 (file)
@@ -3995,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
                return;
 
-       if (event->u.mlme.status == MLME_SUCCESS)
-               return;
-
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
index 1c66297..2ea0123 100644 (file)
@@ -1263,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
                ieee80211_iterate_active_interfaces(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d0i3_disconnect_iter, mvm);
-
-       iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
+       /* qos_seq might point inside resp_pkt, so free it only now */
+       if (get_status_cmd.resp_pkt)
+               iwl_free_resp(&get_status_cmd);
+
        /* the FW might have updated the regdomain */
        iwl_mvm_update_changed_regdom(mvm);
 
index f9928f2..33cd68a 100644 (file)
@@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
+       if (mvm->nvm_data->sku_cap_mimo_disabled)
+               return false;
+
        return true;
 }
 
index 01996c9..376b84e 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -320,7 +320,7 @@ struct iwl_trans_pcie {
 
        /*protect hw register */
        spinlock_t reg_lock;
-       bool cmd_in_flight;
+       bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
        /* protect ref counter */
index 47bbf57..dc17909 100644 (file)
@@ -1049,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
                iwl_pcie_rx_stop(trans);
 
                /* Power-down device's busmaster DMA clocks */
-               iwl_write_prph(trans, APMG_CLK_DIS_REG,
-                              APMG_CLK_VAL_DMA_CLK_RQT);
-               udelay(5);
+               if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                       iwl_write_prph(trans, APMG_CLK_DIS_REG,
+                                      APMG_CLK_VAL_DMA_CLK_RQT);
+                       udelay(5);
+               }
        }
 
        /* Make sure (redundant) we've released our request to stay awake */
@@ -1370,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 
        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        /* this bit wakes up the NIC */
@@ -1436,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
         */
        __acquire(&trans_pcie->reg_lock);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
index 06952aa..5ef8044 100644 (file)
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                iwl_trans_pcie_ref(trans);
        }
 
-       if (trans_pcie->cmd_in_flight)
-               return 0;
-
-       trans_pcie->cmd_in_flight = true;
-
        /*
         * wake up the NIC to make sure that the firmware will see the host
         * command - we will let the NIC sleep once all the host commands
         * returned. This needs to be done only on NICs that have
         * apmg_wake_up_wa set.
         */
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           !trans_pcie->cmd_hold_nic_awake) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
@@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                if (ret < 0) {
                        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       trans_pcie->cmd_in_flight = false;
                        IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
                        return -EIO;
                }
+               trans_pcie->cmd_hold_nic_awake = true;
        }
 
        return 0;
@@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
                iwl_trans_pcie_unref(trans);
        }
 
-       if (WARN_ON(!trans_pcie->cmd_in_flight))
-               return 0;
-
-       trans_pcie->cmd_in_flight = false;
+       if (trans->cfg->base_params->apmg_wake_up_wa) {
+               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+                       return 0;
 
-       if (trans->cfg->base_params->apmg_wake_up_wa)
+               trans_pcie->cmd_hold_nic_awake = false;
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
+                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       }
        return 0;
 }
 
index 4de46aa..0d25943 100644 (file)
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        netdev_err(queue->vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
-                                  (txreq.offset&~PAGE_MASK) + txreq.size);
+                                  (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }
index 3d8dbf5..968787a 100644 (file)
@@ -34,6 +34,8 @@ struct backend_info {
        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
+
+       const char *hotplug_script;
 };
 
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
                xenvif_free(be->vif);
                be->vif = NULL;
        }
+       kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
        struct xenbus_transaction xbt;
        int err;
        int sg;
+       const char *script;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");
 
+       script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+       if (IS_ERR(script)) {
+               err = PTR_ERR(script);
+               xenbus_dev_fatal(dev, err, "reading script");
+               goto fail;
+       }
+
+       be->hotplug_script = script;
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
 {
        struct backend_info *be = dev_get_drvdata(&xdev->dev);
-       char *val;
 
-       val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-       if (IS_ERR(val)) {
-               int err = PTR_ERR(val);
-               xenbus_dev_fatal(xdev, err, "reading script");
-               return err;
-       } else {
-               if (add_uevent_var(env, "script=%s", val)) {
-                       kfree(val);
-                       return -ENOMEM;
-               }
-               kfree(val);
-       }
+       if (!be)
+               return 0;
 
-       if (!be || !be->vif)
+       if (add_uevent_var(env, "script=%s", be->hotplug_script))
+               return -ENOMEM;
+
+       if (!be->vif)
                return 0;
 
        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
@@ -793,6 +798,7 @@ static void connect(struct backend_info *be)
                        goto err;
                }
 
+               queue->credit_bytes = credit_bytes;
                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;
 
index 3f45afd..e031c94 100644 (file)
@@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
+               del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
 
@@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = {
 static int xennet_remove(struct xenbus_device *dev)
 {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
-       unsigned int num_queues = info->netdev->real_num_tx_queues;
-       struct netfront_queue *queue = NULL;
-       unsigned int i = 0;
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
-               queue = &info->queues[i];
-               del_timer_sync(&queue->rx_refill_timer);
-       }
-
-       if (num_queues) {
-               kfree(info->queues);
-               info->queues = NULL;
-       }
-
+       xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
index 4ad5c1a..e406e3d 100644 (file)
@@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
        CYGNUS_PINRANGE(87, 104, 12),
        CYGNUS_PINRANGE(99, 102, 2),
        CYGNUS_PINRANGE(101, 90, 4),
-       CYGNUS_PINRANGE(105, 116, 10),
+       CYGNUS_PINRANGE(105, 116, 6),
+       CYGNUS_PINRANGE(111, 100, 2),
+       CYGNUS_PINRANGE(113, 122, 4),
        CYGNUS_PINRANGE(123, 11, 1),
        CYGNUS_PINRANGE(124, 38, 4),
        CYGNUS_PINRANGE(128, 43, 1),
index 82f691e..732ff75 100644 (file)
@@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d)
        chv_gpio_irq_mask_unmask(d, false);
 }
 
+static unsigned chv_gpio_irq_startup(struct irq_data *d)
+{
+       /*
+        * Check if the interrupt has been requested with 0 as triggering
+        * type. In that case it is assumed that the current values
+        * programmed to the hardware are used (e.g BIOS configured
+        * defaults).
+        *
+        * In that case ->irq_set_type() will never be called so we need to
+        * read back the values from hardware now, set correct flow handler
+        * and update mappings before the interrupt is being used.
+        */
+       if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
+               struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+               struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+               unsigned offset = irqd_to_hwirq(d);
+               int pin = chv_gpio_offset_to_pin(pctrl, offset);
+               irq_flow_handler_t handler;
+               unsigned long flags;
+               u32 intsel, value;
+
+               intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+               intsel &= CHV_PADCTRL0_INTSEL_MASK;
+               intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+               value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+               if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
+                       handler = handle_level_irq;
+               else
+                       handler = handle_edge_irq;
+
+               spin_lock_irqsave(&pctrl->lock, flags);
+               if (!pctrl->intr_lines[intsel]) {
+                       __irq_set_handler_locked(d->irq, handler);
+                       pctrl->intr_lines[intsel] = offset;
+               }
+               spin_unlock_irqrestore(&pctrl->lock, flags);
+       }
+
+       chv_gpio_irq_unmask(d);
+       return 0;
+}
+
 static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 
 static struct irq_chip chv_gpio_irqchip = {
        .name = "chv-gpio",
+       .irq_startup = chv_gpio_irq_startup,
        .irq_ack = chv_gpio_irq_ack,
        .irq_mask = chv_gpio_irq_mask,
        .irq_unmask = chv_gpio_irq_unmask,
index edcd140..a70a5fe 100644 (file)
@@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
                domain->chip.direction_output = meson_gpio_direction_output;
                domain->chip.get = meson_gpio_get;
                domain->chip.set = meson_gpio_set;
-               domain->chip.base = -1;
+               domain->chip.base = domain->data->pin_base;
                domain->chip.ngpio = domain->data->num_pins;
                domain->chip.can_sleep = false;
                domain->chip.of_node = domain->of_node;
index 2f7ea62..9677807 100644 (file)
@@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = {
                .banks          = meson8b_banks,
                .num_banks      = ARRAY_SIZE(meson8b_banks),
                .pin_base       = 0,
-               .num_pins       = 83,
+               .num_pins       = 130,
        },
        {
                .name           = "ao-bank",
                .banks          = meson8b_ao_banks,
                .num_banks      = ARRAY_SIZE(meson8b_ao_banks),
-               .pin_base       = 83,
+               .pin_base       = 130,
                .num_pins       = 16,
        },
 };
index 9bb9ad6..28f3281 100644 (file)
@@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_reason);
+static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
 
 static void hotkey_wakeup_reason_notify_change(void)
 {
@@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete);
+static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
+                  hotkey_wakeup_hotunplug_complete_show, NULL);
 
 static void hotkey_wakeup_hotunplug_complete_notify_change(void)
 {
@@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_hotkey_enable.attr,
        &dev_attr_hotkey_bios_enabled.attr,
        &dev_attr_hotkey_bios_mask.attr,
-       &dev_attr_hotkey_wakeup_reason.attr,
-       &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
+       &dev_attr_wakeup_reason.attr,
+       &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
@@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev,
                        attr, buf, count);
 }
 
-static DEVICE_ATTR_RW(wan_enable);
+static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO,
+                  wan_enable_show, wan_enable_store);
 
 /* --------------------------------------------------------------------- */
 
 static struct attribute *wan_attributes[] = {
-       &dev_attr_wan_enable.attr,
+       &dev_attr_wwan_enable.attr,
        NULL
 };
 
@@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1_enable);
+static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+                  fan_pwm1_enable_show, fan_pwm1_enable_store);
 
 /* sysfs fan pwm1 ------------------------------------------------------ */
 static ssize_t fan_pwm1_show(struct device *dev,
@@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
        return (rc) ? rc : count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1);
+static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store);
 
 /* sysfs fan fan1_input ------------------------------------------------ */
 static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan1_input);
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
 
 /* sysfs fan fan2_input ------------------------------------------------ */
 static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan2_input);
+static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
 
 /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
 static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
 
 /* --------------------------------------------------------------------- */
 static struct attribute *fan_attributes[] = {
-       &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
-       &dev_attr_fan_fan1_input.attr,
+       &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr,
+       &dev_attr_fan1_input.attr,
        NULL, /* for fan2_input */
        NULL
 };
@@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
                if (tp_features.second_fan) {
                        /* attach second fan tachometer */
                        fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
-                                       &dev_attr_fan_fan2_input.attr;
+                                       &dev_attr_fan2_input.attr;
                }
                rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
                                         &fan_attr_group);
@@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
 }
 
-static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name);
+static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
 
 /* --------------------------------------------------------------------- */
 
@@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void)
                hwmon_device_unregister(tpacpi_hwmon);
 
        if (tp_features.sensors_pdev_attrs_registered)
-               device_remove_file(&tpacpi_sensors_pdev->dev,
-                                  &dev_attr_thinkpad_acpi_pdev_name);
+               device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (tpacpi_sensors_pdev)
                platform_device_unregister(tpacpi_sensors_pdev);
        if (tpacpi_pdev)
@@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void)
                thinkpad_acpi_module_exit();
                return ret;
        }
-       ret = device_create_file(&tpacpi_sensors_pdev->dev,
-                                &dev_attr_thinkpad_acpi_pdev_name);
+       ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (ret) {
                pr_err("unable to create sysfs hwmon device attributes\n");
                thinkpad_acpi_module_exit();
index 476171a..8a029f9 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
 #define PERIP_PWM_PDM_CONTROL_CH_MASK          0x1
 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)     ((ch) * 4)
 
-#define MAX_TMBASE_STEPS                       65536
+/*
+ * PWM period is specified with a timebase register,
+ * in number of step periods. The PWM duty cycle is also
+ * specified in step periods, in the [0, $timebase] range.
+ * In other words, the timebase imposes the duty cycle
+ * resolution. Therefore, let's constraint the timebase to
+ * a minimum value to allow a sane range of duty cycle values.
+ * Imposing a minimum timebase, will impose a maximum PWM frequency.
+ *
+ * The value chosen is completely arbitrary.
+ */
+#define MIN_TMBASE_STEPS                       16
+
+struct img_pwm_soc_data {
+       u32 max_timebase;
+};
 
 struct img_pwm_chip {
        struct device   *dev;
@@ -47,6 +63,9 @@ struct img_pwm_chip {
        struct clk      *sys_clk;
        void __iomem    *base;
        struct regmap   *periph_regs;
+       int             max_period_ns;
+       int             min_period_ns;
+       const struct img_pwm_soc_data   *data;
 };
 
 static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        u32 val, div, duty, timebase;
        unsigned long mul, output_clk_hz, input_clk_hz;
        struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+       unsigned int max_timebase = pwm_chip->data->max_timebase;
+
+       if (period_ns < pwm_chip->min_period_ns ||
+           period_ns > pwm_chip->max_period_ns) {
+               dev_err(chip->dev, "configured period not in range\n");
+               return -ERANGE;
+       }
 
        input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
        output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
 
        mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
-       if (mul <= MAX_TMBASE_STEPS) {
+       if (mul <= max_timebase) {
                div = PWM_CTRL_CFG_NO_SUB_DIV;
                timebase = DIV_ROUND_UP(mul, 1);
-       } else if (mul <= MAX_TMBASE_STEPS * 8) {
+       } else if (mul <= max_timebase * 8) {
                div = PWM_CTRL_CFG_SUB_DIV0;
                timebase = DIV_ROUND_UP(mul, 8);
-       } else if (mul <= MAX_TMBASE_STEPS * 64) {
+       } else if (mul <= max_timebase * 64) {
                div = PWM_CTRL_CFG_SUB_DIV1;
                timebase = DIV_ROUND_UP(mul, 64);
-       } else if (mul <= MAX_TMBASE_STEPS * 512) {
+       } else if (mul <= max_timebase * 512) {
                div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
                timebase = DIV_ROUND_UP(mul, 512);
-       } else if (mul > MAX_TMBASE_STEPS * 512) {
+       } else if (mul > max_timebase * 512) {
                dev_err(chip->dev,
                        "failed to configure timebase steps/divider value\n");
                return -EINVAL;
@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
        .owner = THIS_MODULE,
 };
 
+static const struct img_pwm_soc_data pistachio_pwm = {
+       .max_timebase = 255,
+};
+
+static const struct of_device_id img_pwm_of_match[] = {
+       {
+               .compatible = "img,pistachio-pwm",
+               .data = &pistachio_pwm,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
 static int img_pwm_probe(struct platform_device *pdev)
 {
        int ret;
+       u64 val;
+       unsigned long clk_rate;
        struct resource *res;
        struct img_pwm_chip *pwm;
+       const struct of_device_id *of_dev_id;
 
        pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
        if (!pwm)
@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pwm->base))
                return PTR_ERR(pwm->base);
 
+       of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
+       if (!of_dev_id)
+               return -ENODEV;
+       pwm->data = of_dev_id->data;
+
        pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                           "img,cr-periph");
        if (IS_ERR(pwm->periph_regs))
@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
                goto disable_sysclk;
        }
 
+       clk_rate = clk_get_rate(pwm->pwm_clk);
+
+       /* The maximum input clock divider is 512 */
+       val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
+       do_div(val, clk_rate);
+       pwm->max_period_ns = val;
+
+       val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
+       do_div(val, clk_rate);
+       pwm->min_period_ns = val;
+
        pwm->chip.dev = &pdev->dev;
        pwm->chip.ops = &img_pwm_ops;
        pwm->chip.base = -1;
@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
        return pwmchip_remove(&pwm_chip->chip);
 }
 
-static const struct of_device_id img_pwm_of_match[] = {
-       { .compatible = "img,pistachio-pwm", },
-       { }
-};
-MODULE_DEVICE_TABLE(of, img_pwm_of_match);
-
 static struct platform_driver img_pwm_driver = {
        .driver = {
                .name = "img-pwm",
index 8a4df7a..e628d4c 100644 (file)
@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
 
 static int da9052_regulator_probe(struct platform_device *pdev)
 {
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
        struct regulator_config config = { };
        struct da9052_regulator *regulator;
        struct da9052 *da9052;
@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        regulator->da9052 = da9052;
 
        regulator->info = find_regulator_info(regulator->da9052->chip_id,
-                                             pdev->id);
+                                             cell->id);
        if (regulator->info == NULL) {
                dev_err(&pdev->dev, "invalid regulator ID specified\n");
                return -EINVAL;
@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        config.driver_data = regulator;
        config.regmap = da9052->regmap;
        if (pdata && pdata->regulators) {
-               config.init_data = pdata->regulators[pdev->id];
+               config.init_data = pdata->regulators[cell->id];
        } else {
 #ifdef CONFIG_OF
                struct device_node *nproot = da9052->dev->of_node;
index f0b9871..3ba6114 100644 (file)
@@ -1158,11 +1158,12 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
        poll_timeout = time;
        hr_time = ktime_set(0, poll_timeout);
 
-       if (!hrtimer_is_queued(&ap_poll_timer) ||
-           !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
-               hrtimer_set_expires(&ap_poll_timer, hr_time);
-               hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
-       }
+       spin_lock_bh(&ap_poll_timer_lock);
+       hrtimer_cancel(&ap_poll_timer);
+       hrtimer_set_expires(&ap_poll_timer, hr_time);
+       hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+       spin_unlock_bh(&ap_poll_timer_lock);
+
        return count;
 }
 
@@ -1528,14 +1529,11 @@ static inline void __ap_schedule_poll_timer(void)
        ktime_t hr_time;
 
        spin_lock_bh(&ap_poll_timer_lock);
-       if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
-               goto out;
-       if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
+       if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
                hr_time = ktime_set(0, poll_timeout);
                hrtimer_forward_now(&ap_poll_timer, hr_time);
                hrtimer_restart(&ap_poll_timer);
        }
-out:
        spin_unlock_bh(&ap_poll_timer_lock);
 }
 
@@ -1952,7 +1950,7 @@ static void ap_reset_domain(void)
 {
        int i;
 
-       if (ap_domain_index != -1)
+       if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index)))
                for (i = 0; i < AP_DEVICES; i++)
                        ap_reset_queue(AP_MKQID(i, ap_domain_index));
 }
@@ -2097,7 +2095,6 @@ void ap_module_exit(void)
        hrtimer_cancel(&ap_poll_timer);
        destroy_workqueue(ap_work_queue);
        tasklet_kill(&ap_tasklet);
-       root_device_unregister(ap_root_device);
        while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
                    __ap_match_all)))
        {
@@ -2106,6 +2103,7 @@ void ap_module_exit(void)
        }
        for (i = 0; ap_bus_attrs[i]; i++)
                bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+       root_device_unregister(ap_root_device);
        bus_unregister(&ap_bus_type);
        unregister_reset_call(&ap_reset_call);
        if (ap_using_interrupts())
index 81e83a6..3207009 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 1028760..447cf7c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 9889743..f11d325 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index b7391a3..2f07007 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index e0b3b2d..0c84e1c 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 923a2b5..1f74760 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -50,7 +50,7 @@ static unsigned int enable_msix = 1;
 
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
 MODULE_VERSION(BUILD_STR);
-MODULE_AUTHOR("Emulex Corporation");
+MODULE_AUTHOR("Avago Technologies");
 MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
@@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 
 static struct scsi_host_template beiscsi_sht = {
        .module = THIS_MODULE,
-       .name = "Emulex 10Gbe open-iscsi Initiator Driver",
+       .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
        .proc_name = DRV_NAME,
        .queuecommand = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
index 7ee0ffc..e70ea26 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -37,7 +37,7 @@
 
 #define DRV_NAME               "be2iscsi"
 #define BUILD_STR              "10.4.114.0"
-#define BE_NAME                        "Emulex OneConnect" \
+#define BE_NAME                        "Avago Technologies OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
 
index 681d4e8..c2c4d69 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index bd81446..9356b9a 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index cb73cf9..c140f99 100644 (file)
@@ -1130,25 +1130,6 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
- *
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
-       int i, j;
-
-       for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
-            i += sizeof(uint32_t), j++) {
-               ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
-       }
-}
-
-/**
  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
@@ -1283,7 +1264,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
-       lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        return 0;
 }
 
@@ -4147,6 +4127,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 }
 
 /**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+       int i, j;
+       for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+            i += sizeof(uint32_t), j++) {
+               ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+       }
+}
+
+/**
  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: The scsi command which needs to send.
@@ -4225,6 +4223,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }
+       if (phba->sli_rev == 3 &&
+           !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+               lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
index 68c2002..5c9e680 100644 (file)
@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
        struct se_portal_group *se_tpg = &base_tpg->se_tpg;
        struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
 
-       if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                 &se_tpg->tpg_group.cg_item)) {
+       if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 1);
                qlt_enable_vha(base_vha);
        }
@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
 
        if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 0);
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        complete(&base_tpg->tpg_base_comp);
 }
index 79beebf..7f9d65f 100644 (file)
@@ -1600,6 +1600,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
        u64 start_lba = blk_rq_pos(scmd->request);
        u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
+       u64 factor = scmd->device->sector_size / 512;
        u64 bad_lba;
        int info_valid;
        /*
@@ -1621,16 +1622,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
        if (scsi_bufflen(scmd) <= scmd->device->sector_size)
                return 0;
 
-       if (scmd->device->sector_size < 512) {
-               /* only legitimate sector_size here is 256 */
-               start_lba <<= 1;
-               end_lba <<= 1;
-       } else {
-               /* be careful ... don't want any overflows */
-               unsigned int factor = scmd->device->sector_size / 512;
-               do_div(start_lba, factor);
-               do_div(end_lba, factor);
-       }
+       /* be careful ... don't want any overflows */
+       do_div(start_lba, factor);
+       do_div(end_lba, factor);
 
        /* The bad lba was reported incorrectly, we have no idea where
         * the error is.
@@ -2188,8 +2182,7 @@ got_data:
        if (sector_size != 512 &&
            sector_size != 1024 &&
            sector_size != 2048 &&
-           sector_size != 4096 &&
-           sector_size != 256) {
+           sector_size != 4096) {
                sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
                          sector_size);
                /*
@@ -2244,8 +2237,6 @@ got_data:
                sdkp->capacity <<= 2;
        else if (sector_size == 1024)
                sdkp->capacity <<= 1;
-       else if (sector_size == 256)
-               sdkp->capacity >>= 1;
 
        blk_queue_physical_block_size(sdp->request_queue,
                                      sdkp->physical_block_size);
index d9dad90..3c6584f 100644 (file)
@@ -1600,8 +1600,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                break;
        default:
                vm_srb->data_in = UNKNOWN_TYPE;
-               vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
-                                                    SRB_FLAGS_DATA_OUT);
+               vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
                break;
        }
 
index 15a7ee3..5fe1c22 100644 (file)
@@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
 
        /*
         * Accessing PCI config without a proper delay after devices reset (not
-        * GPIO reset) was causing reboots on WRT300N v1.0.
+        * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704).
         * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it
         * completely. Flushing all writes was also tested but with no luck.
+        * The same problem was reported for WRT350N v1 (BCM4705), so we just
+        * sleep here unconditionally.
         */
-       if (pc->dev->bus->chip_id == 0x4704)
-               usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
 
        /* Enable PCI bridge BAR0 prefetch and burst */
        val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
index 34871a6..74e6114 100644 (file)
@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
         * Here we serialize access across the TIQN+TPG Tuple.
         */
        ret = down_interruptible(&tpg->np_login_sem);
-       if ((ret != 0) || signal_pending(current))
+       if (ret != 0)
                return -1;
 
        spin_lock_bh(&tpg->tpg_state_lock);
index 8ce94ff..70d799d 100644 (file)
@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1(
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
+               kfree(sess->sess_ops);
                kfree(sess);
                return -ENOMEM;
        }
index e8a2408..5e3295f 100644 (file)
@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
 int iscsit_get_tpg(
        struct iscsi_portal_group *tpg)
 {
-       int ret;
-
-       ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
-       return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+       return mutex_lock_interruptible(&tpg->tpg_access_lock);
 }
 
 void iscsit_put_tpg(struct iscsi_portal_group *tpg)
index 75cbde1..4f8d4d4 100644 (file)
@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (!port)
@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                struct t10_alua_lu_gp_member *lu_gp_mem;
 
index ddaf76a..e7b0430 100644 (file)
@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric(
 
        pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
                        " %s\n", tf->tf_group.cg_item.ci_name);
-       /*
-        * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
-        */
-       tf->tf_ops.tf_subsys = tf->tf_subsys;
        tf->tf_fabric = &tf->tf_group.cg_item;
        pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
                        " for %s\n", name);
@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = {
        },
 };
 
-struct configfs_subsystem *target_core_subsystem[] = {
-       &target_core_fabrics,
-       NULL,
-};
+int target_depend_item(struct config_item *item)
+{
+       return configfs_depend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_depend_item);
+
+void target_undepend_item(struct config_item *item)
+{
+       return configfs_undepend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_undepend_item);
 
 /*##############################################################################
 // Start functions called by external Target Fabrics Modules
@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo)
         * struct target_fabric_configfs->tf_cit_tmpl
         */
        tf->tf_module = fo->module;
-       tf->tf_subsys = target_core_subsystem[0];
        snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
 
        tf->tf_ops = *fo;
@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
 {
        int ret;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "Passthrough\n");
 
        spin_lock(&dev->dev_reservation_lock);
@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type);
 static ssize_t target_core_dev_pr_show_attr_res_type(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "SPC_PASSTHROUGH\n");
        else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "APTPL Bit Status: %s\n",
@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u16 port_rpti = 0, tpgt = 0;
        u8 type = 0, scope;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void)
 {
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
        struct config_group *lu_gp_cg = NULL;
-       struct configfs_subsystem *subsys;
+       struct configfs_subsystem *subsys = &target_core_fabrics;
        struct t10_alua_lu_gp *lu_gp;
        int ret;
 
@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void)
                " Engine: %s on %s/%s on "UTS_RELEASE"\n",
                TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
 
-       subsys = target_core_subsystem[0];
        config_group_init(&subsys->su_group);
        mutex_init(&subsys->su_mutex);
 
@@ -3008,13 +3009,10 @@ out_global:
 
 static void __exit target_core_exit_configfs(void)
 {
-       struct configfs_subsystem *subsys;
        struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
        struct config_item *item;
        int i;
 
-       subsys = target_core_subsystem[0];
-
        lu_gp_cg = &alua_lu_gps_group;
        for (i = 0; lu_gp_cg->default_groups[i]; i++) {
                item = &lu_gp_cg->default_groups[i]->cg_item;
@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void)
         * We expect subsys->su_group.default_groups to be released
         * by configfs subsystem provider logic..
         */
-       configfs_unregister_subsystem(subsys);
-       kfree(subsys->su_group.default_groups);
+       configfs_unregister_subsystem(&target_core_fabrics);
+       kfree(target_core_fabrics.su_group.default_groups);
 
        core_alua_free_lu_gp(default_lu_gp);
        default_lu_gp = NULL;
index 7faa6ae..ce5f768 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/in.h>
 #include <linux/export.h>
+#include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi.h>
@@ -527,7 +528,7 @@ static void core_export_port(
        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);
 
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev)
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
 }
+
+/*
+ * Common CDB parsing for kernel and user passthrough.
+ */
+sense_reason_t
+passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
+{
+       unsigned char *cdb = cmd->t_task_cdb;
+
+       /*
+        * Clear a lun set in the cdb if the initiator talking to use spoke
+        * and old standards version, as we can't assume the underlying device
+        * won't choke up on it.
+        */
+       switch (cdb[0]) {
+       case READ_10: /* SBC - RDProtect */
+       case READ_12: /* SBC - RDProtect */
+       case READ_16: /* SBC - RDProtect */
+       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+       case VERIFY: /* SBC - VRProtect */
+       case VERIFY_16: /* SBC - VRProtect */
+       case WRITE_VERIFY: /* SBC - VRProtect */
+       case WRITE_VERIFY_12: /* SBC - VRProtect */
+       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+               break;
+       default:
+               cdb[1] &= 0x1f; /* clear logical unit number */
+               break;
+       }
+
+       /*
+        * For REPORT LUNS we always need to emulate the response, for everything
+        * else, pass it up.
+        */
+       if (cdb[0] == REPORT_LUNS) {
+               cmd->execute_cmd = spc_emulate_report_luns;
+               return TCM_NO_SENSE;
+       }
+
+       /* Set DATA_CDB flag for ops that should have it */
+       switch (cdb[0]) {
+       case READ_6:
+       case READ_10:
+       case READ_12:
+       case READ_16:
+       case WRITE_6:
+       case WRITE_10:
+       case WRITE_12:
+       case WRITE_16:
+       case WRITE_VERIFY:
+       case WRITE_VERIFY_12:
+       case 0x8e: /* WRITE_VERIFY_16 */
+       case COMPARE_AND_WRITE:
+       case XDWRITEREAD_10:
+               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+               break;
+       case VARIABLE_LENGTH_CMD:
+               switch (get_unaligned_be16(&cdb[8])) {
+               case READ_32:
+               case WRITE_32:
+               case 0x0c: /* WRITE_VERIFY_32 */
+               case XDWRITEREAD_32:
+                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+                       break;
+               }
+       }
+
+       cmd->execute_cmd = exec_cmd;
+
+       return TCM_NO_SENSE;
+}
+EXPORT_SYMBOL(passthrough_parse_cdb);
index f7e6e51..3f27bfd 100644 (file)
@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = {
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
        .alloc_device           = fd_alloc_device,
index 1b7947c..8c96568 100644 (file)
@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = {
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
index 874a9bc..68bd7f5 100644 (file)
@@ -4,9 +4,6 @@
 /* target_core_alua.c */
 extern struct t10_alua_lu_gp *default_lu_gp;
 
-/* target_core_configfs.c */
-extern struct configfs_subsystem *target_core_subsystem[];
-
 /* target_core_device.c */
 extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
index c1aa965..a15411c 100644 (file)
@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations(
 
 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
 {
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
+       return target_depend_item(&tpg->tpg_group.cg_item);
 }
 
 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 {
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
-
+       target_undepend_item(&tpg->tpg_group.cg_item);
        atomic_dec_mb(&tpg->tpg_pr_ref_count);
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
        if (nacl->dynamic_node_acl)
                return 0;
-
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
+       return target_depend_item(&nacl->acl_group.cg_item);
 }
 
 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
-       if (nacl->dynamic_node_acl) {
-               atomic_dec_mb(&nacl->acl_pr_ref_count);
-               return;
-       }
-
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
-
+       if (!nacl->dynamic_node_acl)
+               target_undepend_item(&nacl->acl_group.cg_item);
        atomic_dec_mb(&nacl->acl_pr_ref_count);
 }
 
@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
+       return target_depend_item(&lun_acl->se_lun_group.cg_item);
 }
 
 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
-
+       target_undepend_item(&lun_acl->se_lun_group.cg_item);
        atomic_dec_mb(&se_deve->pr_ref_count);
 }
 
@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd)
                return 0;
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        spin_lock(&dev->dev_reservation_lock);
index f6c954c..ecc5eae 100644 (file)
@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
                                        " pdv_host_id: %d\n", pdv->pdv_host_id);
                                return -EINVAL;
                        }
+                       pdv->pdv_lld_host = sh;
                }
        } else {
                if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
                if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
                    (phv->phv_lld_host != NULL))
                        scsi_host_put(phv->phv_lld_host);
+               else if (pdv->pdv_lld_host)
+                       scsi_host_put(pdv->pdv_lld_host);
 
                if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
                        scsi_device_put(sd);
@@ -970,64 +973,13 @@ fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
-/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
-static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
-{
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-}
-
 static sense_reason_t
 pscsi_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-
        if (cmd->se_cmd_flags & SCF_BIDI)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-       pscsi_clear_cdb_lun(cdb);
-
-       /*
-        * For REPORT LUNS we always need to emulate the response, for everything
-        * else the default for pSCSI is to pass the command to the underlying
-        * LLD / physical hardware.
-        */
-       switch (cdb[0]) {
-       case REPORT_LUNS:
-               cmd->execute_cmd = spc_emulate_report_luns;
-               return 0;
-       case READ_6:
-       case READ_10:
-       case READ_12:
-       case READ_16:
-       case WRITE_6:
-       case WRITE_10:
-       case WRITE_12:
-       case WRITE_16:
-       case WRITE_VERIFY:
-               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               /* FALLTHROUGH*/
-       default:
-               cmd->execute_cmd = pscsi_execute_cmd;
-               return 0;
-       }
+       return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 }
 
 static sense_reason_t
@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
 static struct se_subsystem_api pscsi_template = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_PHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
index 1bd757d..820d305 100644 (file)
@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
        int     pdv_lun_id;
        struct block_device *pdv_bd;
        struct scsi_device *pdv_sd;
+       struct Scsi_Host *pdv_lld_host;
 } ____cacheline_aligned;
 
 typedef enum phv_modes {
index a263bf5..d16489b 100644 (file)
@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
index 8855781..733824e 100644 (file)
@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
         * comparision using SGLs at cmd->t_bidi_data_sg..
         */
        rc = down_interruptible(&dev->caw_sem);
-       if ((rc != 0) || signal_pending(current)) {
+       if (rc != 0) {
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
index 3fe5cb2..675f2d9 100644 (file)
@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
         * Check if SAM Task Attribute emulation is enabled for this
         * struct se_device storage object
         */
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
                                                   sectors, 0, NULL, 0);
                if (unlikely(cmd->pi_err)) {
                        spin_lock_irq(&cmd->t_state_lock);
-                       cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
                        spin_unlock_irq(&cmd->t_state_lock);
                        transport_generic_request_failure(cmd, cmd->pi_err);
                        return -1;
@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return false;
 
        /*
@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 
        if (target_handle_task_attr(cmd)) {
                spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+               cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
                spin_unlock_irq(&cmd->t_state_lock);
                return;
        }
@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return;
 
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
        case DMA_TO_DEVICE:
                if (cmd->se_cmd_flags & SCF_BIDI) {
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret < 0)
-                               break;
+                       break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
index dbc872a..07d2996 100644 (file)
@@ -71,13 +71,6 @@ struct tcmu_hba {
        u32 host_id;
 };
 
-/* User wants all cmds or just some */
-enum passthru_level {
-       TCMU_PASS_ALL = 0,
-       TCMU_PASS_IO,
-       TCMU_PASS_INVALID,
-};
-
 #define TCMU_CONFIG_LEN 256
 
 struct tcmu_dev {
@@ -89,7 +82,6 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_OPEN 0
 #define TCMU_DEV_BIT_BROKEN 1
        unsigned long flags;
-       enum passthru_level pass_level;
 
        struct uio_info uio_info;
 
@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        setup_timer(&udev->timeout, tcmu_device_timedout,
                (unsigned long)udev);
 
-       udev->pass_level = TCMU_PASS_ALL;
-
        return &udev->se_dev;
 }
 
@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-       Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
+       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
 };
 
 static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
-       {Opt_pass_level, "pass_level=%u"},
+       {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_err, NULL}
 };
 
@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
-       int arg;
+       unsigned long tmp_ul;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                        if (ret < 0)
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
-               case Opt_pass_level:
-                       match_int(args, &arg);
-                       if (arg >= TCMU_PASS_INVALID) {
-                               pr_warn("TCMU: Invalid pass_level: %d\n", arg);
+               case Opt_hw_block_size:
+                       arg_p = match_strdup(&args[0]);
+                       if (!arg_p) {
+                               ret = -ENOMEM;
                                break;
                        }
-
-                       pr_debug("TCMU: Setting pass_level to %d\n", arg);
-                       udev->pass_level = arg;
+                       ret = kstrtoul(arg_p, 0, &tmp_ul);
+                       kfree(arg_p);
+                       if (ret < 0) {
+                               pr_err("kstrtoul() failed for hw_block_size=\n");
+                               break;
+                       }
+                       if (!tmp_ul) {
+                               pr_err("hw_block_size must be nonzero\n");
+                               break;
+                       }
+                       dev->dev_attrib.hw_block_size = tmp_ul;
                        break;
                default:
                        break;
@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
 
        bl = sprintf(b + bl, "Config: %s ",
                     udev->dev_config[0] ? udev->dev_config : "NULL");
-       bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
-                     udev->dev_size, udev->pass_level);
+       bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
 
        return bl;
 }
@@ -1039,20 +1036,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
 }
 
 static sense_reason_t
-tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
-               enum dma_data_direction data_direction)
-{
-       int ret;
-
-       ret = tcmu_queue_cmd(se_cmd);
-
-       if (ret != 0)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       else
-               return TCM_NO_SENSE;
-}
-
-static sense_reason_t
 tcmu_pass_op(struct se_cmd *se_cmd)
 {
        int ret = tcmu_queue_cmd(se_cmd);
@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd)
                return TCM_NO_SENSE;
 }
 
-static struct sbc_ops tcmu_sbc_ops = {
-       .execute_rw = tcmu_execute_rw,
-       .execute_sync_cache     = tcmu_pass_op,
-       .execute_write_same     = tcmu_pass_op,
-       .execute_write_same_unmap = tcmu_pass_op,
-       .execute_unmap          = tcmu_pass_op,
-};
-
 static sense_reason_t
 tcmu_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-       struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
-       sense_reason_t ret;
-
-       switch (udev->pass_level) {
-       case TCMU_PASS_ALL:
-               /* We're just like pscsi, then */
-               /*
-                * For REPORT LUNS we always need to emulate the response, for everything
-                * else, pass it up.
-                */
-               switch (cdb[0]) {
-               case REPORT_LUNS:
-                       cmd->execute_cmd = spc_emulate_report_luns;
-                       break;
-               case READ_6:
-               case READ_10:
-               case READ_12:
-               case READ_16:
-               case WRITE_6:
-               case WRITE_10:
-               case WRITE_12:
-               case WRITE_16:
-               case WRITE_VERIFY:
-                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-                       /* FALLTHROUGH */
-               default:
-                       cmd->execute_cmd = tcmu_pass_op;
-               }
-               ret = TCM_NO_SENSE;
-               break;
-       case TCMU_PASS_IO:
-               ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
-               break;
-       default:
-               pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
-               ret = TCM_CHECK_CONDITION_ABORT_CMD;
-       }
-
-       return ret;
+       return passthrough_parse_cdb(cmd, tcmu_pass_op);
 }
 
-DEF_TB_DEFAULT_ATTRIBS(tcmu);
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
+TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
+TB_DEV_ATTR_RO(tcmu, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
+TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
+TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
 
 static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-       &tcmu_dev_attrib_emulate_model_alias.attr,
-       &tcmu_dev_attrib_emulate_dpo.attr,
-       &tcmu_dev_attrib_emulate_fua_write.attr,
-       &tcmu_dev_attrib_emulate_fua_read.attr,
-       &tcmu_dev_attrib_emulate_write_cache.attr,
-       &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &tcmu_dev_attrib_emulate_tas.attr,
-       &tcmu_dev_attrib_emulate_tpu.attr,
-       &tcmu_dev_attrib_emulate_tpws.attr,
-       &tcmu_dev_attrib_emulate_caw.attr,
-       &tcmu_dev_attrib_emulate_3pc.attr,
-       &tcmu_dev_attrib_pi_prot_type.attr,
        &tcmu_dev_attrib_hw_pi_prot_type.attr,
-       &tcmu_dev_attrib_pi_prot_format.attr,
-       &tcmu_dev_attrib_enforce_pr_isids.attr,
-       &tcmu_dev_attrib_is_nonrot.attr,
-       &tcmu_dev_attrib_emulate_rest_reord.attr,
-       &tcmu_dev_attrib_force_pr_aptpl.attr,
        &tcmu_dev_attrib_hw_block_size.attr,
-       &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
-       &tcmu_dev_attrib_queue_depth.attr,
-       &tcmu_dev_attrib_max_unmap_lba_count.attr,
-       &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
-       &tcmu_dev_attrib_unmap_granularity.attr,
-       &tcmu_dev_attrib_unmap_granularity_alignment.attr,
-       &tcmu_dev_attrib_max_write_same_len.attr,
        NULL,
 };
 
@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = {
        .inquiry_prod           = "USER",
        .inquiry_rev            = TCMU_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = tcmu_attach_hba,
        .detach_hba             = tcmu_detach_hba,
        .alloc_device           = tcmu_alloc_device,
index a600ff1..8fd680a 100644 (file)
@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                        bool src)
 {
        struct se_device *se_dev;
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
        int rc;
 
@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                " se_dev\n", xop->src_dev);
                }
 
-               rc = configfs_depend_item(subsys,
-                               &se_dev->dev_group.cg_item);
+               rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
                        pr_err("configfs_depend_item attempt failed:"
                                " %d for se_dev: %p\n", rc, se_dev);
@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                        return rc;
                }
 
-               pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
-                       " se_dev->se_dev_group: %p\n", subsys, se_dev,
+               pr_debug("Called configfs_depend_item for se_dev: %p"
+                       " se_dev->se_dev_group: %p\n", se_dev,
                        &se_dev->dev_group);
 
                mutex_unlock(&g_device_mutex);
@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 
 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 {
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        struct se_device *remote_dev;
 
        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
        else
                remote_dev = xop->src_dev;
 
-       pr_debug("Calling configfs_undepend_item for subsys: %p"
+       pr_debug("Calling configfs_undepend_item for"
                  " remote_dev: %p remote_dev->dev_group: %p\n",
-                 subsys, remote_dev, &remote_dev->dev_group.cg_item);
+                 remote_dev, &remote_dev->dev_group.cg_item);
 
-       configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+       target_undepend_item(&remote_dev->dev_group.cg_item);
 }
 
 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
index c2556cf..01255fd 100644 (file)
@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
        .is_valid_shift = 10,
        .temp_shift = 0,
        .temp_mask = 0x3ff,
-       .coef_b = 1169498786UL,
-       .coef_m = 2000000UL,
-       .coef_div = 4289,
+       .coef_b = 2931108200UL,
+       .coef_m = 5000000UL,
+       .coef_div = 10502,
        .inverted = true,
 };
 
index a492927..58b5c66 100644 (file)
@@ -420,7 +420,8 @@ const struct ti_bandgap_data dra752_data = {
                        TI_BANDGAP_FEATURE_FREEZE_BIT |
                        TI_BANDGAP_FEATURE_TALERT |
                        TI_BANDGAP_FEATURE_COUNTER_DELAY |
-                       TI_BANDGAP_FEATURE_HISTORY_BUFFER,
+                       TI_BANDGAP_FEATURE_HISTORY_BUFFER |
+                       TI_BANDGAP_FEATURE_ERRATA_814,
        .fclock_name = "l3instr_ts_gclk_div",
        .div_ck_name = "l3instr_ts_gclk_div",
        .conv_table = dra752_adc_to_temp,
index eff0c80..79ff70c 100644 (file)
@@ -319,7 +319,8 @@ const struct ti_bandgap_data omap5430_data = {
                        TI_BANDGAP_FEATURE_FREEZE_BIT |
                        TI_BANDGAP_FEATURE_TALERT |
                        TI_BANDGAP_FEATURE_COUNTER_DELAY |
-                       TI_BANDGAP_FEATURE_HISTORY_BUFFER,
+                       TI_BANDGAP_FEATURE_HISTORY_BUFFER |
+                       TI_BANDGAP_FEATURE_ERRATA_813,
        .fclock_name = "l3instr_ts_gclk_div",
        .div_ck_name = "l3instr_ts_gclk_div",
        .conv_table = omap5430_adc_to_temp,
index 62a5d44..bc14dc8 100644 (file)
@@ -119,6 +119,37 @@ exit:
 }
 
 /**
+ * ti_errata814_bandgap_read_temp() - helper function to read dra7 sensor temperature
+ * @bgp: pointer to ti_bandgap structure
+ * @reg: desired register (offset) to be read
+ *
+ * Function to read dra7 bandgap sensor temperature. This is done separately
+ * so as to work around the errata "Bandgap Temperature read Dtemp can be
+ * corrupted" - Errata ID: i814.
+ * Read accesses to registers listed below can be corrupted due to incorrect
+ * resynchronization between clock domains.
+ * Read access to registers below can be corrupted :
+ * CTRL_CORE_DTEMP_MPU/GPU/CORE/DSPEVE/IVA_n (n = 0 to 4)
+ * CTRL_CORE_TEMP_SENSOR_MPU/GPU/CORE/DSPEVE/IVA_n
+ *
+ * Return: the register value.
+ */
+static u32 ti_errata814_bandgap_read_temp(struct ti_bandgap *bgp,  u32 reg)
+{
+       u32 val1, val2;
+
+       val1 = ti_bandgap_readl(bgp, reg);
+       val2 = ti_bandgap_readl(bgp, reg);
+
+       /* If both times we read the same value then that is right */
+       if (val1 == val2)
+               return val1;
+
+       /* if val1 and val2 are different, read it a third time */
+       return ti_bandgap_readl(bgp, reg);
+}
+
+/**
  * ti_bandgap_read_temp() - helper function to read sensor temperature
  * @bgp: pointer to ti_bandgap structure
  * @id: bandgap sensor id
@@ -148,7 +179,11 @@ static u32 ti_bandgap_read_temp(struct ti_bandgap *bgp, int id)
        }
 
        /* read temperature */
-       temp = ti_bandgap_readl(bgp, reg);
+       if (TI_BANDGAP_HAS(bgp, ERRATA_814))
+               temp = ti_errata814_bandgap_read_temp(bgp, reg);
+       else
+               temp = ti_bandgap_readl(bgp, reg);
+
        temp &= tsr->bgap_dtemp_mask;
 
        if (TI_BANDGAP_HAS(bgp, FREEZE_BIT))
@@ -410,7 +445,7 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
 {
        struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
        struct temp_sensor_registers *tsr;
-       u32 thresh_val, reg_val, t_hot, t_cold;
+       u32 thresh_val, reg_val, t_hot, t_cold, ctrl;
        int err = 0;
 
        tsr = bgp->conf->sensors[id].registers;
@@ -442,8 +477,47 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
                  ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
        reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
                   (t_cold << __ffs(tsr->threshold_tcold_mask));
+
+       /**
+        * Errata i813:
+        * Spurious Thermal Alert: Talert can happen randomly while the device
+        * remains under the temperature limit defined for this event to trigger.
+        * This spurious event is caused by an incorrect re-synchronization
+        * between clock domains. The comparison between configured threshold
+        * and current temperature value can happen while the value is
+        * transitioning (metastable), thus causing inappropriate event
+        * generation. No spurious event occurs as long as the threshold value
+        * stays unchanged. Spurious event can be generated while a thermal
+        * alert threshold is modified in
+        * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n.
+        */
+
+       if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
+               /* Mask t_hot and t_cold events at the IP Level */
+               ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
+
+               if (hot)
+                       ctrl &= ~tsr->mask_hot_mask;
+               else
+                       ctrl &= ~tsr->mask_cold_mask;
+
+               ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
+       }
+
+       /* Write the threshold value */
        ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
 
+       if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
+               /* Unmask t_hot and t_cold events at the IP Level */
+               ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
+               if (hot)
+                       ctrl |= tsr->mask_hot_mask;
+               else
+                       ctrl |= tsr->mask_cold_mask;
+
+               ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
+       }
+
        if (err) {
                dev_err(bgp->dev, "failed to reprogram thot threshold\n");
                err = -EIO;
index b3adf72..0c52f7a 100644 (file)
@@ -318,6 +318,10 @@ struct ti_temp_sensor {
  * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features
  *     a history buffer of temperatures.
  *
+ * TI_BANDGAP_FEATURE_ERRATA_814 - used as a workaround when the bandgap
+ *     device has Errata 814
+ * TI_BANDGAP_FEATURE_ERRATA_813 - used as a workaround when the bandgap
+ *     device has Errata 813
  * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
  *      specific feature (above) or not. Return non-zero, if yes.
  */
@@ -331,6 +335,8 @@ struct ti_temp_sensor {
 #define TI_BANDGAP_FEATURE_FREEZE_BIT          BIT(7)
 #define TI_BANDGAP_FEATURE_COUNTER_DELAY       BIT(8)
 #define TI_BANDGAP_FEATURE_HISTORY_BUFFER      BIT(9)
+#define TI_BANDGAP_FEATURE_ERRATA_814          BIT(10)
+#define TI_BANDGAP_FEATURE_ERRATA_813          BIT(11)
 #define TI_BANDGAP_HAS(b, f)                   \
                        ((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
 
index 5bab1c6..7a3d146 100644 (file)
@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
                        return -ENOMEM;
        }
 
-       info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
+       info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
        info->vtermno = HVC_COOKIE;
 
        spin_lock(&xencons_lock);
index 04d9e23..358323c 100644 (file)
@@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty {
 static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
                                        unsigned int offs, unsigned int data)
 {
-       iowrite32(data, priv->reg + offs);
+       __raw_writel(data, priv->reg + offs);
 }
 
 static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
                                               unsigned int offs)
 {
-       return ioread32(priv->reg + offs);
+       return __raw_readl(priv->reg + offs);
 }
 
 /* Encoding of byte stream in FDC words */
@@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
                s += inc[word.bytes - 1];
 
                /* Busy wait until there's space in fifo */
-               while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+               while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                        ;
-               iowrite32(word.word, regs + REG_FDTX(c->index));
+               __raw_writel(word.word, regs + REG_FDTX(c->index));
        }
 out:
        local_irq_restore(flags);
@@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void)
 
                /* Read next word from KGDB channel */
                do {
-                       stat = ioread32(regs + REG_FDSTAT);
+                       stat = __raw_readl(regs + REG_FDSTAT);
 
                        /* No data waiting? */
                        if (stat & REG_FDSTAT_RXE)
@@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void)
                        /* Read next word */
                        channel = (stat & REG_FDSTAT_RXCHAN) >>
                                        REG_FDSTAT_RXCHAN_SHIFT;
-                       data = ioread32(regs + REG_FDRX);
+                       data = __raw_readl(regs + REG_FDRX);
                } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
 
                /* Decode into rbuf */
@@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void)
                return;
 
        /* Busy wait until there's space in fifo */
-       while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+       while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                ;
-       iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
+       __raw_writel(word.word,
+                    regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
 }
 
 /* flush the whole write buffer to the TX FIFO */
index 5e19bb5..ea32b38 100644 (file)
@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                         * dependency now.
                         */
                        se_tpg = &tpg->se_tpg;
-                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                                  &se_tpg->tpg_group.cg_item);
+                       ret = target_depend_item(&se_tpg->tpg_group.cg_item);
                        if (ret) {
                                pr_warn("configfs_depend_item() failed: %d\n", ret);
                                kfree(vs_tpg);
@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
                 */
                se_tpg = &tpg->se_tpg;
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 3a145a6..6897f1c 100644 (file)
@@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
        pb->pwm = devm_pwm_get(&pdev->dev, NULL);
        if (IS_ERR(pb->pwm)) {
+               ret = PTR_ERR(pb->pwm);
+               if (ret == -EPROBE_DEFER)
+                       goto err_alloc;
+
                dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
                pb->legacy = true;
                pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
index 2b8553b..3838795 100644 (file)
@@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void)
 }
 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
 
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq, ret;
@@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                if (irq < 0)
                        goto out;
 
-               irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "virq");
+               if (percpu)
+                       irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                                                     handle_percpu_irq, "virq");
+               else
+                       irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+                                                     handle_edge_irq, "virq");
 
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
@@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 {
        int irq, retval;
 
-       irq = bind_virq_to_irq(virq, cpu);
+       irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
index 241ef68..cd46e41 100644 (file)
@@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                        total_size = total_mapping_size(elf_phdata,
                                                        loc->elf_ex.e_phnum);
                        if (!total_size) {
-                               error = -EINVAL;
+                               retval = -EINVAL;
                                goto out_free_dentry;
                        }
                }
index 9de772e..614aaa1 100644 (file)
@@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  * indirect refs to their parent bytenr.
  * When roots are found, they're added to the roots list
  *
+ * NOTE: This can return values > 0
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
@@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/**
+ * btrfs_check_shared - tell us whether an extent is shared
+ *
+ * @trans: optional trans handle
+ *
+ * btrfs_check_shared uses the backref walking code but will short
+ * circuit as soon as it finds a root or inode that doesn't match the
+ * one passed in. This provides a significant performance benefit for
+ * callers (such as fiemap) which want to know whether the extent is
+ * shared but do not need a ref count.
+ *
+ * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
+ */
 int btrfs_check_shared(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 root_objectid,
                       u64 inum, u64 bytenr)
@@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
                ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
                                        roots, NULL, root_objectid, inum);
                if (ret == BACKREF_FOUND_SHARED) {
+                       /* this is the only condition under which we return 1 */
                        ret = 1;
                        break;
                }
                if (ret < 0 && ret != -ENOENT)
                        break;
+               ret = 0;
                node = ulist_next(tmp, &uiter);
                if (!node)
                        break;
index 7effed6..0ec3acd 100644 (file)
@@ -8829,6 +8829,24 @@ again:
                goto again;
        }
 
+       /*
+        * if we are changing raid levels, try to allocate a corresponding
+        * block group with the new raid level.
+        */
+       alloc_flags = update_block_group_flags(root, cache->flags);
+       if (alloc_flags != cache->flags) {
+               ret = do_chunk_alloc(trans, root, alloc_flags,
+                                    CHUNK_ALLOC_FORCE);
+               /*
+                * ENOSPC is allowed here, we may have enough space
+                * already allocated at the new raid level to
+                * carry on
+                */
+               if (ret == -ENOSPC)
+                       ret = 0;
+               if (ret < 0)
+                       goto out;
+       }
 
        ret = set_block_group_ro(cache, 0);
        if (!ret)
@@ -8842,7 +8860,9 @@ again:
 out:
        if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
                alloc_flags = update_block_group_flags(root, cache->flags);
+               lock_chunks(root->fs_info->chunk_root);
                check_system_chunk(trans, root, alloc_flags);
+               unlock_chunks(root->fs_info->chunk_root);
        }
        mutex_unlock(&root->fs_info->ro_block_group_mutex);
 
index 96aebf3..174f5e1 100644 (file)
@@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 {
        u64 chunk_offset;
 
+       ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
        chunk_offset = find_next_chunk(extent_root->fs_info);
        return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 }
index 430e034..7dc886c 100644 (file)
@@ -24,6 +24,7 @@
 #include "cifsfs.h"
 #include "dns_resolve.h"
 #include "cifs_debug.h"
+#include "cifs_unicode.h"
 
 static LIST_HEAD(cifs_dfs_automount_list);
 
@@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        xid = get_xid();
        rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
                &num_referrals, &referrals,
-               cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+               cifs_remap(cifs_sb));
        free_xid(xid);
 
        cifs_put_tlink(tlink);
index 0303c67..5a53ac6 100644 (file)
 #include "cifsglob.h"
 #include "cifs_debug.h"
 
-/*
- * cifs_utf16_bytes - how long will a string be after conversion?
- * @utf16 - pointer to input string
- * @maxbytes - don't go past this many bytes of input string
- * @codepage - destination codepage
- *
- * Walk a utf16le string and return the number of bytes that the string will
- * be after being converted to the given charset, not including any null
- * termination required. Don't walk past maxbytes in the source buffer.
- */
-int
-cifs_utf16_bytes(const __le16 *from, int maxbytes,
-               const struct nls_table *codepage)
-{
-       int i;
-       int charlen, outlen = 0;
-       int maxwords = maxbytes / 2;
-       char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
-
-       for (i = 0; i < maxwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
-                       break;
-
-               charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
-               if (charlen > 0)
-                       outlen += charlen;
-               else
-                       outlen++;
-       }
-
-       return outlen;
-}
-
 int cifs_remap(struct cifs_sb_info *cifs_sb)
 {
        int map_type;
@@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target)
  * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
  */
 static int
-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
             int maptype)
 {
        int len = 1;
+       __u16 src_char;
+
+       src_char = *from;
 
        if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
                return len;
@@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 
        /* if character not one of seven in special remap set */
        len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
-       if (len <= 0) {
-               *target = '?';
-               len = 1;
-       }
+       if (len <= 0)
+               goto surrogate_pair;
+
+       return len;
+
+surrogate_pair:
+       /* convert SURROGATE_PAIR and IVS */
+       if (strcmp(cp->charset, "utf8"))
+               goto unknown;
+       len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
+       if (len <= 0)
+               goto unknown;
+       return len;
+
+unknown:
+       *target = '?';
+       len = 1;
        return len;
 }
 
@@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        int nullsize = nls_nullsize(codepage);
        int fromwords = fromlen / 2;
        char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
+       __u16 ftmp[3];          /* ftmp[3] = 3array x 2bytes = 6bytes UTF-16 */
 
        /*
         * because the chars can be of varying widths, we need to take care
@@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
        for (i = 0; i < fromwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
                        break;
+               if (i + 1 < fromwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < fromwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
 
                /*
                 * check to see if converting this character might make the
@@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
                /* put converted char into 'to' buffer */
                charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
                outlen += charlen;
+
+               /* charlen (=bytes of UTF-8 for 1 character)
+                * 4bytes UTF-8(surrogate pair) is charlen=4
+                *   (4bytes UTF-16 code)
+                * 7-8bytes UTF-8(IVS) is charlen=3+4 or 4+4
+                *   (2 UTF-8 pairs divided to 2 UTF-16 pairs) */
+               if (charlen == 4)
+                       i++;
+               else if (charlen >= 5)
+                       /* 5-6bytes UTF-8 */
+                       i += 2;
        }
 
        /* properly null-terminate string */
@@ -296,6 +296,46 @@ success:
 }
 
 /*
+ * cifs_utf16_bytes - how long will a string be after conversion?
+ * @utf16 - pointer to input string
+ * @maxbytes - don't go past this many bytes of input string
+ * @codepage - destination codepage
+ *
+ * Walk a utf16le string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past maxbytes in the source buffer.
+ */
+int
+cifs_utf16_bytes(const __le16 *from, int maxbytes,
+               const struct nls_table *codepage)
+{
+       int i;
+       int charlen, outlen = 0;
+       int maxwords = maxbytes / 2;
+       char tmp[NLS_MAX_CHARSET_SIZE];
+       __u16 ftmp[3];
+
+       for (i = 0; i < maxwords; i++) {
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
+                       break;
+               if (i + 1 < maxwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < maxwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
+
+               charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
+               outlen += charlen;
+       }
+
+       return outlen;
+}
+
+/*
  * cifs_strndup_from_utf16 - copy a string from wire format to the local
  * codepage
  * @src - source string
@@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        char src_char;
        __le16 dst_char;
        wchar_t tmp;
+       wchar_t *wchar_to;      /* UTF-16 */
+       int ret;
+       unicode_t u;
 
        if (map_chars == NO_MAP_UNI_RSVD)
                return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
+       wchar_to = kzalloc(6, GFP_KERNEL);
+
        for (i = 0; i < srclen; j++) {
                src_char = source[i];
                charlen = 1;
@@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                         * if no match, use question mark, which at least in
                         * some cases serves as wild card
                         */
-                       if (charlen < 1) {
-                               dst_char = cpu_to_le16(0x003f);
-                               charlen = 1;
+                       if (charlen > 0)
+                               goto ctoUTF16;
+
+                       /* convert SURROGATE_PAIR */
+                       if (strcmp(cp->charset, "utf8") || !wchar_to)
+                               goto unknown;
+                       if (*(source + i) & 0x80) {
+                               charlen = utf8_to_utf32(source + i, 6, &u);
+                               if (charlen < 0)
+                                       goto unknown;
+                       } else
+                               goto unknown;
+                       ret  = utf8s_to_utf16s(source + i, charlen,
+                                              UTF16_LITTLE_ENDIAN,
+                                              wchar_to, 6);
+                       if (ret < 0)
+                               goto unknown;
+
+                       i += charlen;
+                       dst_char = cpu_to_le16(*wchar_to);
+                       if (charlen <= 3)
+                               /* 1-3bytes UTF-8 to 2bytes UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                       else if (charlen == 4) {
+                               /* 4bytes UTF-8(surrogate pair) to 4bytes UTF-16
+                                * 7-8bytes UTF-8(IVS) divided to 2 UTF-16
+                                *   (charlen=3+4 or 4+4) */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                       } else if (charlen >= 5) {
+                               /* 5-6bytes UTF-8 to 6bytes UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 2));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
                        }
+                       continue;
+
+unknown:
+                       dst_char = cpu_to_le16(0x003f);
+                       charlen = 1;
                }
+
+ctoUTF16:
                /*
                 * character may take more than one byte in the source string,
                 * but will take exactly two bytes in the target string
@@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 
 ctoUTF16_out:
        put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+       kfree(wchar_to);
        return j;
 }
 
index f5089bd..0a9fb6b 100644 (file)
@@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",nouser_xattr");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                seq_puts(s, ",mapchars");
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+               seq_puts(s, ",mapposix");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
                seq_puts(s, ",sfu");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
index c31ce98..c63fd1d 100644 (file)
@@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid,
 extern int CIFSUnixCreateSymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **syminfo,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                               __u16 fid, char **symlinkinfo,
                               const struct nls_table *nls_codepage);
index 84650a5..f26ffbf 100644 (file)
@@ -2784,7 +2784,7 @@ copyRetry:
 int
 CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *fromName, const char *toName,
-                     const struct nls_table *nls_codepage)
+                     const struct nls_table *nls_codepage, int remap)
 {
        TRANSACTION2_SPI_REQ *pSMB = NULL;
        TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -2804,9 +2804,9 @@ createSymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
-                                   /* find define for this maxpathcomponent */
-                                   PATH_MAX, nls_codepage);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -2828,9 +2828,9 @@ createSymLinkRetry:
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len_target =
-                   cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
-                                   /* find define for this maxpathcomponent */
-                                   , nls_codepage);
+                   cifsConvertToUTF16((__le16 *) data_offset, toName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3034,7 +3034,7 @@ winCreateHardLinkRetry:
 int
 CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **symlinkinfo,
-                       const struct nls_table *nls_codepage)
+                       const struct nls_table *nls_codepage, int remap)
 {
 /* SMB_QUERY_FILE_UNIX_LINK */
        TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3055,8 +3055,9 @@ querySymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
-                                       PATH_MAX, nls_codepage);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4917,7 +4918,7 @@ getDFSRetry:
                strncpy(pSMB->RequestFileName, search_name, name_len);
        }
 
-       if (ses->server && ses->server->sign)
+       if (ses->server->sign)
                pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
        pSMB->hdr.Uid = ses->Suid;
index f3bfe08..8383d5e 100644 (file)
@@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                rc = generic_ip_connect(server);
                if (rc) {
                        cifs_dbg(FYI, "reconnect error %d\n", rc);
+                       mutex_unlock(&server->srv_mutex);
                        msleep(3000);
                } else {
                        atomic_inc(&tcpSesReconnectCount);
@@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
                        if (server->tcpStatus != CifsExiting)
                                server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
+                       mutex_unlock(&server->srv_mutex);
                }
-               mutex_unlock(&server->srv_mutex);
        } while (server->tcpStatus == CifsNeedReconnect);
 
        return rc;
index 338d569..c3eb998 100644 (file)
@@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
                }
                rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
                                            cifs_sb->local_nls,
-                                           cifs_sb->mnt_cifs_flags &
-                                               CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                           cifs_remap(cifs_sb));
                if (rc)
                        goto mknod_out;
 
index cafbf10..3f50cee 100644 (file)
@@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
-                            cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                            cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (rc)
@@ -1553,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                rc = server->ops->mand_unlock_range(cfile, flock, xid);
 
 out:
-       if (flock->fl_flags & FL_POSIX)
-               posix_lock_file_wait(file, flock);
+       if (flock->fl_flags & FL_POSIX && !rc)
+               rc = posix_lock_file_wait(file, flock);
        return rc;
 }
 
index 55b5811..f621b44 100644 (file)
@@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
        /* could have done a find first instead but this returns more info */
        rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
-                                 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                 cifs_sb->local_nls, cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (!rc) {
@@ -402,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                        rc = -ENOMEM;
        } else {
                /* we already have inode, update it */
+
+               /* if uniqueid is different, return error */
+               if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+                   CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
+               /* if filetype is different, return error */
+               if (unlikely(((*pinode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
+cgiiu_exit:
        return rc;
 }
 
@@ -839,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                if (!*inode)
                        rc = -ENOMEM;
        } else {
+               /* we already have inode, update it */
+
+               /* if filetype is different, return error */
+               if (unlikely(((*inode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgii_exit;
+               }
+
                cifs_fattr_to_inode(*inode, &fattr);
        }
 
@@ -2215,8 +2239,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
                pTcon = tlink_tcon(tlink);
                rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
                                    cifs_sb->local_nls,
-                                   cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                   cifs_remap(cifs_sb));
                cifs_put_tlink(tlink);
        }
 
index 252e672..e6c707c 100644 (file)
@@ -717,7 +717,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
                rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
        else if (pTcon->unix_ext)
                rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
-                                          cifs_sb->local_nls);
+                                          cifs_sb->local_nls,
+                                          cifs_remap(cifs_sb));
        /* else
           rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
                                        cifs_sb_target->local_nls); */
index b4a4723..b1eede3 100644 (file)
@@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
        if (dentry) {
                inode = d_inode(dentry);
                if (inode) {
+                       if (d_mountpoint(dentry))
+                               goto out;
                        /*
                         * If we're generating inode numbers, then we don't
                         * want to clobber the existing one with the one that
index 7bfdd60..fc537c2 100644 (file)
@@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        /* Check for unix extensions */
        if (cap_unix(tcon->ses)) {
                rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
-                                            cifs_sb->local_nls);
+                                            cifs_sb->local_nls,
+                                            cifs_remap(cifs_sb));
                if (rc == -EREMOTE)
                        rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
                                                    target_path,
index 65cd7a8..54cbe19 100644 (file)
@@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
 
        /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
        /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
-       if ((tcon->ses) &&
+       if ((tcon->ses) && (tcon->ses->server) &&
            (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                hdr->CreditCharge = cpu_to_le16(1);
        /* else CreditCharge MBZ */
index 656ce52..37b5afd 100644 (file)
@@ -1239,13 +1239,13 @@ ascend:
                /* might go back up the wrong parent if we have had a rename. */
                if (need_seqretry(&rename_lock, seq))
                        goto rename_retry;
-               next = child->d_child.next;
-               while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
                        if (next == &this_parent->d_subdirs)
                                goto ascend;
                        child = list_entry(next, struct dentry, d_child);
-                       next = next->next;
-               }
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
                rcu_read_unlock();
                goto resume;
        }
index 45b35b9..55e1e3a 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
+#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -5604,6 +5605,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->server = server;
        atomic_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
+       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        return p;
 out_free_seqid:
@@ -5716,6 +5718,7 @@ static void nfs4_lock_release(void *calldata)
                nfs_free_seqid(data->arg.lock_seqid);
        nfs4_put_lock_state(data->lsp);
        put_nfs_open_context(data->ctx);
+       fput(data->fl.fl_file);
        kfree(data);
        dprintk("%s: done!\n", __func__);
 }
index d12a4be..dfc19f1 100644 (file)
@@ -1845,12 +1845,15 @@ int nfs_wb_all(struct inode *inode)
        trace_nfs_writeback_inode_enter(inode);
 
        ret = filemap_write_and_wait(inode->i_mapping);
-       if (!ret) {
-               ret = nfs_commit_inode(inode, FLUSH_SYNC);
-               if (!ret)
-                       pnfs_sync_inode(inode, true);
-       }
+       if (ret)
+               goto out;
+       ret = nfs_commit_inode(inode, FLUSH_SYNC);
+       if (ret < 0)
+               goto out;
+       pnfs_sync_inode(inode, true);
+       ret = 0;
 
+out:
        trace_nfs_writeback_inode_exit(inode, ret);
        return ret;
 }
index 0822345..83f4e76 100644 (file)
@@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb,
        goto out;
 
 found:
-       *return_block = i * bits_per_entry + bit;
+       *return_block = (u64) i * bits_per_entry + bit;
        *return_size = run;
        ret = set_run(sb, i, bits_per_entry, bit, run, 1);
 
index 138321b..3d935c8 100644 (file)
@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
  */
 static int omfs_get_imap(struct super_block *sb)
 {
-       unsigned int bitmap_size, count, array_size;
+       unsigned int bitmap_size, array_size;
+       int count;
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        struct buffer_head *bh;
        unsigned long **ptr;
@@ -359,7 +360,7 @@ nomem:
 }
 
 enum {
-       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
+       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
 };
 
 static const match_table_t tokens = {
@@ -368,6 +369,7 @@ static const match_table_t tokens = {
        {Opt_umask, "umask=%o"},
        {Opt_dmask, "dmask=%o"},
        {Opt_fmask, "fmask=%o"},
+       {Opt_err, NULL},
 };
 
 static int parse_options(char *options, struct omfs_sb_info *sbi)
@@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        sb->s_root = d_make_root(root);
-       if (!sb->s_root)
+       if (!sb->s_root) {
+               ret = -ENOMEM;
                goto out_brelse_bh2;
+       }
        printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name);
 
        ret = 0;
index 24f6404..84d693d 100644 (file)
@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        struct cred *override_cred;
        char *link = NULL;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
index d139405..692ceda 100644 (file)
@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        struct kstat stat;
        int err;
 
+       if (WARN_ON(!workdir))
+               return ERR_PTR(-EROFS);
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
-               opaquedir = ovl_check_empty_and_clear(dentry);
-               err = PTR_ERR(opaquedir);
-               if (IS_ERR(opaquedir))
-                       goto out;
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
+       if (is_dir) {
+               if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
+                       opaquedir = ovl_check_empty_and_clear(dentry);
+                       err = PTR_ERR(opaquedir);
+                       if (IS_ERR(opaquedir))
+                               goto out;
+               } else {
+                       LIST_HEAD(list);
+
+                       /*
+                        * When removing an empty opaque directory, then it
+                        * makes no sense to replace it with an exact replica of
+                        * itself.  But emptiness still needs to be checked.
+                        */
+                       err = ovl_check_empty_dir(dentry, &list);
+                       ovl_cache_free(&list);
+                       if (err)
+                               goto out;
+               }
        }
 
        err = ovl_lock_rename_workdir(workdir, upperdir);
index 5f0d199..bf8537c 100644 (file)
@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
+       if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
                return -EROFS;
 
        return 0;
@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
                err = PTR_ERR(ufs->workdir);
                if (IS_ERR(ufs->workdir)) {
-                       pr_err("overlayfs: failed to create directory %s/%s\n",
-                              ufs->config.workdir, OVL_WORKDIR_NAME);
-                       goto out_put_upper_mnt;
+                       pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+                               ufs->config.workdir, OVL_WORKDIR_NAME, -err);
+                       sb->s_flags |= MS_RDONLY;
+                       ufs->workdir = NULL;
                }
        }
 
@@ -997,7 +998,6 @@ out_put_lower_mnt:
        kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_upper_mnt:
        mntput(ufs->upper_mnt);
 out_put_lowerpath:
        for (i = 0; i < numlower; i++)
index 04e79d5..e9d401c 100644 (file)
@@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
  * After the last attribute is removed revert to original inode format,
  * making all literal area available to the data fork once more.
  */
-STATIC void
-xfs_attr_fork_reset(
+void
+xfs_attr_fork_remove(
        struct xfs_inode        *ip,
        struct xfs_trans        *tp)
 {
@@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
            (mp->m_flags & XFS_MOUNT_ATTR2) &&
            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
            !(args->op_flags & XFS_DA_OP_ADDNAME)) {
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
        } else {
                xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
                dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
@@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform(
        if (forkoff == -1) {
                ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
                ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
                goto out;
        }
 
index 025c4b8..882c8d3 100644 (file)
@@ -53,7 +53,7 @@ int   xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_list(struct xfs_attr_list_context *context);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int    xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
-
+void   xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
 
 /*
  * Internal routines when attribute fork size == XFS_LBSIZE(mp).
index aeffeaa..f1026e8 100644 (file)
@@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align(
                align_alen += temp;
                align_off -= temp;
        }
+
+       /* Same adjustment for the end of the requested area. */
+       temp = (align_alen % extsz);
+       if (temp)
+               align_alen += extsz - temp;
+
        /*
-        * Same adjustment for the end of the requested area.
+        * For large extent hint sizes, the aligned extent might be larger than
+        * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
+        * the length back under MAXEXTLEN. The outer allocation loops handle
+        * short allocation just fine, so it is safe to do this. We only want to
+        * do it when we are forced to, though, because it means more allocation
+        * operations are required.
         */
-       if ((temp = (align_alen % extsz))) {
-               align_alen += extsz - temp;
-       }
+       while (align_alen > MAXEXTLEN)
+               align_alen -= extsz;
+       ASSERT(align_alen <= MAXEXTLEN);
+
        /*
         * If the previous block overlaps with this proposed allocation
         * then move the start forward without adjusting the length.
@@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align(
                        return -EINVAL;
        } else {
                ASSERT(orig_off >= align_off);
-               ASSERT(orig_end <= align_off + align_alen);
+               /* see MAXEXTLEN handling above */
+               ASSERT(orig_end <= align_off + align_alen ||
+                      align_alen + extsz > MAXEXTLEN);
        }
 
 #ifdef DEBUG
@@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc(
        /* Figure out the extent size, adjust alen */
        extsz = xfs_get_extsz_hint(ip);
        if (extsz) {
-               /*
-                * Make sure we don't exceed a single extent length when we
-                * align the extent by reducing length we are going to
-                * allocate by the maximum amount extent size aligment may
-                * require.
-                */
-               alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
                error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
                                               1, 0, &aoff, &alen);
                ASSERT(!error);
index 07349a1..1c9e755 100644 (file)
@@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc(
         */
        newlen = args.mp->m_ialloc_inos;
        if (args.mp->m_maxicount &&
-           percpu_counter_read(&args.mp->m_icount) + newlen >
+           percpu_counter_read_positive(&args.mp->m_icount) + newlen >
                                                        args.mp->m_maxicount)
                return -ENOSPC;
        args.minlen = args.maxlen = args.mp->m_ialloc_blks;
@@ -1339,10 +1339,13 @@ xfs_dialloc(
         * If we have already hit the ceiling of inode blocks then clear
         * okalloc so we scan all available agi structures for a free
         * inode.
+        *
+        * Read rough value of mp->m_icount by percpu_counter_read_positive,
+        * which will sacrifice the preciseness but improve the performance.
         */
        if (mp->m_maxicount &&
-           percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
-                                                       mp->m_maxicount) {
+           percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos >
+                                                       mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }
index f9c1c64..3fbf167 100644 (file)
@@ -380,23 +380,31 @@ xfs_attr3_root_inactive(
        return error;
 }
 
+/*
+ * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
+ * removes both the on-disk and in-memory inode fork. Note that this also has to
+ * handle the condition of inodes without attributes but with an attribute fork
+ * configured, so we can't use xfs_inode_hasattr() here.
+ *
+ * The in-memory attribute fork is removed even on error.
+ */
 int
-xfs_attr_inactive(xfs_inode_t *dp)
+xfs_attr_inactive(
+       struct xfs_inode        *dp)
 {
-       xfs_trans_t *trans;
-       xfs_mount_t *mp;
-       int error;
+       struct xfs_trans        *trans;
+       struct xfs_mount        *mp;
+       int                     cancel_flags = 0;
+       int                     lock_mode = XFS_ILOCK_SHARED;
+       int                     error = 0;
 
        mp = dp->i_mount;
        ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
 
-       xfs_ilock(dp, XFS_ILOCK_SHARED);
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               xfs_iunlock(dp, XFS_ILOCK_SHARED);
-               return 0;
-       }
-       xfs_iunlock(dp, XFS_ILOCK_SHARED);
+       xfs_ilock(dp, lock_mode);
+       if (!XFS_IFORK_Q(dp))
+               goto out_destroy_fork;
+       xfs_iunlock(dp, lock_mode);
 
        /*
         * Start our first transaction of the day.
@@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
         * the inode in every transaction to let it float upward through
         * the log.
         */
+       lock_mode = 0;
        trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
        error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
-       if (error) {
-               xfs_trans_cancel(trans, 0);
-               return error;
-       }
-       xfs_ilock(dp, XFS_ILOCK_EXCL);
+       if (error)
+               goto out_cancel;
+
+       lock_mode = XFS_ILOCK_EXCL;
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
+       xfs_ilock(dp, lock_mode);
+
+       if (!XFS_IFORK_Q(dp))
+               goto out_cancel;
 
        /*
         * No need to make quota reservations here. We expect to release some
@@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
         */
        xfs_trans_ijoin(trans, dp, 0);
 
-       /*
-        * Decide on what work routines to call based on the inode size.
-        */
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               error = 0;
-               goto out;
+       /* invalidate and truncate the attribute fork extents */
+       if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+               error = xfs_attr3_root_inactive(&trans, dp);
+               if (error)
+                       goto out_cancel;
+
+               error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+               if (error)
+                       goto out_cancel;
        }
-       error = xfs_attr3_root_inactive(&trans, dp);
-       if (error)
-               goto out;
 
-       error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-       if (error)
-               goto out;
+       /* Reset the attribute fork - this also destroys the in-core fork */
+       xfs_attr_fork_remove(dp, trans);
 
        error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
+       xfs_iunlock(dp, lock_mode);
        return error;
 
-out:
-       xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
+out_cancel:
+       xfs_trans_cancel(trans, cancel_flags);
+out_destroy_fork:
+       /* kill the in-core attr fork before we drop the inode lock */
+       if (dp->i_afp)
+               xfs_idestroy_fork(dp, XFS_ATTR_FORK);
+       if (lock_mode)
+               xfs_iunlock(dp, lock_mode);
        return error;
 }
index 8121e75..3b75912 100644 (file)
@@ -124,7 +124,7 @@ xfs_iozero(
                status = 0;
        } while (count);
 
-       return (-status);
+       return status;
 }
 
 int
index d6ebc85..539a85f 100644 (file)
@@ -1946,21 +1946,17 @@ xfs_inactive(
        /*
         * If there are attributes associated with the file then blow them away
         * now.  The code calls a routine that recursively deconstructs the
-        * attribute fork.  We need to just commit the current transaction
-        * because we can't use it for xfs_attr_inactive().
+        * attribute fork. If also blows away the in-core attribute fork.
         */
-       if (ip->i_d.di_anextents > 0) {
-               ASSERT(ip->i_d.di_forkoff != 0);
-
+       if (XFS_IFORK_Q(ip)) {
                error = xfs_attr_inactive(ip);
                if (error)
                        return;
        }
 
-       if (ip->i_afp)
-               xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
+       ASSERT(!ip->i_afp);
        ASSERT(ip->i_d.di_anextents == 0);
+       ASSERT(ip->i_d.di_forkoff == 0);
 
        /*
         * Free the inode.
@@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout(
        if (error)
                return error;
 
-       /* Satisfy xfs_bumplink that this is a real tmpfile */
+       /*
+        * Prepare the tmpfile inode as if it were created through the VFS.
+        * Otherwise, the link increment paths will complain about nlink 0->1.
+        * Drop the link count as done by d_tmpfile(), complete the inode setup
+        * and flag it as linkable.
+        */
+       drop_nlink(VFS_I(tmpfile));
        xfs_finish_inode_setup(tmpfile);
        VFS_I(tmpfile)->i_state |= I_LINKABLE;
 
@@ -3151,7 +3153,7 @@ xfs_rename(
         * intermediate state on disk.
         */
        if (wip) {
-               ASSERT(wip->i_d.di_nlink == 0);
+               ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
                error = xfs_bumplink(tp, wip);
                if (error)
                        goto out_trans_abort;
index 2ce7ee3..6f23fbd 100644 (file)
@@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp)
        return xfs_sync_sb(mp, true);
 }
 
+/*
+ * Deltas for the inode count are +/-64, hence we use a large batch size
+ * of 128 so we don't need to take the counter lock on every update.
+ */
+#define XFS_ICOUNT_BATCH       128
 int
 xfs_mod_icount(
        struct xfs_mount        *mp,
        int64_t                 delta)
 {
-       /* deltas are +/-64, hence the large batch size of 128. */
-       __percpu_counter_add(&mp->m_icount, delta, 128);
-       if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
+       __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
+       if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
                ASSERT(0);
                percpu_counter_add(&mp->m_icount, -delta);
                return -EINVAL;
@@ -1113,6 +1117,14 @@ xfs_mod_ifree(
        return 0;
 }
 
+/*
+ * Deltas for the block count can vary from 1 to very large, but lock contention
+ * only occurs on frequent small block count updates such as in the delayed
+ * allocation path for buffered writes (page a time updates). Hence we set
+ * a large batch count (1024) to minimise global counter updates except when
+ * we get near to ENOSPC and we have to be very accurate with our updates.
+ */
+#define XFS_FDBLOCKS_BATCH     1024
 int
 xfs_mod_fdblocks(
        struct xfs_mount        *mp,
@@ -1151,25 +1163,19 @@ xfs_mod_fdblocks(
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
-        * batch size is set to a maximum of 1024 blocks - if we are
-        * allocating of freeing extents larger than this then we aren't
-        * going to be hammering the counter lock so a lock per update
-        * is not a problem.
-        *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
-#define __BATCH        1024
-       if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+       if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
+                                    XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
-               batch = __BATCH;
-#undef __BATCH
+               batch = XFS_FDBLOCKS_BATCH;
 
        __percpu_counter_add(&mp->m_fdblocks, delta, batch);
-       if (percpu_counter_compare(&mp->m_fdblocks,
-                                  XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+       if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
+                                    XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }
index 9db0423..90ccba7 100644 (file)
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
 }
 #endif
 
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+       return ioremap_nocache(offset, size);
+}
+#endif
+
 #ifndef ioremap_wc
 #define ioremap_wc ioremap_wc
 static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
index 7f9a516..5d93a66 100644 (file)
@@ -821,8 +821,6 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
-extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
-
 /*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
index ae2982c..656da2a 100644 (file)
@@ -17,7 +17,7 @@
 #define PHY_ID_BCM7250                 0xae025280
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
-#define PHY_ID_BCM7425                 0x03625e60
+#define PHY_ID_BCM7425                 0x600d86b0
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7439_2               0xae025080
index 27e285b..59915ea 100644 (file)
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
        return 1;
 }
 
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       set_bit(0, cpumask_bits(dstp));
-
        return 0;
 }
 
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
index 0408421..0042bf3 100644 (file)
@@ -74,7 +74,7 @@ struct sensor_hub_pending {
  * @usage:             Usage id for this hub device instance.
  * @start_collection_index: Starting index for a phy type collection
  * @end_collection_index: Last index for a phy type collection
- * @mutex:             synchronizing mutex.
+ * @mutex_ptr:         synchronizing mutex pointer.
  * @pending:           Holds information of pending sync read request.
  */
 struct hid_sensor_hub_device {
@@ -84,7 +84,7 @@ struct hid_sensor_hub_device {
        u32 usage;
        int start_collection_index;
        int end_collection_index;
-       struct mutex mutex;
+       struct mutex *mutex_ptr;
        struct sensor_hub_pending pending;
 };
 
index 986f2bf..04cce4d 100644 (file)
@@ -111,6 +111,13 @@ static inline void arch_phys_wc_del(int handle)
 }
 
 #define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+       return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
+#endif
 #endif
 
 #endif /* _LINUX_IO_H */
index 5fc3d10..2b6a204 100644 (file)
@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
 }
 
 #if BITS_PER_LONG < 64
-extern u64 __ktime_divns(const ktime_t kt, s64 div);
-static inline u64 ktime_divns(const ktime_t kt, s64 div)
+extern s64 __ktime_divns(const ktime_t kt, s64 div);
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
 {
+       /*
+        * Negative divisors could cause an inf loop,
+        * so bug out here.
+        */
+       BUG_ON(div < 0);
        if (__builtin_constant_p(div) && !(div >> 32)) {
-               u64 ns = kt.tv64;
-               do_div(ns, div);
-               return ns;
+               s64 ns = kt.tv64;
+               u64 tmp = ns < 0 ? -ns : ns;
+
+               do_div(tmp, div);
+               return ns < 0 ? -tmp : tmp;
        } else {
                return __ktime_divns(kt, div);
        }
 }
 #else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div)          (u64)((kt).tv64 / (div))
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+       /*
+        * 32-bit implementation cannot handle negative divisors,
+        * so catch them on 64bit as well.
+        */
+       WARN_ON(div < 0);
+       return kt.tv64 / div;
+}
 #endif
 
 static inline s64 ktime_to_us(const ktime_t kt)
index 50e5009..84a1094 100644 (file)
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+       return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
                return 0;
 }
 
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+       return percpu_counter_compare(fbc, rhs);
+}
+
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
index a947ab8..533d980 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __LINUX_PLATFORM_DATA_SI5351_H__
 #define __LINUX_PLATFORM_DATA_SI5351_H__
 
-struct clk;
-
 /**
  * enum si5351_pll_src - Si5351 pll clock source
  * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
  * @clkout: array of clkout configuration
  */
 struct si5351_platform_data {
-       struct clk *clk_xtal;
-       struct clk *clk_clkin;
        enum si5351_pll_src pll_src[2];
        struct si5351_clkout_config clkout[8];
 };
index dbcbcc5..843ceca 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
+#include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/jhash.h>
@@ -100,6 +101,7 @@ struct rhashtable;
  * @key_len: Length of key
  * @key_offset: Offset of key in struct to be hashed
  * @head_offset: Offset of rhash_head in struct to be hashed
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
  * @nulls_base: Base value to generate nulls marker
@@ -115,6 +117,7 @@ struct rhashtable_params {
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
+       unsigned int            insecure_max_entries;
        unsigned int            max_size;
        unsigned int            min_size;
        u32                     nulls_base;
@@ -286,6 +289,18 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
                (!ht->p.max_size || tbl->size < ht->p.max_size);
 }
 
+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht:                hash table
+ * @tbl:       current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+                                     const struct bucket_table *tbl)
+{
+       return ht->p.insecure_max_entries &&
+              atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}
+
 /* The bucket lock is selected based on the hash and protects mutations
  * on a group of hash buckets.
  *
@@ -589,6 +604,10 @@ restart:
                goto out;
        }
 
+       err = -E2BIG;
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               goto out;
+
        if (unlikely(rht_grow_above_100(ht, tbl))) {
 slow_path:
                spin_unlock_bh(lock);
index 66e374d..f15154a 100644 (file)
@@ -176,6 +176,7 @@ struct nf_bridge_info {
        struct net_device       *physindev;
        struct net_device       *physoutdev;
        char                    neigh_header[8];
+       __be32                  ipv4_daddr;
 };
 #endif
 
index 3b29115..e8bbf40 100644 (file)
@@ -158,6 +158,8 @@ struct tcp_sock {
                                 * sum(delta(snd_una)), or how many bytes
                                 * were acked.
                                 */
+       struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
+
        u32     snd_una;        /* First byte we want an ack for        */
        u32     snd_sml;        /* Last byte of the most recently transmitted small packet */
        u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
index 48a8158..0320bbb 100644 (file)
@@ -98,7 +98,8 @@ struct inet_connection_sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-       __u8                      icsk_ca_state:7,
+       __u8                      icsk_ca_state:6,
+                                 icsk_ca_setsockopt:1,
                                  icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
@@ -129,9 +130,10 @@ struct inet_connection_sock {
 
                u32               probe_timestamp;
        } icsk_mtup;
-       u32                       icsk_ca_priv[16];
        u32                       icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE      (16 * sizeof(u32))
+
+       u64                       icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE      (8 * sizeof(u64))
 };
 
 #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
index 8e3668b..fc57f6b 100644 (file)
@@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data {
 };
 
 /**
- * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
  * @data: See &enum ieee80211_rssi_event_data
  */
 struct ieee80211_rssi_event {
@@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status {
 };
 
 /**
- * enum ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
  * @data: See &enum ieee80211_mlme_event_data
  * @status: See &enum ieee80211_mlme_event_status
  * @reason: the reason code if applicable
@@ -401,9 +401,10 @@ struct ieee80211_mlme_event {
 
 /**
  * struct ieee80211_event - event to be sent to the driver
- * @type The event itself. See &enum ieee80211_event_type.
+ * @type: The event itself. See &enum ieee80211_event_type.
  * @rssi: relevant if &type is %RSSI_EVENT
  * @mlme: relevant if &type is %AUTH_EVENT
+ * @u:    union holding the above two fields
  */
 struct ieee80211_event {
        enum ieee80211_event_type type;
index c56a438..ce13cf2 100644 (file)
@@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
 /* Map v4 address to v4-mapped v6 address */
 static inline void sctp_v4_map_v6(union sctp_addr *addr)
 {
+       __be16 port;
+
+       port = addr->v4.sin_port;
+       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+       addr->v6.sin6_port = port;
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_flowinfo = 0;
        addr->v6.sin6_scope_id = 0;
-       addr->v6.sin6_port = addr->v4.sin_port;
-       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
        addr->v6.sin6_addr.s6_addr32[1] = 0;
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
index d61be72..5f12257 100644 (file)
@@ -1,9 +1,7 @@
 #ifndef TARGET_CORE_BACKEND_H
 #define TARGET_CORE_BACKEND_H
 
-#define TRANSPORT_PLUGIN_PHBA_PDEV             1
-#define TRANSPORT_PLUGIN_VHBA_PDEV             2
-#define TRANSPORT_PLUGIN_VHBA_VDEV             3
+#define TRANSPORT_FLAG_PASSTHROUGH             1
 
 struct target_backend_cits {
        struct config_item_type tb_dev_cit;
@@ -22,7 +20,7 @@ struct se_subsystem_api {
        char inquiry_rev[4];
        struct module *owner;
 
-       u8 transport_type;
+       u8 transport_flags;
 
        int (*attach_hba)(struct se_hba *, u32);
        void (*detach_hba)(struct se_hba *);
@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
+sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
 #endif /* TARGET_CORE_BACKEND_H */
index 25bb04c..b99c011 100644 (file)
@@ -40,8 +40,6 @@ struct target_fabric_configfs {
        struct config_item      *tf_fabric;
        /* Passed from fabric modules */
        struct config_item_type *tf_fabric_cit;
-       /* Pointer to target core subsystem */
-       struct configfs_subsystem *tf_subsys;
        /* Pointer to fabric's struct module */
        struct module *tf_module;
        struct target_core_fabric_ops tf_ops;
index 17c7f5a..0f4dc37 100644 (file)
@@ -4,7 +4,6 @@
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
-       struct configfs_subsystem *tf_subsys;
        char *(*get_fabric_name)(void);
        u8 (*get_fabric_proto_ident)(struct se_portal_group *);
        char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -109,6 +108,9 @@ struct target_core_fabric_ops {
 int target_register_template(const struct target_core_fabric_ops *fo);
 void target_unregister_template(const struct target_core_fabric_ops *fo);
 
+int target_depend_item(struct config_item *item);
+void target_undepend_item(struct config_item *item);
+
 struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
                unsigned int);
index 81ea598..f7554fd 100644 (file)
@@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree,
        TP_ARGS(call_site, ptr)
 );
 
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
 
        TP_PROTO(unsigned long call_site, const void *ptr),
 
-       TP_ARGS(call_site, ptr)
+       TP_ARGS(call_site, ptr),
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus its safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id()))
 );
 
-TRACE_EVENT(mm_page_free,
+TRACE_EVENT_CONDITION(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
        TP_ARGS(page, order),
 
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus its safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn             )
                __field(        unsigned int,   order           )
@@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
        TP_ARGS(page, order, migratetype)
 );
 
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
 
        TP_PROTO(struct page *page, unsigned int order, int migratetype),
 
        TP_ARGS(page, order, migratetype),
 
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus its safe
+        * to use raw_smp_processor_id.
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  pfn             )
+               __field(        unsigned int,   order           )
+               __field(        int,            migratetype     )
+       ),
+
+       TP_fast_assign(
+               __entry->pfn            = page ? page_to_pfn(page) : -1UL;
+               __entry->order          = order;
+               __entry->migratetype    = migratetype;
+       ),
+
        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                pfn_to_page(__entry->pfn), __entry->pfn,
                __entry->order, __entry->migratetype)
index 9993a42..ef9f80f 100644 (file)
@@ -42,6 +42,9 @@ enum tcp_conntrack {
 /* The field td_maxack has been set */
 #define IP_CT_TCP_FLAG_MAXACK_SET              0x20
 
+/* Marks possibility for expected RFC5961 challenge ACK */
+#define IP_CT_EXP_CHALLENGE_ACK                0x40
+
 struct nf_ct_tcp_flags {
        __u8 flags;
        __u8 mask;
index 974db03..17fb02f 100644 (file)
@@ -337,7 +337,7 @@ struct rtnexthop {
 #define RTNH_F_DEAD            1       /* Nexthop is dead (used by multipath)  */
 #define RTNH_F_PERVASIVE       2       /* Do recursive gateway lookup  */
 #define RTNH_F_ONLINK          4       /* Gateway is forced on link    */
-#define RTNH_F_EXTERNAL                8       /* Route installed externally   */
+#define RTNH_F_OFFLOAD         8       /* offloaded route */
 
 /* Macros to handle hexthops */
 
index 984169a..d7f1cbc 100644 (file)
@@ -26,6 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE. */
 #include <linux/types.h>
+#include <linux/virtio_types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
index 5321cd9..7d95fdf 100644 (file)
@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags, const char *devname,
                              void *dev_id);
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname,
index 42a1d2a..cfc9e84 100644 (file)
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
        module_bug_cleanup(mod);
        mutex_unlock(&module_mutex);
 
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+
        /* we can't deallocate the module until we clear memory protection */
        unset_module_init_ro_nx(mod);
        unset_module_core_ro_nx(mod);
index 57bd333..1236732 100644 (file)
@@ -4389,10 +4389,7 @@ long __sched io_schedule_timeout(long timeout)
        long ret;
 
        current->in_iowait = 1;
-       if (old_iowait)
-               blk_schedule_flush_plug(current);
-       else
-               blk_flush_plug(current);
+       blk_schedule_flush_plug(current);
 
        delayacct_blkio_start();
        rq = raw_rq();
index 76d4bd9..93ef719 100644 (file)
@@ -266,21 +266,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 __ktime_divns(const ktime_t kt, s64 div)
+s64 __ktime_divns(const ktime_t kt, s64 div)
 {
-       u64 dclc;
        int sft = 0;
+       s64 dclc;
+       u64 tmp;
 
        dclc = ktime_to_ns(kt);
+       tmp = dclc < 0 ? -dclc : dclc;
+
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
-       dclc >>= sft;
-       do_div(dclc, (unsigned long) div);
-
-       return dclc;
+       tmp >>= sft;
+       do_div(tmp, (unsigned long) div);
+       return dclc < 0 ? -tmp : tmp;
 }
 EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
index 2316f50..581a68a 100644 (file)
@@ -41,6 +41,8 @@
 #define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
 #define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
 
+static DEFINE_MUTEX(watchdog_proc_mutex);
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
@@ -608,26 +610,36 @@ void watchdog_nmi_enable_all(void)
 {
        int cpu;
 
-       if (!watchdog_user_enabled)
-               return;
+       mutex_lock(&watchdog_proc_mutex);
+
+       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+               goto unlock;
 
        get_online_cpus();
        for_each_online_cpu(cpu)
                watchdog_nmi_enable(cpu);
        put_online_cpus();
+
+unlock:
+       mutex_unlock(&watchdog_proc_mutex);
 }
 
 void watchdog_nmi_disable_all(void)
 {
        int cpu;
 
+       mutex_lock(&watchdog_proc_mutex);
+
        if (!watchdog_running)
-               return;
+               goto unlock;
 
        get_online_cpus();
        for_each_online_cpu(cpu)
                watchdog_nmi_disable(cpu);
        put_online_cpus();
+
+unlock:
+       mutex_unlock(&watchdog_proc_mutex);
 }
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
@@ -744,8 +756,6 @@ static int proc_watchdog_update(void)
 
 }
 
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
 /*
  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
  *
index 830dd5d..5f62708 100644 (file)
@@ -139,64 +139,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       cpumask_var_t mask;
        int cpu;
-       int ret = 0;
-
-       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-               return -ENOMEM;
 
+       /* Wrap: we always want a cpu. */
        i %= num_online_cpus();
 
-       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-               /* Use all online cpu's for non numa aware system */
-               cpumask_copy(mask, cpu_online_mask);
+       if (node == -1) {
+               for_each_cpu(cpu, cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
        } else {
-               int n;
-
-               cpumask_and(mask,
-                           cpumask_of_node(numa_node), cpu_online_mask);
-
-               n = cpumask_weight(mask);
-               if (i >= n) {
-                       i -= n;
-
-                       /* If index > number of local cpu's, mask out local
-                        * cpu's
-                        */
-                       cpumask_andnot(mask, cpu_online_mask, mask);
+               /* NUMA first. */
+               for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
+
+               for_each_cpu(cpu, cpu_online_mask) {
+                       /* Skip NUMA nodes, done above. */
+                       if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+                               continue;
+
+                       if (i-- == 0)
+                               return cpu;
                }
        }
-
-       for_each_cpu(cpu, mask) {
-               if (--i < 0)
-                       goto out;
-       }
-
-       ret = -EAGAIN;
-
-out:
-       free_cpumask_var(mask);
-
-       if (!ret)
-               cpumask_set_cpu(cpu, dstp);
-
-       return ret;
+       BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
index 48144cd..f051d69 100644 (file)
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
        s64     count;
 
        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
-       if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+       if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
        else
                return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
 
 static int __init percpu_counter_startup(void)
 {
index b28df40..4396434 100644 (file)
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -446,6 +447,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;
 
+       err = -E2BIG;
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               goto exit;
+
        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
@@ -738,6 +743,12 @@ int rhashtable_init(struct rhashtable *ht,
        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+       if (params->insecure_max_entries)
+               ht->p.insecure_max_entries =
+                       rounddown_pow_of_two(params->insecure_max_entries);
+       else
+               ht->p.insecure_max_entries = ht->p.max_size * 2;
+
        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
        /* The maximum (not average) chain length grows with the
index 98a30a5..59555f0 100644 (file)
@@ -443,7 +443,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too.  */
                vlan_group_for_each_dev(grp, i, vlandev) {
-                       flgs = vlandev->flags;
+                       flgs = dev_get_flags(vlandev);
                        if (flgs & IFF_UP)
                                continue;
 
index 4663c3d..c4802f3 100644 (file)
@@ -2854,9 +2854,11 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
                         * state. If we were running both LE and BR/EDR inquiry
                         * simultaneously, and BR/EDR inquiry is already
                         * finished, stop discovery, otherwise BR/EDR inquiry
-                        * will stop discovery when finished.
+                        * will stop discovery when finished. If we will resolve
+                        * remote device name, do not change discovery state.
                         */
-                       if (!test_bit(HCI_INQUIRY, &hdev->flags))
+                       if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+                           hdev->discovery.state != DISCOVERY_RESOLVING)
                                hci_discovery_set_state(hdev,
                                                        DISCOVERY_STOPPED);
                } else {
index 4b6722f..22fd041 100644 (file)
@@ -1072,7 +1072,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
                err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
                                                 vid);
-               if (!err)
+               if (err)
                        break;
        }
 
@@ -1822,7 +1822,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
        if (query->startup_sent < br->multicast_startup_query_count)
                query->startup_sent++;
 
-       RCU_INIT_POINTER(querier, NULL);
+       RCU_INIT_POINTER(querier->port, NULL);
        br_multicast_send_query(br, NULL, query);
        spin_unlock(&br->multicast_lock);
 }
index ab55e24..60ddfbe 100644 (file)
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
@@ -350,24 +346,15 @@ free_skb:
        return 0;
 }
 
-static bool dnat_took_place(const struct sk_buff *skb)
+static bool daddr_was_changed(const struct sk_buff *skb,
+                             const struct nf_bridge_info *nf_bridge)
 {
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct;
-
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || nf_ct_is_untracked(ct))
-               return false;
-
-       return test_bit(IPS_DST_NAT_BIT, &ct->status);
-#else
-       return false;
-#endif
+       return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
 }
 
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
+ * This is also true when SNAT takes place (for the reply direction).
  *
  * There are two cases to consider:
  * 1. The packet was DNAT'ed to a device in the same bridge
@@ -421,7 +408,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
                nf_bridge->pkt_otherhost = false;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-       if (dnat_took_place(skb)) {
+       if (daddr_was_changed(skb, nf_bridge)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -632,6 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
+       struct nf_bridge_info *nf_bridge;
        struct net_bridge_port *p;
        struct net_bridge *br;
        __u32 len = nf_bridge_encap_header_len(skb);
@@ -669,6 +657,9 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
        if (!setup_pre_routing(skb))
                return NF_DROP;
 
+       nf_bridge = nf_bridge_info_get(skb);
+       nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
+
        skb->protocol = htons(ETH_P_IP);
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
index 4fcaa67..7caf7fa 100644 (file)
@@ -97,7 +97,9 @@ static void br_forward_delay_timer_expired(unsigned long arg)
                netif_carrier_on(br->dev);
        }
        br_log_state(p);
+       rcu_read_lock();
        br_ifinfo_notify(RTM_NEWLINK, p);
+       rcu_read_unlock();
        spin_unlock(&br->lock);
 }
 
index 4ec0c80..112ad78 100644 (file)
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb;
 
                lock_sock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);
 
index 41a4abc..c4ec923 100644 (file)
@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
-
-       list_del_init(&req->r_req_lru_item); /* can be on notarget */
        ceph_osdc_put_request(req);
 }
 
@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
                err = __map_request(osdc, req,
                                    force_resend || force_resend_writes);
                dout("__map_request returned %d\n", err);
-               if (err == 0)
-                       continue;  /* no change and no osd was specified */
                if (err < 0)
                        continue;  /* hrm! */
-               if (req->r_osd == NULL) {
-                       dout("tid %llu maps to no valid osd\n", req->r_tid);
-                       needmap++;  /* request a newer map */
-                       continue;
-               }
+               if (req->r_osd == NULL || err > 0) {
+                       if (req->r_osd == NULL) {
+                               dout("lingering %p tid %llu maps to no osd\n",
+                                    req, req->r_tid);
+                               /*
+                                * A homeless lingering request makes
+                                * no sense, as it's job is to keep
+                                * a particular OSD connection open.
+                                * Request a newer map and kick the
+                                * request, knowing that it won't be
+                                * resent until we actually get a map
+                                * that can tell us where to send it.
+                                */
+                               needmap++;
+                       }
 
-               dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-                    req->r_osd ? req->r_osd->o_osd : -1);
-               __register_request(osdc, req);
-               __unregister_linger_request(osdc, req);
+                       dout("kicking lingering %p tid %llu osd%d\n", req,
+                            req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+                       __register_request(osdc, req);
+                       __unregister_linger_request(osdc, req);
+               }
        }
        reset_changed_osds(osdc);
        mutex_unlock(&osdc->request_mutex);
index 666e092..8de3682 100644 (file)
@@ -2416,6 +2416,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
 {
        struct sk_buff *skb;
 
+       if (dev->reg_state != NETREG_REGISTERED)
+               return;
+
        skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
        if (skb)
                rtmsg_ifinfo_send(skb, dev, flags);
index e6f6cc3..392e29a 100644 (file)
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         */
        ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ds->dst = dst;
        ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
        ret = dsa_switch_setup_one(ds, parent);
        if (ret)
-               return NULL;
+               return ERR_PTR(ret);
 
        return ds;
 }
index 421a80b..30b544f 100644 (file)
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
index e13fcc6..09b62e1 100644 (file)
@@ -1164,6 +1164,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        state = fa->fa_state;
                        new_fa->fa_state = state & ~FA_S_ACCESSED;
                        new_fa->fa_slen = fa->fa_slen;
+                       new_fa->tb_id = tb->tb_id;
 
                        err = netdev_switch_fib_ipv4_add(key, plen, fi,
                                                         new_fa->fa_tos,
@@ -1764,7 +1765,7 @@ void fib_table_flush_external(struct fib_table *tb)
                        /* record local slen */
                        slen = fa->fa_slen;
 
-                       if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+                       if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
                                continue;
 
                        netdev_switch_fib_ipv4_del(n->key,
index 9f7269f..0c15208 100644 (file)
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
                        goto drop;
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-               skb->mark = be32_to_cpu(tunnel->parms.i_key);
 
                return xfrm_input(skb, nexthdr, spi, encap_type);
        }
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!tunnel)
                return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(tunnel->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl, 0, sizeof(fl));
 
-       skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
        return vti_xmit(skb, dev, &fl);
 }
 
index 13bfe84..a612007 100644 (file)
@@ -1075,6 +1075,9 @@ static int do_replace(struct net *net, const void __user *user,
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1499,6 +1502,9 @@ static int compat_do_replace(struct net *net, void __user *user,
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index c69db7f..2d0e265 100644 (file)
@@ -1262,6 +1262,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1809,6 +1812,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index bff62fc..f45f2a1 100644 (file)
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
        bool send;
        int code;
 
+       /* IP on this device is disabled. */
+       if (!in_dev)
+               goto out;
+
        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
index 46efa03..f1377f2 100644 (file)
@@ -402,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
+       u64_stats_init(&tp->syncp);
 
        tp->reordering = sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
@@ -2598,6 +2599,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
+       unsigned int start;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2665,10 +2667,11 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        rate = READ_ONCE(sk->sk_max_pacing_rate);
        info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
 
-       spin_lock_bh(&sk->sk_lock.slock);
-       info->tcpi_bytes_acked = tp->bytes_acked;
-       info->tcpi_bytes_received = tp->bytes_received;
-       spin_unlock_bh(&sk->sk_lock.slock);
+       do {
+               start = u64_stats_fetch_begin_irq(&tp->syncp);
+               info->tcpi_bytes_acked = tp->bytes_acked;
+               info->tcpi_bytes_received = tp->bytes_received;
+       } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
 
index 7a5ae50..84be008 100644 (file)
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
+       icsk->icsk_ca_setsockopt = 1;
 
        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        rcu_read_lock();
        ca = __tcp_ca_find_autoload(name);
        /* No change asking for existing value */
-       if (ca == icsk->icsk_ca_ops)
+       if (ca == icsk->icsk_ca_ops) {
+               icsk->icsk_ca_setsockopt = 1;
                goto out;
+       }
        if (!ca)
                err = -ENOENT;
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
index 3c673d5..46b087a 100644 (file)
@@ -206,6 +206,10 @@ static bool tcp_fastopen_create_child(struct sock *sk,
                        skb_set_owner_r(skb2, child);
                        __skb_queue_tail(&child->sk_receive_queue, skb2);
                        tp->syn_data_acked = 1;
+
+                       /* u64_stats_update_begin(&tp->syncp) not needed here,
+                        * as we certainly are not changing upper 32bit value (0)
+                        */
                        tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
                } else {
                        end_seq = TCP_SKB_CB(skb)->seq + 1;
index bc790ea..c9ab964 100644 (file)
@@ -2698,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
+       if ((flag & FLAG_SND_UNA_ADVANCED) &&
+           tcp_try_undo_loss(sk, false))
+               return;
+
        if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
                /* Step 3.b. A timeout is spurious if not all data are
                 * lost, i.e., never-retransmitted data are (s)acked.
                 */
-               if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+               if ((flag & FLAG_ORIG_SACK_ACKED) &&
+                   tcp_try_undo_loss(sk, true))
                        return;
 
-               if (after(tp->snd_nxt, tp->high_seq) &&
-                   (flag & FLAG_DATA_SACKED || is_dupack)) {
-                       tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+               if (after(tp->snd_nxt, tp->high_seq)) {
+                       if (flag & FLAG_DATA_SACKED || is_dupack)
+                               tp->frto = 0; /* Step 3.a. loss was real */
                } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                        tp->high_seq = tp->snd_nxt;
                        __tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2732,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                else if (flag & FLAG_SND_UNA_ADVANCED)
                        tcp_reset_reno_sack(tp);
        }
-       if (tcp_try_undo_loss(sk, false))
-               return;
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3283,7 +3286,9 @@ static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
 {
        u32 delta = ack - tp->snd_una;
 
+       u64_stats_update_begin(&tp->syncp);
        tp->bytes_acked += delta;
+       u64_stats_update_end(&tp->syncp);
        tp->snd_una = ack;
 }
 
@@ -3292,7 +3297,9 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 {
        u32 delta = seq - tp->rcv_nxt;
 
+       u64_stats_update_begin(&tp->syncp);
        tp->bytes_received += delta;
+       u64_stats_update_end(&tp->syncp);
        tp->rcv_nxt = seq;
 }
 
index e5d7649..17e7339 100644 (file)
@@ -300,7 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
-                       tw->tw_flowlabel = np->flow_label >> 12;
+                       tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
                rcu_read_unlock();
        }
 
-       if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+       /* If no valid choice made yet, assign current system default ca. */
+       if (!ca_got_dst &&
+           (!icsk->icsk_ca_setsockopt ||
+            !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);
 
        tcp_set_ca_state(sk, TCP_CA_Open);
index d10b7e0..1c92ea6 100644 (file)
@@ -1345,10 +1345,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
index 31f1b5d..7c07ce3 100644 (file)
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
index 96dbfff..bde57b1 100644 (file)
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
+       struct rt6_info **fallback_ins = NULL;
        int replace = (info->nlh &&
                       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
        int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                            (info->nlh->nlmsg_flags & NLM_F_EXCL))
                                return -EEXIST;
                        if (replace) {
-                               found++;
-                               break;
+                               if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                       found++;
+                                       break;
+                               }
+                               if (rt_can_ecmp)
+                                       fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
                        }
 
                        if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                if (iter->rt6i_metric > rt->rt6i_metric)
                        break;
 
+next_iter:
                ins = &iter->dst.rt6_next;
        }
 
+       if (fallback_ins && !found) {
+               /* No ECMP-able route found, replace first non-ECMP one */
+               ins = fallback_ins;
+               iter = *ins;
+               found++;
+       }
+
        /* Reset round-robin state, if necessary */
        if (ins == &fn->leaf)
                fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ add:
                }
 
        } else {
+               int nsiblings;
+
                if (!found) {
                        if (add)
                                goto add;
@@ -835,8 +851,27 @@ add:
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               nsiblings = iter->rt6i_nsiblings;
                fib6_purge_rt(iter, fn, info->nl_net);
                rt6_release(iter);
+
+               if (nsiblings) {
+                       /* Replacing an ECMP route, remove all siblings */
+                       ins = &rt->dst.rt6_next;
+                       iter = *ins;
+                       while (iter) {
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->dst.rt6_next;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
+                                       rt6_release(iter);
+                                       nsiblings--;
+                               } else {
+                                       ins = &iter->dst.rt6_next;
+                               }
+                               iter = *ins;
+                       }
+                       WARN_ON(nsiblings != 0);
+               }
        }
 
        return 0;
index c217775..bc09cb9 100644 (file)
@@ -1300,8 +1300,10 @@ emsgsize:
 
        /* If this is the first and only packet and device
         * supports checksum offloading, let's use it.
+        * Use transhdrlen, same as IPv4, because partial
+        * sums only work when transhdrlen is set.
         */
-       if (!skb && sk->sk_protocol == IPPROTO_UDP &&
+       if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
            length + fragheaderlen < mtu &&
            rt->dst.dev->features & NETIF_F_V6_CSUM &&
            !exthdrlen)
index ed9d681..0224c03 100644 (file)
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
                }
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-               skb->mark = be32_to_cpu(t->parms.i_key);
 
                rcu_read_unlock();
 
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!t)
                return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(t->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        struct net_device *tdev;
        struct xfrm_state *x;
        int err = -1;
+       int mtu;
 
        if (!dst)
                goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
 
+       mtu = dst_mtu(dst);
+       if (!skb->ignore_df && skb->len > mtu) {
+               skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               else
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+
+               return -EMSGSIZE;
+       }
+
        err = dst_output(skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        int ret;
 
        memset(&fl, 0, sizeof(fl));
-       skb->mark = be32_to_cpu(t->parms.o_key);
 
        switch (skb->protocol) {
        case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_err;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
        ret = vti6_xmit(skb, dev, &fl);
        if (ret < 0)
                goto tx_err;
index 1a732a1..62f5b0d 100644 (file)
@@ -1275,6 +1275,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1822,6 +1825,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index d358888..c73ae50 100644 (file)
@@ -2504,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
        int attrlen;
        int err = 0, last_err = 0;
 
+       remaining = cfg->fc_mp_len;
 beginning:
        rtnh = (struct rtnexthop *)cfg->fc_mp;
-       remaining = cfg->fc_mp_len;
 
        /* Parse a Multipath Entry */
        while (rtnh_ok(rtnh, remaining)) {
@@ -2536,15 +2536,19 @@ beginning:
                                 * next hops that have been already added.
                                 */
                                add = 0;
+                               remaining = cfg->fc_mp_len - remaining;
                                goto beginning;
                        }
                }
                /* Because each route is added like a single route we remove
-                * this flag after the first nexthop (if there is a collision,
-                * we have already fail to add the first nexthop:
-                * fib6_add_rt2node() has reject it).
+                * these flags after the first nexthop: if there is a collision,
+                * we have already failed to add the first nexthop:
+                * fib6_add_rt2node() has rejected it; when replacing, old
+                * nexthops have been replaced by first new, the rest should
+                * be added to it.
                 */
-               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                    NLM_F_REPLACE);
                rtnh = rtnh_next(rtnh, &remaining);
        }
 
index b6575d6..3adffb3 100644 (file)
@@ -914,7 +914,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-                       tw->tw_tclass, (tw->tw_flowlabel << 12));
+                       tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 
        inet_twsk_put(tw);
 }
index 3477c91..e51fc3e 100644 (file)
@@ -525,10 +525,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
@@ -731,7 +729,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
            (inet->inet_dport && inet->inet_dport != rmt_port) ||
            (!ipv6_addr_any(&sk->sk_v6_daddr) &&
                    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+           (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+                   !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
                return false;
        if (!inet6_mc_check(sk, loc_addr, rmt_addr))
                return false;
index 265e427..ff347a0 100644 (file)
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
                                           struct ieee80211_roc_work *new_roc,
                                           struct ieee80211_roc_work *cur_roc)
 {
-       unsigned long j = jiffies;
-       unsigned long cur_roc_end = cur_roc->hw_start_time +
-                                   msecs_to_jiffies(cur_roc->duration);
-       struct ieee80211_roc_work *next_roc;
-       int new_dur;
+       unsigned long now = jiffies;
+       unsigned long remaining = cur_roc->hw_start_time +
+                                 msecs_to_jiffies(cur_roc->duration) -
+                                 now;
 
        if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
                return false;
 
-       if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+       /* if it doesn't fit entirely, schedule a new one */
+       if (new_roc->duration > jiffies_to_msecs(remaining))
                return false;
 
        ieee80211_handle_roc_started(new_roc);
 
-       new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-       /* cur_roc is long enough - add new_roc to the dependents list. */
-       if (new_dur <= 0) {
-               list_add_tail(&new_roc->list, &cur_roc->dependents);
-               return true;
-       }
-
-       new_roc->duration = new_dur;
-
-       /*
-        * if cur_roc was already coalesced before, we might
-        * want to extend the next roc instead of adding
-        * a new one.
-        */
-       next_roc = list_entry(cur_roc->list.next,
-                             struct ieee80211_roc_work, list);
-       if (&next_roc->list != &local->roc_list &&
-           next_roc->chan == new_roc->chan &&
-           next_roc->sdata == new_roc->sdata &&
-           !WARN_ON(next_roc->started)) {
-               list_add_tail(&new_roc->list, &next_roc->dependents);
-               next_roc->duration = max(next_roc->duration,
-                                        new_roc->duration);
-               next_roc->type = max(next_roc->type, new_roc->type);
-               return true;
-       }
-
-       /* add right after cur_roc */
-       list_add(&new_roc->list, &cur_roc->list);
-
+       /* add to dependents so we send the expired event properly */
+       list_add_tail(&new_roc->list, &cur_roc->dependents);
        return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
                         * In the offloaded ROC case, if it hasn't begun, add
                         * this new one to the dependent list to be handled
                         * when the master one begins. If it has begun,
-                        * check that there's still a minimum time left and
-                        * if so, start this one, transmitting the frame, but
-                        * add it to the list directly after this one with
-                        * a reduced time so we'll ask the driver to execute
-                        * it right after finishing the previous one, in the
-                        * hope that it'll also be executed right afterwards,
-                        * effectively extending the old one.
-                        * If there's no minimum time left, just add it to the
-                        * normal list.
-                        * TODO: the ROC type is ignored here, assuming that it
-                        * is better to immediately use the current ROC.
+                        * check if it fits entirely within the existing one,
+                        * in which case it will just be dependent as well.
+                        * Otherwise, schedule it by itself.
                         */
                        if (!tmp->hw_begun) {
                                list_add_tail(&roc->list, &tmp->dependents);
index ab46ab4..c0a9187 100644 (file)
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *     to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *     reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
        IEEE80211_RX_BEACON_REPORTED    = BIT(1),
+       IEEE80211_RX_REORDER_TIMER      = BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
        u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT 1
-#else
-#define IEEE80211_ROC_MIN_LEFT (HZ/100)
-#endif
-
 struct ieee80211_roc_work {
        struct list_head list;
        struct list_head dependents;
index bab5c63..84cef60 100644 (file)
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
                       sizeof(sdata->vif.hw_queue));
                sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+               mutex_lock(&local->key_mtx);
+               sdata->crypto_tx_tailroom_needed_cnt +=
+                       master->crypto_tx_tailroom_needed_cnt;
+               mutex_unlock(&local->key_mtx);
+
                break;
                }
        case NL80211_IFTYPE_AP:
index 2291cd7..a907f2d 100644 (file)
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
        lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       if (sdata->vif.type != NL80211_IFTYPE_AP)
+               return;
+
+       mutex_lock(&sdata->local->mtx);
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
        /*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
         * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
         */
 
+       update_vlan_tailroom_need_count(sdata, 1);
+
        if (!sdata->crypto_tx_tailroom_needed_cnt++) {
                /*
                 * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
        }
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+                                        int delta)
+{
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+       update_vlan_tailroom_need_count(sdata, -delta);
+       sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
        struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
                if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
                      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
                        (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
                        schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
                                              HZ/2);
                } else {
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
                }
        }
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_key *key;
+       struct ieee80211_sub_if_data *vlan;
 
        ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->local->key_mtx);
 
-       sdata->crypto_tx_tailroom_needed_cnt = 0;
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                    sdata->crypto_tx_tailroom_pending_dec);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+                                    vlan->crypto_tx_tailroom_pending_dec);
+       }
 
        list_for_each_entry(key, &sdata->key_list, list) {
                increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       mutex_lock(&sdata->local->key_mtx);
+
+       sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       vlan->crypto_tx_tailroom_needed_cnt = 0;
+       }
+
+       mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_key *key, *tmp;
 
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
 
        ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_sub_if_data *master;
        struct ieee80211_key *key, *tmp;
        LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
        list_for_each_entry_safe(key, tmp, &keys, list)
                __ieee80211_key_destroy(key, false);
 
-       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
-                    sdata->crypto_tx_tailroom_pending_dec);
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               if (sdata->bss) {
+                       master = container_of(sdata->bss,
+                                             struct ieee80211_sub_if_data,
+                                             u.ap);
+
+                       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+                                    master->crypto_tx_tailroom_needed_cnt);
+               }
+       } else {
+               WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                            sdata->crypto_tx_tailroom_pending_dec);
+       }
+
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                        WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
         */
 
        mutex_lock(&sdata->local->key_mtx);
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
        mutex_unlock(&sdata->local->key_mtx);
 }
index c5a3183..96557dd 100644 (file)
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 void ieee80211_free_sta_keys(struct ieee80211_local *local,
                             struct sta_info *sta);
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
 
 #define key_mtx_dereference(local, ref) \
        rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
index 260eed4..5793f75 100644 (file)
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               if (rx->local->napi)
+               if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+                   rx->local->napi)
                        napi_gro_receive(rx->local->napi, skb);
                else
                        netif_receive_skb(skb);
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .flags = 0,
+               .flags = IEEE80211_RX_REORDER_TIMER,
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
index 79412f1..b864ebc 100644 (file)
@@ -2023,6 +2023,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 
        /* add back keys */
        list_for_each_entry(sdata, &local->interfaces, list)
+               ieee80211_reset_crypto_tx_tailroom(sdata);
+
+       list_for_each_entry(sdata, &local->interfaces, list)
                if (ieee80211_sdata_running(sdata))
                        ieee80211_enable_keys(sdata);
 
index a4220e9..efa3f48 100644 (file)
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
 
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 
-       if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
-                   skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+       if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
                return NULL;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
        size_t len;
        u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
 
+       if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
+               return -1;
+
        iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
        if (!iv)
                return -1;
index f70e34a..a0f3e6a 100644 (file)
@@ -863,6 +863,7 @@ config NETFILTER_XT_TARGET_TPROXY
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
        depends on (IPV6 || IPV6=n)
+       depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
        depends on IP_NF_MANGLE
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
@@ -1356,6 +1357,7 @@ config NETFILTER_XT_MATCH_SOCKET
        depends on NETFILTER_ADVANCED
        depends on !NF_CONNTRACK || NF_CONNTRACK
        depends on (IPV6 || IPV6=n)
+       depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
index 4953267..285eae3 100644 (file)
@@ -3823,6 +3823,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
        cancel_work_sync(&ipvs->defense_work.work);
        unregister_net_sysctl_table(ipvs->sysctl_hdr);
        ip_vs_stop_estimator(net, &ipvs->tot_stats);
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->sysctl_tbl);
 }
 
 #else
index 5caa0c4..70383de 100644 (file)
@@ -202,7 +202,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sES -> sES      :-)
  *     sFW -> sCW      Normal close request answered by ACK.
  *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
+ *     sLA -> sTW      Last ACK detected (RFC5961 challenged)
  *     sTW -> sTW      Retransmitted last ACK. Remain in the same state.
  *     sCL -> sCL
  */
@@ -261,7 +261,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sES -> sES      :-)
  *     sFW -> sCW      Normal close request answered by ACK.
  *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
+ *     sLA -> sTW      Last ACK detected (RFC5961 challenged)
  *     sTW -> sTW      Retransmitted last ACK.
  *     sCL -> sCL
  */
@@ -906,6 +906,7 @@ static int tcp_packet(struct nf_conn *ct,
                                        1 : ct->proto.tcp.last_win;
                        ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
                                ct->proto.tcp.last_wscale;
+                       ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
                        ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
                                ct->proto.tcp.last_flags;
                        memset(&ct->proto.tcp.seen[dir], 0,
@@ -923,7 +924,9 @@ static int tcp_packet(struct nf_conn *ct,
                 * may be in sync but we are not. In that case, we annotate
                 * the TCP options and let the packet go through. If it is a
                 * valid SYN packet, the server will reply with a SYN/ACK, and
-                * then we'll get in sync. Otherwise, the server ignores it. */
+                * then we'll get in sync. Otherwise, the server potentially
+                * responds with a challenge ACK if implementing RFC5961.
+                */
                if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
                        struct ip_ct_tcp_state seen = {};
 
@@ -939,6 +942,13 @@ static int tcp_packet(struct nf_conn *ct,
                                ct->proto.tcp.last_flags |=
                                        IP_CT_TCP_FLAG_SACK_PERM;
                        }
+                       /* Mark the potential for RFC5961 challenge ACK,
+                        * this pose a special problem for LAST_ACK state
+                        * as ACK is intrepretated as ACKing last FIN.
+                        */
+                       if (old_state == TCP_CONNTRACK_LAST_ACK)
+                               ct->proto.tcp.last_flags |=
+                                       IP_CT_EXP_CHALLENGE_ACK;
                }
                spin_unlock_bh(&ct->lock);
                if (LOG_INVALID(net, IPPROTO_TCP))
@@ -970,6 +980,25 @@ static int tcp_packet(struct nf_conn *ct,
                        nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
                                  "nf_ct_tcp: invalid state ");
                return -NF_ACCEPT;
+       case TCP_CONNTRACK_TIME_WAIT:
+               /* RFC5961 compliance cause stack to send "challenge-ACK"
+                * e.g. in response to spurious SYNs.  Conntrack MUST
+                * not believe this ACK is acking last FIN.
+                */
+               if (old_state == TCP_CONNTRACK_LAST_ACK &&
+                   index == TCP_ACK_SET &&
+                   ct->proto.tcp.last_dir != dir &&
+                   ct->proto.tcp.last_index == TCP_SYN_SET &&
+                   (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
+                       /* Detected RFC5961 challenge ACK */
+                       ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
+                       spin_unlock_bh(&ct->lock);
+                       if (LOG_INVALID(net, IPPROTO_TCP))
+                               nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_tcp: challenge-ACK ignored ");
+                       return NF_ACCEPT; /* Don't change state */
+               }
+               break;
        case TCP_CONNTRACK_CLOSE:
                if (index == TCP_RST_SET
                    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
index ad9d11f..34ded09 100644 (file)
@@ -4472,9 +4472,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
  */
 void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
 {
-       switch (type) {
-       case NFT_DATA_VALUE:
+       if (type < NFT_DATA_VERDICT)
                return;
+       switch (type) {
        case NFT_DATA_VERDICT:
                return nft_verdict_uninit(data);
        default:
index 3ad9126..4ef1fae 100644 (file)
@@ -1073,7 +1073,13 @@ static struct pernet_operations nfnl_log_net_ops = {
 
 static int __init nfnetlink_log_init(void)
 {
-       int status = -ENOMEM;
+       int status;
+
+       status = register_pernet_subsys(&nfnl_log_net_ops);
+       if (status < 0) {
+               pr_err("failed to register pernet ops\n");
+               goto out;
+       }
 
        netlink_register_notifier(&nfulnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfulnl_subsys);
@@ -1088,28 +1094,23 @@ static int __init nfnetlink_log_init(void)
                goto cleanup_subsys;
        }
 
-       status = register_pernet_subsys(&nfnl_log_net_ops);
-       if (status < 0) {
-               pr_err("failed to register pernet ops\n");
-               goto cleanup_logger;
-       }
        return status;
 
-cleanup_logger:
-       nf_log_unregister(&nfulnl_logger);
 cleanup_subsys:
        nfnetlink_subsys_unregister(&nfulnl_subsys);
 cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_log_net_ops);
+out:
        return status;
 }
 
 static void __exit nfnetlink_log_fini(void)
 {
-       unregister_pernet_subsys(&nfnl_log_net_ops);
        nf_log_unregister(&nfulnl_logger);
        nfnetlink_subsys_unregister(&nfulnl_subsys);
        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_log_net_ops);
 }
 
 MODULE_DESCRIPTION("netfilter userspace logging");
index 0b98c74..11c7682 100644 (file)
@@ -1317,7 +1317,13 @@ static struct pernet_operations nfnl_queue_net_ops = {
 
 static int __init nfnetlink_queue_init(void)
 {
-       int status = -ENOMEM;
+       int status;
+
+       status = register_pernet_subsys(&nfnl_queue_net_ops);
+       if (status < 0) {
+               pr_err("nf_queue: failed to register pernet ops\n");
+               goto out;
+       }
 
        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
@@ -1326,19 +1332,13 @@ static int __init nfnetlink_queue_init(void)
                goto cleanup_netlink_notifier;
        }
 
-       status = register_pernet_subsys(&nfnl_queue_net_ops);
-       if (status < 0) {
-               pr_err("nf_queue: failed to register pernet ops\n");
-               goto cleanup_subsys;
-       }
        register_netdevice_notifier(&nfqnl_dev_notifier);
        nf_register_queue_handler(&nfqh);
        return status;
 
-cleanup_subsys:
-       nfnetlink_subsys_unregister(&nfqnl_subsys);
 cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+out:
        return status;
 }
 
@@ -1346,9 +1346,9 @@ static void __exit nfnetlink_queue_fini(void)
 {
        nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
-       unregister_pernet_subsys(&nfnl_queue_net_ops);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_queue_net_ops);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
index daa0b81..bf6e766 100644 (file)
@@ -89,7 +89,7 @@ static inline int netlink_is_kernel(struct sock *sk)
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct netlink_table *nl_table;
+struct netlink_table *nl_table __read_mostly;
 EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -1081,6 +1081,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
        if (err) {
                if (err == -EEXIST)
                        err = -EADDRINUSE;
+               nlk_sk(sk)->portid = 0;
                sock_put(sk);
        }
 
index b6ef9a0..a75864d 100644 (file)
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
        struct tcf_proto_ops *t;
        int rc = -ENOENT;
 
+       /* Wait for outstanding call_rcu()s, if any, from a
+        * tcf_proto_ops's destroy() handler.
+        */
+       rcu_barrier();
+
        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
index ad9eed7..1e1c89e 100644 (file)
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);
 
-               if (new && new->ops->attach) {
-                       new->ops->attach(new);
-                       num_q = 0;
-               }
+               if (new && new->ops->attach)
+                       goto skip;
 
                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                                qdisc_destroy(old);
                }
 
+skip:
                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
                                atomic_inc(&new->refcnt);
                        dev->qdisc = new ? : &noop_qdisc;
+
+                       if (new && new->ops->attach)
+                               new->ops->attach(new);
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }
index 46568b8..055453d 100644 (file)
@@ -338,7 +338,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
                                              fi, tos, type, nlflags,
                                              tb_id);
                if (!err)
-                       fi->fib_flags |= RTNH_F_EXTERNAL;
+                       fi->fib_flags |= RTNH_F_OFFLOAD;
        }
 
        return err;
@@ -364,7 +364,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
        const struct swdev_ops *ops;
        int err = 0;
 
-       if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+       if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;
 
        dev = netdev_switch_get_dev_by_nhs(fi);
@@ -376,7 +376,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
                err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
                                              fi, tos, type, tb_id);
                if (!err)
-                       fi->fib_flags &= ~RTNH_F_EXTERNAL;
+                       fi->fib_flags &= ~RTNH_F_OFFLOAD;
        }
 
        return err;
index 5266ea7..0643059 100644 (file)
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                unix_state_unlock(sk);
                timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb, *last;
 
                unix_state_lock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                last = skb = skb_peek(&sk->sk_receive_queue);
 again:
                if (skb == NULL) {
index 526c4fe..b58286e 100644 (file)
@@ -13,6 +13,8 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
 
 static struct kmem_cache *secpath_cachep __read_mostly;
 
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
+       u32 mark = skb->mark;
        unsigned int family;
        int decaps = 0;
        int async = 0;
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        family = XFRM_SPI_SKB_CB(skb)->family;
 
+       /* if tunnel is present override skb->mark value with tunnel i_key */
+       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
+               switch (family) {
+               case AF_INET:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
+                       break;
+               case AF_INET6:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
+                       break;
+               }
+       }
+
        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                struct sec_path *sp;
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        goto drop;
                }
 
-               x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
+               x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
index dab57da..4fd725a 100644 (file)
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(x->replay.oseq == 0)) {
                        x->replay.oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(replay_esn->oseq == 0)) {
                        replay_esn->oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
index f5e39e3..96688cd 100644 (file)
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                        x->id.spi != spi)
                        continue;
 
-               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                xfrm_state_hold(x);
+               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                return x;
        }
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
index a1504c4..25db8cf 100644 (file)
@@ -73,18 +73,11 @@ class LxLsmod(gdb.Command):
                 "        " if utils.get_long_type().sizeof == 8 else ""))
 
         for module in module_list():
-            ref = 0
-            module_refptr = module['refptr']
-            for cpu in cpus.cpu_list("cpu_possible_mask"):
-                refptr = cpus.per_cpu(module_refptr, cpu)
-                ref += refptr['incs']
-                ref -= refptr['decs']
-
             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
                 address=str(module['module_core']).split()[0],
                 name=module['name'].string(),
                 size=str(module['core_size']),
-                ref=str(ref)))
+                ref=str(module['refcnt']['counter'])))
 
             source_list = module['source_list']
             t = self._module_use_type.get_type().pointer()
index cf4cedf..6dad042 100644 (file)
@@ -916,7 +916,6 @@ static struct ac97c_platform_data *atmel_ac97c_probe_dt(struct device *dev)
 {
        struct ac97c_platform_data *pdata;
        struct device_node *node = dev->of_node;
-       const struct of_device_id *match;
 
        if (!node) {
                dev_err(dev, "Device does not have associated DT data\n");
index ac6b33f..7d45645 100644 (file)
@@ -339,7 +339,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
                if (delta > new_hw_ptr) {
                        /* check for double acknowledged interrupts */
                        hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
-                       if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
+                       if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
                                hw_base += runtime->buffer_size;
                                if (hw_base >= runtime->boundary) {
                                        hw_base = 0;
index 788f969..ac0db16 100644 (file)
@@ -844,8 +844,16 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE, state);
                        changed = nid;
+                       /* all known codecs seem to be capable to handl
+                        * widgets state even in D3, so far.
+                        * if any new codecs need to restore the widget
+                        * states after D0 transition, call the function
+                        * below.
+                        */
+#if 0 /* disabled */
                        if (state == AC_PWRST_D0)
                                snd_hdac_regmap_sync_node(&codec->core, nid);
+#endif
                }
        }
        return changed;
@@ -4918,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
  dig_only:
        parse_digital(codec);
 
-       if (spec->power_down_unused || codec->power_save_node)
+       if (spec->power_down_unused || codec->power_save_node) {
                if (!codec->power_filter)
                        codec->power_filter = snd_hda_gen_path_power_filter;
+               if (!codec->patch_ops.stream_pm)
+                       codec->patch_ops.stream_pm = snd_hda_gen_stream_pm;
+       }
 
        if (!spec->no_analog && spec->beep_nid) {
                err = snd_hda_attach_beep_device(codec, spec->beep_nid);
index 34040d2..fea198c 100644 (file)
@@ -2089,6 +2089,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index f8f0dfb..78b719b 100644 (file)
@@ -968,6 +968,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_conexant_auto },
        { .id = 0x14f150b9, .name = "CX20665",
          .patch = patch_conexant_auto },
+       { .id = 0x14f150f1, .name = "CX20721",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f2, .name = "CX20722",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f3, .name = "CX20723",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f4, .name = "CX20724",
+         .patch = patch_conexant_auto },
        { .id = 0x14f1510f, .name = "CX20751/2",
          .patch = patch_conexant_auto },
        { .id = 0x14f15110, .name = "CX20751/2",
@@ -1002,6 +1010,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
 MODULE_ALIAS("snd-hda-codec-id:14f150ac");
 MODULE_ALIAS("snd-hda-codec-id:14f150b8");
 MODULE_ALIAS("snd-hda-codec-id:14f150b9");
+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
 MODULE_ALIAS("snd-hda-codec-id:14f1510f");
 MODULE_ALIAS("snd-hda-codec-id:14f15110");
 MODULE_ALIAS("snd-hda-codec-id:14f15111");
index e2afd53..4641684 100644 (file)
@@ -883,6 +883,8 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0668, 0x1028, 0, "ALC3661" },
        { 0x10ec0275, 0x1028, 0, "ALC3260" },
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
+       { 0x10ec0298, 0x1028, 0, "ALC3266" },
+       { 0x10ec0256, 0x1028, 0, "ALC3246" },
        { 0x10ec0670, 0x1025, 0, "ALC669X" },
        { 0x10ec0676, 0x1025, 0, "ALC679X" },
        { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -3673,6 +3675,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                alc_process_coef_fw(codec, coef0293);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0662:
+               snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+               snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+               break;
        case 0x10ec0668:
                alc_write_coef_idx(codec, 0x11, 0x0001);
                snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
@@ -3738,7 +3744,6 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        case 0x10ec0288:
                alc_process_coef_fw(codec, coef0288);
                break;
-               break;
        case 0x10ec0292:
                alc_process_coef_fw(codec, coef0292);
                break;
@@ -4012,7 +4017,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
        if (new_headset_mode != ALC_HEADSET_MODE_MIC) {
                snd_hda_set_pin_ctl_cache(codec, hp_pin,
                                          AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN);
-               if (spec->headphone_mic_pin)
+               if (spec->headphone_mic_pin && spec->headphone_mic_pin != hp_pin)
                        snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin,
                                                  PIN_VREFHIZ);
        }
@@ -4215,6 +4220,23 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
        }
 }
 
+static void alc_fixup_headset_mode_alc662(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+               spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */
+
+               /* Disable boost for mic-in permanently. (This code is only called
+                  from quirks that guarantee that the headphone is at NID 0x1b.) */
+               snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000);
+               snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP);
+       } else
+               alc_fixup_headset_mode(codec, fix, action);
+}
+
 static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -5119,6 +5141,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5148,6 +5171,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5345,6 +5369,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211050}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC255_STANDARD_PINS,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x21, 0x02211040}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
                {0x13, 0x40000000}),
@@ -5598,7 +5629,8 @@ static int patch_alc269(struct hda_codec *codec)
 
        spec = codec->spec;
        spec->gen.shared_mic_vref_pin = 0x18;
-       codec->power_save_node = 1;
+       if (codec->core.vendor_id != 0x10ec0292)
+               codec->power_save_node = 1;
 
        snd_hda_pick_fixup(codec, alc269_fixup_models,
                       alc269_fixup_tbl, alc269_fixups);
@@ -6079,7 +6111,9 @@ enum {
        ALC662_FIXUP_NO_JACK_DETECT,
        ALC662_FIXUP_ZOTAC_Z68,
        ALC662_FIXUP_INV_DMIC,
+       ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
        ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
+       ALC662_FIXUP_HEADSET_MODE,
        ALC668_FIXUP_HEADSET_MODE,
        ALC662_FIXUP_BASS_MODE4_CHMAP,
        ALC662_FIXUP_BASS_16,
@@ -6272,6 +6306,20 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE
        },
+       [ALC662_FIXUP_DELL_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */
+                       /* headphone mic by setting pin control of 0x1b (headphone out) to in + vref_50 */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_HEADSET_MODE
+       },
+       [ALC662_FIXUP_HEADSET_MODE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_mode_alc662,
+       },
        [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6423,6 +6471,18 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
+               {0x12, 0x4004c000},
+               {0x14, 0x01014010},
+               {0x15, 0x411111f0},
+               {0x16, 0x411111f0},
+               {0x18, 0x01a19020},
+               {0x19, 0x411111f0},
+               {0x1a, 0x0181302f},
+               {0x1b, 0x0221401f},
+               {0x1c, 0x411111f0},
+               {0x1d, 0x4054c601},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
                {0x12, 0x99a30130},
                {0x14, 0x90170110},
index 43c99ce..6833c74 100644 (file)
@@ -4403,7 +4403,6 @@ static const struct hda_codec_ops stac_patch_ops = {
 #ifdef CONFIG_PM
        .suspend = stac_suspend,
 #endif
-       .stream_pm = snd_hda_gen_stream_pm,
        .reboot_notify = stac_shutup,
 };
 
@@ -4697,7 +4696,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
                return err;
 
        spec = codec->spec;
-       codec->power_save_node = 1;
+       /* disabled power_save_node since it causes noises on a Dell machine */
+       /* codec->power_save_node = 1; */
        spec->linear_tone_beep = 0;
        spec->gen.own_eapd_ctl = 1;
        spec->gen.power_down_unused = 1;
index d51703e..0a4ad5f 100644 (file)
@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
                        old_vmaster_hook = spec->vmaster_mute.hook;
                        spec->vmaster_mute.hook = update_tpacpi_mute_led;
-                       spec->vmaster_mute_enum = 1;
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
index 2ffb9a0..3d44fc5 100644 (file)
@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
                                AUDIO_SSI_SEL, 0);
        else
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
-                               0, AUDIO_SSI_SEL);
+                               AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
        if (priv->dac_ssi_port == MC13783_SSI1_PORT)
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
                                AUDIO_SSI_SEL, 0);
        else
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
-                               0, AUDIO_SSI_SEL);
+                               AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
        return 0;
 }
index dc7778b..c3c33bd 100644 (file)
@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
        if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
                return -EINVAL;
 
-       uda1380_write(codec, UDA1380_IFACE, iface);
+       uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
 
        return 0;
 }
index 3035d98..e97a761 100644 (file)
@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
        { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
        { "Right Input Mixer", NULL, "RINPUT1", },  /* Really Boost Switch */
        { "Right Input Mixer", NULL, "RINPUT2" },
-       { "Right Input Mixer", NULL, "LINPUT3" },
+       { "Right Input Mixer", NULL, "RINPUT3" },
 
        { "Left ADC", NULL, "Left Input Mixer" },
        { "Right ADC", NULL, "Right Input Mixer" },
index 4fbc768..a1c04da 100644 (file)
@@ -2754,7 +2754,7 @@ static struct {
 };
 
 static int fs_ratios[] = {
-       64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
+       64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
 };
 
 static int bclk_divs[] = {
index bb4b78e..23c91fa 100644 (file)
@@ -1247,7 +1247,7 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
        u32 reg;
        int i;
 
-       context->pm_state = pm_runtime_enabled(mcasp->dev);
+       context->pm_state = pm_runtime_active(mcasp->dev);
        if (!context->pm_state)
                pm_runtime_get_sync(mcasp->dev);
 
index defe0f0..158204d 100644 (file)
@@ -3100,11 +3100,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix)
+       if (prefix) {
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-       else
+               if (widget->sname)
+                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
+                                            widget->sname);
+       } else {
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-
+               if (widget->sname)
+                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
+       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
index 7c5a701..2917534 100644 (file)
@@ -1117,6 +1117,8 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        switch (chip->usb_id) {
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+       case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+       case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
                return true;
        }
index c5baf9c..618c2bc 100644 (file)
@@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        assert(ret == 0);
 
        ptr = haystack;
+       memset(pmatch, 0, sizeof(pmatch));
+
        while (1) {
                ret = regexec(&regex, ptr, 1, pmatch, 0);
                if (ret == 0) {
index bac98ca..323b65e 100644 (file)
@@ -52,6 +52,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_knl_cstates;
 unsigned int do_pc2;
 unsigned int do_pc3;
 unsigned int do_pc6;
@@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons;
 unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
+int base_cpu;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -316,7 +318,7 @@ void print_header(void)
 
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c1");
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "  CPU%%c3");
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c6");
@@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
@@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return 0;
 
-       if (do_nhm_cstates && !do_slm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
+       } else if (do_knl_cstates) {
+               if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+                       return -7;
        }
 
        if (do_snb_cstates)
@@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
 
        fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
 
@@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void)
        fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
                ratio, bclk, ratio * bclk);
 
-       get_msr(0, MSR_IA32_POWER_CTL, &msr);
+       get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
        fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
                msr, msr & 0x2 ? "EN" : "DIS");
 
@@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
 
@@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
 
@@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
 
@@ -1296,11 +1301,72 @@ dump_nhm_turbo_ratio_limits(void)
 }
 
 static void
+dump_knl_turbo_ratio_limits(void)
+{
+       int cores;
+       unsigned int ratio;
+       unsigned long long msr;
+       int delta_cores;
+       int delta_ratio;
+       int i;
+
+       get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
+
+       fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n",
+       msr);
+
+       /**
+        * Turbo encoding in KNL is as follows:
+        * [7:0] -- Base value of number of active cores of bucket 1.
+        * [15:8] -- Base value of freq ratio of bucket 1.
+        * [20:16] -- +ve delta of number of active cores of bucket 2.
+        * i.e. active cores of bucket 2 =
+        * active cores of bucket 1 + delta
+        * [23:21] -- Negative delta of freq ratio of bucket 2.
+        * i.e. freq ratio of bucket 2 =
+        * freq ratio of bucket 1 - delta
+        * [28:24]-- +ve delta of number of active cores of bucket 3.
+        * [31:29]-- -ve delta of freq ratio of bucket 3.
+        * [36:32]-- +ve delta of number of active cores of bucket 4.
+        * [39:37]-- -ve delta of freq ratio of bucket 4.
+        * [44:40]-- +ve delta of number of active cores of bucket 5.
+        * [47:45]-- -ve delta of freq ratio of bucket 5.
+        * [52:48]-- +ve delta of number of active cores of bucket 6.
+        * [55:53]-- -ve delta of freq ratio of bucket 6.
+        * [60:56]-- +ve delta of number of active cores of bucket 7.
+        * [63:61]-- -ve delta of freq ratio of bucket 7.
+        */
+       cores = msr & 0xFF;
+       ratio = (msr >> 8) & 0xFF;
+       if (ratio > 0)
+               fprintf(stderr,
+                       "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                       ratio, bclk, ratio * bclk, cores);
+
+       for (i = 16; i < 64; i = i + 8) {
+               delta_cores = (msr >> i) & 0x1F;
+               delta_ratio = (msr >> (i + 5)) & 0x7;
+               if (!delta_cores || !delta_ratio)
+                       return;
+               cores = cores + delta_cores;
+               ratio = ratio - delta_ratio;
+
+               /** -ve ratios will make successive ratio calculations
+                * negative. Hence return instead of carrying on.
+                */
+               if (ratio > 0)
+                       fprintf(stderr,
+                               "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                               ratio, bclk, ratio * bclk, cores);
+       }
+}
+
+static void
 dump_nhm_cst_cfg(void)
 {
        unsigned long long msr;
 
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
@@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...)
 }
 
 /*
- * cpu_is_first_sibling_in_core(cpu)
- * return 1 if given CPU is 1st HT sibling in the core
+ * get_cpu_position_in_core(cpu)
+ * return the position of the CPU among its HT siblings in the core
+ * return -1 if the sibling is not in list
  */
-int cpu_is_first_sibling_in_core(int cpu)
+int get_cpu_position_in_core(int cpu)
 {
-       return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+       char path[64];
+       FILE *filep;
+       int this_cpu;
+       char character;
+       int i;
+
+       sprintf(path,
+               "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
+               cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
+               exit(1);
+       }
+
+       for (i = 0; i < topo.num_threads_per_core; i++) {
+               fscanf(filep, "%d", &this_cpu);
+               if (this_cpu == cpu) {
+                       fclose(filep);
+                       return i;
+               }
+
+               /* Account for no separator after last thread*/
+               if (i != (topo.num_threads_per_core - 1))
+                       fscanf(filep, "%c", &character);
+       }
+
+       fclose(filep);
+       return -1;
 }
 
 /*
@@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu)
 {
        char path[80];
        FILE *filep;
-       int sib1, sib2;
-       int matches;
+       int sib1;
+       int matches = 0;
        char character;
+       char str[100];
+       char *ch;
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
        filep = fopen_or_die(path, "r");
+
        /*
         * file format:
-        * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
-        * otherwinse 1 sibling (self).
+        * A ',' separated or '-' separated set of numbers
+        * (eg 1-2 or 1,3,4,5)
         */
-       matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
+       fscanf(filep, "%d%c\n", &sib1, &character);
+       fseek(filep, 0, SEEK_SET);
+       fgets(str, 100, filep);
+       ch = strchr(str, character);
+       while (ch != NULL) {
+               matches++;
+               ch = strchr(ch+1, character);
+       }
 
        fclose(filep);
-
-       if (matches == 3)
-               return 2;
-       else
-               return 1;
+       return matches+1;
 }
 
 /*
@@ -1594,8 +1695,10 @@ restart:
 void check_dev_msr()
 {
        struct stat sb;
+       char pathname[32];
 
-       if (stat("/dev/cpu/0/msr", &sb))
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (stat(pathname, &sb))
                if (system("/sbin/modprobe msr > /dev/null 2>&1"))
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
@@ -1608,6 +1711,7 @@ void check_permissions()
        cap_user_data_t cap_data = &cap_data_data;
        extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
        int do_exit = 0;
+       char pathname[32];
 
        /* check for CAP_SYS_RAWIO */
        cap_header->pid = getpid();
@@ -1622,7 +1726,8 @@ void check_permissions()
        }
 
        /* test file permissions */
-       if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (euidaccess(pathname, R_OK)) {
                do_exit++;
                warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
        }
@@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        default:
                return 0;
        }
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
        pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
@@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
        }
 }
 
+int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       if (family != 6)
+               return 0;
+
+       switch (model) {
+       case 0x57:      /* Knights Landing */
+               return 1;
+       default:
+               return 0;
+       }
+}
 static void
 dump_cstate_pstate_config_info(family, model)
 {
@@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model)
        if (has_nhm_turbo_ratio_limit(family, model))
                dump_nhm_turbo_ratio_limits();
 
+       if (has_knl_turbo_ratio_limit(family, model))
+               dump_knl_turbo_ratio_limits();
+
        dump_nhm_cst_cfg();
 }
 
@@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
                return 0;
 
-       switch (msr & 0x7) {
+       switch (msr & 0xF) {
        case ENERGY_PERF_BIAS_PERFORMANCE:
                epb_string = "performance";
                break;
@@ -1925,7 +2048,7 @@ double get_tdp(model)
        unsigned long long msr;
 
        if (do_rapl & RAPL_PKG_POWER_INFO)
-               if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
+               if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
                        return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
        switch (model) {
@@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                return (rapl_dram_energy_units = 15.3 / 1000000);
        default:
                return (rapl_energy_units);
@@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
@@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        }
 
        /* units on package 0, verify later other packages match */
-       if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
+       if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
                return;
 
        rapl_power_units = 1.0 / (1 << (msr & 0xF));
@@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model)
        return 0;
 }
 
+int is_knl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+       switch (model) {
+       case 0x57:      /* KNL */
+               return 1;
+       }
+       return 0;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2340,7 +2476,7 @@ double slm_bclk(void)
        unsigned int i;
        double freq;
 
-       if (get_msr(0, MSR_FSB_FREQ, &msr))
+       if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
                fprintf(stderr, "SLM BCLK: unknown\n");
 
        i = msr & 0xf;
@@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
        if (!do_nhm_platform_info)
                goto guess;
 
-       if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
+       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
                goto guess;
 
        target_c_local = (msr >> 16) & 0xFF;
@@ -2541,6 +2677,7 @@ void process_cpuid()
        do_c8_c9_c10 = has_hsw_msrs(family, model);
        do_skl_residency = has_skl_msrs(family, model);
        do_slm_cstates = is_slm(family, model);
+       do_knl_cstates  = is_knl(family, model);
        bclk = discover_bclk(family, model);
 
        rapl_probe(family, model);
@@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id)
 
        my_package_id = get_physical_package_id(cpu_id);
        my_core_id = get_core_id(cpu_id);
-
-       if (cpu_is_first_sibling_in_core(cpu_id)) {
-               my_thread_id = 0;
+       my_thread_id = get_cpu_position_in_core(cpu_id);
+       if (!my_thread_id)
                topo.num_cores++;
-       } else {
-               my_thread_id = 1;
-       }
 
        init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
        init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
@@ -2785,13 +2918,24 @@ void setup_all_buffers(void)
        for_all_proc_cpus(initialize_counters);
 }
 
+void set_base_cpu(void)
+{
+       base_cpu = sched_getcpu();
+       if (base_cpu < 0)
+               err(-ENODEV, "No valid cpus found");
+
+       if (debug > 1)
+               fprintf(stderr, "base_cpu = %d\n", base_cpu);
+}
+
 void turbostat_init()
 {
+       setup_all_buffers();
+       set_base_cpu();
        check_dev_msr();
        check_permissions();
        process_cpuid();
 
-       setup_all_buffers();
 
        if (debug)
                for_all_cpus(print_epb, ODD_COUNTERS);
@@ -2870,7 +3014,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(stderr, "turbostat version 4.5 2 Apr, 2015"
+       fprintf(stderr, "turbostat version 4.7 27-May, 2015"
                " - Len Brown <lenb@kernel.org>\n");
 }