Merge branch 'pm-cpufreq'
author Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 30 Mar 2020 12:46:27 +0000 (14:46 +0200)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Mon, 30 Mar 2020 12:46:27 +0000 (14:46 +0200)
* pm-cpufreq:
  cpufreq: intel_pstate: Simplify intel_pstate_cpu_init()
  cpufreq: qcom: Add support for krait based socs
  cpufreq: imx6q-cpufreq: Improve the logic of -EPROBE_DEFER handling
  cpufreq: Use scnprintf() for avoiding potential buffer overflow
  Documentation: intel_pstate: update links for references
  cpufreq: intel_pstate: Consolidate policy verification
  cpufreq: dt: Allow platform specific intermediate callbacks
  cpufreq: imx-cpufreq-dt: Correct i.MX8MP's market segment fuse location
  cpufreq: imx6q: read OCOTP through nvmem for imx6q
  cpufreq: imx6q: fix error handling
  cpufreq: imx-cpufreq-dt: Add "cpu-supply" property check
  cpufreq: ti-cpufreq: Add support for OPP_PLUS
  cpufreq: imx6q: Fixes unwanted cpu overclocking on i.MX6ULL

446 files changed:
.clang-format
Documentation/admin-guide/pm/cpuidle.rst
Documentation/arm64/silicon-errata.rst
Documentation/devicetree/bindings/net/fsl-fman.txt
Documentation/filesystems/porting.rst
Documentation/kbuild/kbuild.rst
Documentation/kbuild/kconfig-macro-language.rst
Documentation/kbuild/makefiles.rst
Documentation/kbuild/modules.rst
Documentation/networking/devlink/devlink-region.rst
Documentation/networking/net_failover.rst
Documentation/networking/rds.txt
Documentation/power/pm_qos_interface.rst
Documentation/trace/events-power.rst
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/configs/nps_defconfig
arch/arc/configs/nsimosci_defconfig
arch/arc/configs/nsimosci_hs_defconfig
arch/arc/configs/nsimosci_hs_smp_defconfig
arch/arc/include/asm/fpu.h
arch/arc/include/asm/linkage.h
arch/arc/kernel/setup.c
arch/arc/kernel/troubleshoot.c
arch/arm/Makefile
arch/arm/boot/compressed/Makefile
arch/arm/kernel/vdso.c
arch/arm/lib/copy_from_user.S
arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
arch/arm64/include/asm/mmu.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/unistd.h
arch/arm64/kernel/smp.c
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/kernel/setup.c
arch/powerpc/kvm/book3s_pr.c
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/kasan/kasan_init_32.c
arch/s390/kvm/kvm-s390.c
arch/x86/Makefile
arch/x86/crypto/Makefile
arch/x86/events/amd/uncore.c
arch/x86/include/asm/kvm_emulate.h
arch/x86/kernel/apic/vector.c
arch/x86/kernel/cpu/mce/intel.c
arch/x86/kernel/cpu/mce/therm_throt.c
arch/x86/kvm/Kconfig
arch/x86/kvm/emulate.c
arch/x86/kvm/ioapic.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/x86/mm/ioremap.c
arch/x86/platform/intel/iosf_mbi.c
block/blk-iocost.c
block/blk-mq-sched.c
block/genhd.c
drivers/acpi/apei/ghes.c
drivers/android/binderfs.c
drivers/atm/nicstar.c
drivers/auxdisplay/Kconfig
drivers/auxdisplay/charlcd.c
drivers/auxdisplay/img-ascii-lcd.c
drivers/base/platform.c
drivers/block/virtio_blk.c
drivers/char/ipmi/ipmi_si_platform.c
drivers/clk/clk.c
drivers/clk/qcom/dispcc-sc7180.c
drivers/clk/qcom/videocc-sc7180.c
drivers/cpuidle/cpuidle-haltpoll.c
drivers/cpuidle/cpuidle.c
drivers/cpuidle/governor.c
drivers/firmware/efi/efivars.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_drv.c
drivers/gpu/drm/bochs/bochs_hw.c
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_lease.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimc.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/exynos/exynos_drm_rotator.c
drivers/gpu/drm/exynos/exynos_drm_scaler.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gvt/display.c
drivers/gpu/drm/i915/gvt/opregion.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_utils.h
drivers/gpu/drm/i915/intel_sideband.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-picolcd_fb.c
drivers/hid/hid-quirks.c
drivers/hid/hid-sensor-custom.c
drivers/hsi/clients/cmt_speech.c
drivers/hwtracing/intel_th/msu.c
drivers/hwtracing/intel_th/pci.c
drivers/hwtracing/stm/p_sys-t.c
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/i2c-core-acpi.c
drivers/idle/intel_idle.c
drivers/iio/accel/adxl372.c
drivers/iio/accel/st_accel_i2c.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/adc/stm32-dfsdm-adc.c
drivers/iio/chemical/Kconfig
drivers/iio/light/vcnl4000.c
drivers/iio/magnetometer/ak8974.c
drivers/iio/proximity/ping.c
drivers/iio/trigger/stm32-timer-trigger.c
drivers/iommu/amd_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu-debugfs.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/irqchip/irq-gic-v3.c
drivers/macintosh/windfarm_ad7417_sensor.c
drivers/macintosh/windfarm_fcu_controls.c
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_lm87_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_smu_sat.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/platform/via-camera.c
drivers/misc/cardreader/rts5227.c
drivers/misc/cardreader/rts5249.c
drivers/misc/cardreader/rts5260.c
drivers/misc/cardreader/rts5261.c
drivers/misc/eeprom/at24.c
drivers/mmc/core/core.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/host/rtsx_pci_sdmmc.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sdhci-msm.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci-tegra.c
drivers/net/bonding/bond_alb.c
drivers/net/can/dev.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/global2.c
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fman/Kconfig
drivers/net/ethernet/freescale/fman/fman.c
drivers/net/ethernet/freescale/fman/fman.h
drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/marvell/mvmdio.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/efx_channels.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sfc/tx_common.c
drivers/net/ethernet/sfc/tx_common.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ipvlan/ipvlan_core.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/phy/bcm63xx.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/slip/slhc.c
drivers/net/team/team.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/intel/ipw2x00/ipw2100.c
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/nvme/host/rdma.c
drivers/nvme/target/tcp.c
drivers/of/of_mdio.c
drivers/pinctrl/cirrus/pinctrl-madera-core.c
drivers/pinctrl/core.c
drivers/pinctrl/freescale/pinctrl-scu.c
drivers/pinctrl/meson/pinctrl-meson-gxl.c
drivers/pinctrl/pinctrl-falcon.c
drivers/pinctrl/qcom/pinctrl-msm.c
drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
drivers/rtc/Kconfig
drivers/s390/block/dasd.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_int.h
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/net/qeth_l3_sys.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/ufs/ufshcd.c
drivers/slimbus/qcom-ngd-ctrl.c
drivers/spi/spi-fsl-qspi.c
drivers/staging/greybus/tools/loopback_test.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/speakup/main.c
drivers/staging/wfx/hif_tx.c
drivers/staging/wfx/hif_tx.h
drivers/staging/wfx/hif_tx_mib.h
drivers/staging/wfx/sta.c
drivers/thunderbolt/switch.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/omap-serial.c
drivers/tty/tty_io.c
drivers/usb/chipidea/ci_hdrc_imx.c
drivers/usb/chipidea/udc.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/quirks.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-trace.h
drivers/usb/serial/option.c
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/typec/ucsi/displayport.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_ring.c
drivers/watchdog/iTCO_vendor.h
drivers/watchdog/iTCO_vendor_support.c
drivers/watchdog/iTCO_wdt.c
fs/afs/addr_list.c
fs/afs/internal.h
fs/btrfs/block-group.c
fs/btrfs/inode.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/smb2ops.c
fs/crypto/keysetup.c
fs/eventpoll.c
fs/file.c
fs/fuse/dev.c
fs/fuse/fuse_i.h
fs/gfs2/inode.c
fs/inode.c
fs/io_uring.c
fs/locks.c
fs/nfs/client.c
fs/nfs/fs_context.c
fs/nfs/fscache.c
fs/nfs/namespace.c
fs/nfs/nfs4client.c
fs/open.c
fs/overlayfs/Kconfig
fs/overlayfs/file.c
fs/overlayfs/overlayfs.h
fs/overlayfs/super.c
fs/overlayfs/util.c
include/crypto/curve25519.h
include/drm/drm_dp_mst_helper.h
include/dt-bindings/clock/imx8mn-clock.h
include/linux/cgroup.h
include/linux/dmar.h
include/linux/file.h
include/linux/fs.h
include/linux/futex.h
include/linux/genhd.h
include/linux/inet_diag.h
include/linux/intel-iommu.h
include/linux/mmc/host.h
include/linux/of_clk.h
include/linux/page-flags.h
include/linux/phy.h
include/linux/platform_device.h
include/linux/pm_qos.h
include/linux/rhashtable.h
include/linux/socket.h
include/linux/vmalloc.h
include/linux/workqueue.h
include/net/fib_rules.h
include/soc/mscc/ocelot_dev.h
include/trace/events/power.h
include/uapi/linux/in.h
init/Kconfig
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/futex.c
kernel/notifier.c
kernel/pid.c
kernel/power/qos.c
kernel/sys.c
kernel/trace/ftrace.c
kernel/workqueue.c
mm/madvise.c
mm/memcontrol.c
mm/mmu_notifier.c
mm/nommu.c
mm/slub.c
mm/sparse.c
mm/vmalloc.c
net/batman-adv/bat_iv_ogm.c
net/caif/caif_dev.c
net/core/devlink.c
net/core/netclassid_cgroup.c
net/core/sock.c
net/dsa/dsa_priv.h
net/dsa/port.c
net/dsa/slave.c
net/ieee802154/nl_policy.c
net/ipv4/gre_demux.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/raw_diag.c
net/ipv4/udp_diag.c
net/ipv6/addrconf.c
net/ipv6/seg6_iptunnel.c
net/ipv6/seg6_local.c
net/mac80211/mesh_hwmp.c
net/mptcp/options.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_cthelper.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_payload.c
net/netfilter/nft_tunnel.c
net/netfilter/x_tables.c
net/netfilter/xt_recent.c
net/netlink/af_netlink.c
net/nfc/hci/core.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/sched/sch_fq.c
net/sched/sch_taprio.c
net/sctp/diag.c
net/smc/smc_ib.c
net/socket.c
net/tipc/netlink.c
net/wireless/nl80211.c
scripts/Kconfig.include
scripts/Makefile.extrawarn
scripts/export_report.pl
scripts/kallsyms.c
scripts/mod/modpost.c
sound/core/oss/pcm_plugin.c
sound/core/pcm_native.c
sound/core/seq/oss/seq_oss_midi.c
sound/core/seq/seq_virmidi.c
sound/pci/hda/patch_realtek.c
sound/soc/intel/atom/sst/sst.c
sound/soc/intel/atom/sst/sst_loader.c
sound/soc/ti/omap-dmic.c
sound/soc/ti/omap-mcbsp.c
sound/soc/ti/omap-mcpdm.c
sound/usb/line6/driver.c
sound/usb/line6/midibuf.c
tools/include/uapi/asm/errno.h
tools/perf/arch/arm64/util/arm-spe.c
tools/perf/arch/arm64/util/perf_regs.c
tools/perf/arch/powerpc/util/perf_regs.c
tools/perf/arch/x86/util/auxtrace.c
tools/perf/arch/x86/util/event.c
tools/perf/arch/x86/util/header.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/arch/x86/util/machine.c
tools/perf/arch/x86/util/perf_regs.c
tools/perf/arch/x86/util/pmu.c
tools/perf/bench/bench.h
tools/perf/bench/epoll-ctl.c
tools/perf/bench/epoll-wait.c
tools/perf/bench/futex-hash.c
tools/perf/bench/futex-lock-pi.c
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake-parallel.c
tools/perf/bench/futex-wake.c
tools/perf/builtin-diff.c
tools/perf/builtin-top.c
tools/perf/pmu-events/jevents.c
tools/perf/tests/bp_account.c
tools/perf/util/block-info.c
tools/perf/util/env.c
tools/perf/util/map.c
tools/perf/util/parse-events.c
tools/perf/util/symbol.c
tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.c
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/tc-testing/config
usr/Kconfig

index 196ca31..6ec5558 100644
@@ -86,6 +86,8 @@ ForEachMacros:
   - 'bio_for_each_segment_all'
   - 'bio_list_for_each'
   - 'bip_for_each_vec'
+  - 'bitmap_for_each_clear_region'
+  - 'bitmap_for_each_set_region'
   - 'blkg_for_each_descendant_post'
   - 'blkg_for_each_descendant_pre'
   - 'blk_queue_for_each_rl'
@@ -115,6 +117,7 @@ ForEachMacros:
   - 'drm_client_for_each_connector_iter'
   - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
+  - 'drm_for_each_bridge_in_chain'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
   - 'drm_for_each_encoder'
@@ -136,9 +139,10 @@ ForEachMacros:
   - 'for_each_bio'
   - 'for_each_board_func_rsrc'
   - 'for_each_bvec'
+  - 'for_each_card_auxs'
+  - 'for_each_card_auxs_safe'
   - 'for_each_card_components'
-  - 'for_each_card_links'
-  - 'for_each_card_links_safe'
+  - 'for_each_card_pre_auxs'
   - 'for_each_card_prelinks'
   - 'for_each_card_rtds'
   - 'for_each_card_rtds_safe'
@@ -166,6 +170,7 @@ ForEachMacros:
   - 'for_each_dpcm_fe'
   - 'for_each_drhd_unit'
   - 'for_each_dss_dev'
+  - 'for_each_efi_handle'
   - 'for_each_efi_memory_desc'
   - 'for_each_efi_memory_desc_in_map'
   - 'for_each_element'
@@ -190,6 +195,7 @@ ForEachMacros:
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
+  - 'for_each_member'
   - 'for_each_memblock'
   - 'for_each_memblock_type'
   - 'for_each_memcg_cache_index'
@@ -200,9 +206,11 @@ ForEachMacros:
   - 'for_each_msi_entry'
   - 'for_each_msi_entry_safe'
   - 'for_each_net'
+  - 'for_each_net_continue_reverse'
   - 'for_each_netdev'
   - 'for_each_netdev_continue'
   - 'for_each_netdev_continue_rcu'
+  - 'for_each_netdev_continue_reverse'
   - 'for_each_netdev_feature'
   - 'for_each_netdev_in_bond_rcu'
   - 'for_each_netdev_rcu'
@@ -254,10 +262,10 @@ ForEachMacros:
   - 'for_each_reserved_mem_region'
   - 'for_each_rtd_codec_dai'
   - 'for_each_rtd_codec_dai_rollback'
-  - 'for_each_rtdcom'
-  - 'for_each_rtdcom_safe'
+  - 'for_each_rtd_components'
   - 'for_each_set_bit'
   - 'for_each_set_bit_from'
+  - 'for_each_set_clump8'
   - 'for_each_sg'
   - 'for_each_sg_dma_page'
   - 'for_each_sg_page'
@@ -267,6 +275,7 @@ ForEachMacros:
   - 'for_each_subelement_id'
   - '__for_each_thread'
   - 'for_each_thread'
+  - 'for_each_wakeup_source'
   - 'for_each_zone'
   - 'for_each_zone_zonelist'
   - 'for_each_zone_zonelist_nodemask'
@@ -330,6 +339,7 @@ ForEachMacros:
   - 'list_for_each'
   - 'list_for_each_codec'
   - 'list_for_each_codec_safe'
+  - 'list_for_each_continue'
   - 'list_for_each_entry'
   - 'list_for_each_entry_continue'
   - 'list_for_each_entry_continue_rcu'
@@ -351,6 +361,7 @@ ForEachMacros:
   - 'llist_for_each_entry'
   - 'llist_for_each_entry_safe'
   - 'llist_for_each_safe'
+  - 'mci_for_each_dimm'
   - 'media_device_for_each_entity'
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
@@ -444,10 +455,16 @@ ForEachMacros:
   - 'virtio_device_for_each_vq'
   - 'xa_for_each'
   - 'xa_for_each_marked'
+  - 'xa_for_each_range'
   - 'xa_for_each_start'
   - 'xas_for_each'
   - 'xas_for_each_conflict'
   - 'xas_for_each_marked'
+  - 'xbc_array_for_each_value'
+  - 'xbc_for_each_key_value'
+  - 'xbc_node_for_each_array_value'
+  - 'xbc_node_for_each_child'
+  - 'xbc_node_for_each_key_value'
   - 'zorro_for_each_dev'
 
 #IncludeBlocks: Preserve # Unknown to clang-format-5.0
index 6a06dc4..5605cc6 100644
@@ -583,20 +583,17 @@ Power Management Quality of Service for CPUs
 The power management quality of service (PM QoS) framework in the Linux kernel
 allows kernel code and user space processes to set constraints on various
 energy-efficiency features of the kernel to prevent performance from dropping
-below a required level.  The PM QoS constraints can be set globally, in
-predefined categories referred to as PM QoS classes, or against individual
-devices.
+below a required level.
 
 CPU idle time management can be affected by PM QoS in two ways, through the
-global constraint in the ``PM_QOS_CPU_DMA_LATENCY`` class and through the
-resume latency constraints for individual CPUs.  Kernel code (e.g. device
-drivers) can set both of them with the help of special internal interfaces
-provided by the PM QoS framework.  User space can modify the former by opening
-the :file:`cpu_dma_latency` special device file under :file:`/dev/` and writing
-a binary value (interpreted as a signed 32-bit integer) to it.  In turn, the
-resume latency constraint for a CPU can be modified by user space by writing a
-string (representing a signed 32-bit integer) to the
-:file:`power/pm_qos_resume_latency_us` file under
+global CPU latency limit and through the resume latency constraints for
+individual CPUs.  Kernel code (e.g. device drivers) can set both of them with
+the help of special internal interfaces provided by the PM QoS framework.  User
+space can modify the former by opening the :file:`cpu_dma_latency` special
+device file under :file:`/dev/` and writing a binary value (interpreted as a
+signed 32-bit integer) to it.  In turn, the resume latency constraint for a CPU
+can be modified from user space by writing a string (representing a signed
+32-bit integer) to the :file:`power/pm_qos_resume_latency_us` file under
 :file:`/sys/devices/system/cpu/cpu<N>/` in ``sysfs``, where the CPU number
 ``<N>`` is allocated at the system initialization time.  Negative values
 will be rejected in both cases and, also in both cases, the written integer
@@ -605,32 +602,34 @@ number will be interpreted as a requested PM QoS constraint in microseconds.
 The requested value is not automatically applied as a new constraint, however,
 as it may be less restrictive (greater in this particular case) than another
 constraint previously requested by someone else.  For this reason, the PM QoS
-framework maintains a list of requests that have been made so far in each
-global class and for each device, aggregates them and applies the effective
-(minimum in this particular case) value as the new constraint.
+framework maintains a list of requests that have been made so far for the
+global CPU latency limit and for each individual CPU, aggregates them and
+applies the effective (minimum in this particular case) value as the new
+constraint.
 
 In fact, opening the :file:`cpu_dma_latency` special device file causes a new
-PM QoS request to be created and added to the priority list of requests in the
-``PM_QOS_CPU_DMA_LATENCY`` class and the file descriptor coming from the
-"open" operation represents that request.  If that file descriptor is then
-used for writing, the number written to it will be associated with the PM QoS
-request represented by it as a new requested constraint value.  Next, the
-priority list mechanism will be used to determine the new effective value of
-the entire list of requests and that effective value will be set as a new
-constraint.  Thus setting a new requested constraint value will only change the
-real constraint if the effective "list" value is affected by it.  In particular,
-for the ``PM_QOS_CPU_DMA_LATENCY`` class it only affects the real constraint if
-it is the minimum of the requested constraints in the list.  The process holding
-a file descriptor obtained by opening the :file:`cpu_dma_latency` special device
-file controls the PM QoS request associated with that file descriptor, but it
-controls this particular PM QoS request only.
+PM QoS request to be created and added to a global priority list of CPU latency
+limit requests and the file descriptor coming from the "open" operation
+represents that request.  If that file descriptor is then used for writing, the
+number written to it will be associated with the PM QoS request represented by
+it as a new requested limit value.  Next, the priority list mechanism will be
+used to determine the new effective value of the entire list of requests and
+that effective value will be set as a new CPU latency limit.  Thus requesting a
+new limit value will only change the real limit if the effective "list" value is
+affected by it, which is the case if it is the minimum of the requested values
+in the list.
+
+The process holding a file descriptor obtained by opening the
+:file:`cpu_dma_latency` special device file controls the PM QoS request
+associated with that file descriptor, but it controls this particular PM QoS
+request only.
 
 Closing the :file:`cpu_dma_latency` special device file or, more precisely, the
 file descriptor obtained while opening it, causes the PM QoS request associated
-with that file descriptor to be removed from the ``PM_QOS_CPU_DMA_LATENCY``
-class priority list and destroyed.  If that happens, the priority list mechanism
-will be used, again, to determine the new effective value for the whole list
-and that value will become the new real constraint.
+with that file descriptor to be removed from the global priority list of CPU
+latency limit requests and destroyed.  If that happens, the priority list
+mechanism will be used again, to determine the new effective value for the whole
+list and that value will become the new limit.
 
 In turn, for each CPU there is one resume latency PM QoS request associated with
 the :file:`power/pm_qos_resume_latency_us` file under
@@ -647,10 +646,10 @@ CPU in question every time the list of requests is updated this way or another
 (there may be other requests coming from kernel code in that list).
 
 CPU idle time governors are expected to regard the minimum of the global
-effective ``PM_QOS_CPU_DMA_LATENCY`` class constraint and the effective
-resume latency constraint for the given CPU as the upper limit for the exit
-latency of the idle states they can select for that CPU.  They should never
-select any idle states with exit latency beyond that limit.
+(effective) CPU latency limit and the effective resume latency constraint for
+the given CPU as the upper limit for the exit latency of the idle states that
+they are allowed to select for that CPU.  They should never select any idle
+states with exit latency beyond that limit.
 
 
 Idle States Control Via Kernel Command Line
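
The user-space side of the interface rewritten above boils down to holding
/dev/cpu_dma_latency open and writing a binary signed 32-bit value to it.  As a
minimal sketch (not part of the patch; the 20 usec value and the pause() stand
in for a real latency-sensitive workload), it can be exercised like this:

    /* Hold a CPU latency limit request for as long as the fd stays open. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int32_t latency_us = 20;  /* interpreted as a signed 32-bit integer */
            int fd = open("/dev/cpu_dma_latency", O_RDWR);

            if (fd < 0) {
                    perror("open /dev/cpu_dma_latency");
                    return 1;
            }
            if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us)) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            pause();        /* latency-sensitive work would run here */
            close(fd);      /* request removed; effective limit recomputed */
            return 0;
    }

Closing the file descriptor removes the request, exactly as the rewritten text
describes.
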
index 9120e59..2c08c62 100644
@@ -110,6 +110,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX GICv3  | #23154          | CAVIUM_ERRATUM_23154        |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX GICv3  | #38539          | N/A                         |
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #27456          | CAVIUM_ERRATUM_27456        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX Core   | #30115          | CAVIUM_ERRATUM_30115        |
index 250f8d8..c00fb0d 100644
@@ -110,6 +110,13 @@ PROPERTIES
                Usage: required
                Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
 
+- fsl,erratum-a050385
+               Usage: optional
+               Value type: boolean
+               Definition: A boolean property. Indicates the presence of the
+               erratum A050385 which indicates that DMA transactions that are
+               split can result in a FMan lock.
+
 =============================================================================
 FMan MURAM Node
 
index f185060..26c0939 100644
@@ -850,3 +850,11 @@ business doing so.
 d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
 very suspect (and won't work in modules).  Such uses are very likely to
 be misspelled d_alloc_anon().
+
+---
+
+**mandatory**
+
+[should've been added in 2016] stale comment in finish_open() nonwithstanding,
+failure exits in ->atomic_open() instances should *NOT* fput() the file,
+no matter what.  Everything is handled by the caller.
index f1e5dce..510f38d 100644
@@ -237,7 +237,7 @@ This is solely useful to speed up test compiles.
 KBUILD_EXTRA_SYMBOLS
 --------------------
 For modules that use symbols from other modules.
-See more details in modules.txt.
+See more details in modules.rst.
 
 ALLSOURCE_ARCHS
 ---------------
index 35b3263..8b413ef 100644
@@ -44,7 +44,7 @@ intermediate::
             def_bool y
 
 Then, Kconfig moves onto the evaluation stage to resolve inter-symbol
-dependency as explained in kconfig-language.txt.
+dependency as explained in kconfig-language.rst.
 
 
 Variables
index 6bc126a..04d5c01 100644
@@ -924,7 +924,7 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_AFLAGS_MODULE) is used to add arch-specific options that
        are used for assembler.
 
-       From commandline AFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline AFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_CFLAGS_KERNEL
        $(CC) options specific for built-in
@@ -937,7 +937,7 @@ When kbuild executes, the following steps are followed (roughly):
 
        $(KBUILD_CFLAGS_MODULE) is used to add arch-specific options that
        are used for $(CC).
-       From commandline CFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline CFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_LDFLAGS_MODULE
        Options for $(LD) when linking modules
@@ -945,7 +945,7 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_LDFLAGS_MODULE) is used to add arch-specific options
        used when linking modules. This is often a linker script.
 
-       From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
+       From commandline LDFLAGS_MODULE shall be used (see kbuild.rst).
 
     KBUILD_LDS
 
index 69fa48e..e0b45a2 100644
@@ -470,9 +470,9 @@ build.
 
        The syntax of the Module.symvers file is::
 
-       <CRC>       <Symbol>          <Namespace>  <Module>                         <Export Type>
+       <CRC>       <Symbol>         <Module>                         <Export Type>     <Namespace>
 
-       0xe1cc2a05  usb_stor_suspend  USB_STORAGE  drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL
+       0xe1cc2a05  usb_stor_suspend drivers/usb/storage/usb-storage  EXPORT_SYMBOL_GPL USB_STORAGE
 
        The fields are separated by tabs and values may be empty (e.g.
        if no namespace is defined for an exported symbol).
index 1a7683e..8b46e85 100644
@@ -40,9 +40,6 @@ example usage
     # Delete a snapshot using:
     $ devlink region del pci/0000:00:05.0/cr-space snapshot 1
 
-    # Trigger (request) a snapshot be taken:
-    $ devlink region trigger pci/0000:00:05.0/cr-space
-
     # Dump a snapshot:
     $ devlink region dump pci/0000:00:05.0/fw-health snapshot 1
     0000000000000000 0014 95dc 0014 9514 0035 1670 0034 db30
index 06c97dc..e143ab7 100644
@@ -8,9 +8,9 @@ Overview
 ========
 
 The net_failover driver provides an automated failover mechanism via APIs
-to create and destroy a failover master netdev and mananges a primary and
+to create and destroy a failover master netdev and manages a primary and
 standby slave netdevs that get registered via the generic failover
-infrastructrure.
+infrastructure.
 
 The failover netdev acts a master device and controls 2 slave devices. The
 original paravirtual interface is registered as 'standby' slave netdev and
@@ -29,7 +29,7 @@ virtio-net accelerated datapath: STANDBY mode
 =============================================
 
 net_failover enables hypervisor controlled accelerated datapath to virtio-net
-enabled VMs in a transparent manner with no/minimal guest userspace chanages.
+enabled VMs in a transparent manner with no/minimal guest userspace changes.
 
 To support this, the hypervisor needs to enable VIRTIO_NET_F_STANDBY
 feature on the virtio-net interface and assign the same MAC address to both
index f2a0147..eec6169 100644
@@ -159,7 +159,7 @@ Socket Interface
        set SO_RDS_TRANSPORT on a socket for which the transport has
        been previously attached explicitly (by SO_RDS_TRANSPORT) or
        implicitly (via bind(2)) will return an error of EOPNOTSUPP.
-       An attempt to set SO_RDS_TRANSPPORT to RDS_TRANS_NONE will
+       An attempt to set SO_RDS_TRANSPORT to RDS_TRANS_NONE will
        always return EINVAL.
 
 RDMA for RDS
index 0d62d50..69b0fe3 100644
@@ -7,86 +7,78 @@ performance expectations by drivers, subsystems and user space applications on
 one of the parameters.
 
 Two different PM QoS frameworks are available:
-1. PM QoS classes for cpu_dma_latency
-2. The per-device PM QoS framework provides the API to manage the
+ * CPU latency QoS.
+ * The per-device PM QoS framework provides the API to manage the
    per-device latency constraints and PM QoS flags.
 
-Each parameters have defined units:
-
- * latency: usec
- * timeout: usec
- * throughput: kbs (kilo bit / sec)
- * memory bandwidth: mbs (mega bit / sec)
+The latency unit used in the PM QoS framework is the microsecond (usec).
 
 
 1. PM QoS framework
 ===================
 
-The infrastructure exposes multiple misc device nodes one per implemented
-parameter.  The set of parameters implement is defined by pm_qos_power_init()
-and pm_qos_params.h.  This is done because having the available parameters
-being runtime configurable or changeable from a driver was seen as too easy to
-abuse.
-
-For each parameter a list of performance requests is maintained along with
-an aggregated target value.  The aggregated target value is updated with
-changes to the request list or elements of the list.  Typically the
-aggregated target value is simply the max or min of the request values held
-in the parameter list elements.
+A global list of CPU latency QoS requests is maintained along with an aggregated
+(effective) target value.  The aggregated target value is updated with changes
+to the request list or elements of the list.  For CPU latency QoS, the
+aggregated target value is simply the min of the request values held in the list
+elements.
+
 Note: the aggregated target value is implemented as an atomic variable so that
 reading the aggregated value does not require any locking mechanism.
 
+From kernel space the use of this interface is simple:
 
-From kernel mode the use of this interface is simple:
-
-void pm_qos_add_request(handle, param_class, target_value):
-  Will insert an element into the list for that identified PM QoS class with the
-  target value.  Upon change to this list the new target is recomputed and any
-  registered notifiers are called only if the target value is now different.
-  Clients of pm_qos need to save the returned handle for future use in other
-  pm_qos API functions.
+void cpu_latency_qos_add_request(handle, target_value):
+  Will insert an element into the CPU latency QoS list with the target value.
+  Upon change to this list the new target is recomputed and any registered
+  notifiers are called only if the target value is now different.
+  Clients of PM QoS need to save the returned handle for future use in other
+  PM QoS API functions.
 
-void pm_qos_update_request(handle, new_target_value):
+void cpu_latency_qos_update_request(handle, new_target_value):
   Will update the list element pointed to by the handle with the new target
   value and recompute the new aggregated target, calling the notification tree
   if the target is changed.
 
-void pm_qos_remove_request(handle):
+void cpu_latency_qos_remove_request(handle):
   Will remove the element.  After removal it will update the aggregate target
   and call the notification tree if the target was changed as a result of
   removing the request.
 
-int pm_qos_request(param_class):
-  Returns the aggregated value for a given PM QoS class.
+int cpu_latency_qos_limit():
+  Returns the aggregated value for the CPU latency QoS.
+
+int cpu_latency_qos_request_active(handle):
+  Returns if the request is still active, i.e. it has not been removed from the
+  CPU latency QoS list.
 
-int pm_qos_request_active(handle):
-  Returns if the request is still active, i.e. it has not been removed from a
-  PM QoS class constraints list.
+int cpu_latency_qos_add_notifier(notifier):
+  Adds a notification callback function to the CPU latency QoS. The callback is
+  called when the aggregated value for the CPU latency QoS is changed.
 
-int pm_qos_add_notifier(param_class, notifier):
-  Adds a notification callback function to the PM QoS class. The callback is
-  called when the aggregated value for the PM QoS class is changed.
+int cpu_latency_qos_remove_notifier(notifier):
+  Removes the notification callback function from the CPU latency QoS.
 
-int pm_qos_remove_notifier(int param_class, notifier):
-  Removes the notification callback function for the PM QoS class.
 
+From user space:
 
-From user mode:
+The infrastructure exposes one device node, /dev/cpu_dma_latency, for the CPU
+latency QoS.
 
-Only processes can register a pm_qos request.  To provide for automatic
+Only processes can register a PM QoS request.  To provide for automatic
 cleanup of a process, the interface requires the process to register its
-parameter requests in the following way:
+parameter requests as follows.
 
-To register the default pm_qos target for the specific parameter, the process
-must open /dev/cpu_dma_latency
+To register the default PM QoS target for the CPU latency QoS, the process must
+open /dev/cpu_dma_latency.
 
 As long as the device node is held open that process has a registered
 request on the parameter.
 
-To change the requested target value the process needs to write an s32 value to
-the open device node.  Alternatively the user mode program could write a hex
-string for the value using 10 char long format e.g. "0x12345678".  This
-translates to a pm_qos_update_request call.
+To change the requested target value, the process needs to write an s32 value to
+the open device node.  Alternatively, it can write a hex string for the value
+using the 10 char long format e.g. "0x12345678".  This translates to a
+cpu_latency_qos_update_request() call.
 
 To remove the user mode request for a target value simply close the device
 node.
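
In kernel space, the renamed helpers documented above keep the old
struct pm_qos_request handle but drop the class argument.  A hypothetical
driver (the foo_* names are made up for illustration) would use the
add/update/remove lifecycle roughly as follows:

    #include <linux/pm_qos.h>

    static struct pm_qos_request foo_latency_req;

    static void foo_start_low_latency(void)
    {
            /* Keep the effective CPU latency limit at or below 50 usec. */
            cpu_latency_qos_add_request(&foo_latency_req, 50);
    }

    static void foo_retune(s32 new_limit_us)
    {
            /* Recomputes the aggregated (minimum) value if it changes. */
            cpu_latency_qos_update_request(&foo_latency_req, new_limit_us);
    }

    static void foo_stop_low_latency(void)
    {
            cpu_latency_qos_remove_request(&foo_latency_req);
    }
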
index 2ef3189..f45bf11 100644
@@ -75,16 +75,6 @@ The PM QoS events are used for QoS add/update/remove request and for
 target/flags update.
 ::
 
-  pm_qos_add_request                 "pm_qos_class=%s value=%d"
-  pm_qos_update_request              "pm_qos_class=%s value=%d"
-  pm_qos_remove_request              "pm_qos_class=%s value=%d"
-  pm_qos_update_request_timeout      "pm_qos_class=%s value=%d, timeout_us=%ld"
-
-The first parameter gives the QoS class name (e.g. "CPU_DMA_LATENCY").
-The second parameter is value to be added/updated/removed.
-The third parameter is timeout value in usec.
-::
-
   pm_qos_update_target               "action=%s prev_value=%d curr_value=%d"
   pm_qos_update_flags                "action=%s prev_value=0x%x curr_value=0x%x"
 
@@ -92,7 +82,7 @@ The first parameter gives the QoS action name (e.g. "ADD_REQ").
 The second parameter is the previous QoS value.
 The third parameter is the current QoS value to update.
 
-And, there are also events used for device PM QoS add/update/remove request.
+There are also events used for device PM QoS add/update/remove request.
 ::
 
   dev_pm_qos_add_request             "device=%s type=%s new_value=%d"
@@ -103,3 +93,12 @@ The first parameter gives the device name which tries to add/update/remove
 QoS requests.
 The second parameter gives the request type (e.g. "DEV_PM_QOS_RESUME_LATENCY").
 The third parameter is value to be added/updated/removed.
+
+And, there are events used for CPU latency QoS add/update/remove request.
+::
+
+  pm_qos_add_request        "value=%d"
+  pm_qos_update_request     "value=%d"
+  pm_qos_remove_request     "value=%d"
+
+The parameter is the value to be added/updated/removed.
index a6fbdf3..cc1d18c 100644
@@ -4073,7 +4073,6 @@ F:        drivers/scsi/snic/
 CISCO VIC ETHERNET NIC DRIVER
 M:     Christian Benvenuti <benve@cisco.com>
 M:     Govindarajulu Varadarajan <_govind@gmx.com>
-M:     Parvi Kaustubhi <pkaustub@cisco.com>
 S:     Supported
 F:     drivers/net/ethernet/cisco/enic/
 
@@ -4572,7 +4571,7 @@ F:        drivers/infiniband/hw/cxgb4/
 F:     include/uapi/rdma/cxgb4-abi.h
 
 CXGB4VF ETHERNET DRIVER (CXGB4VF)
-M:     Casey Leedom <leedom@chelsio.com>
+M:     Vishal Kulkarni <vishal@gmail.com>
 L:     netdev@vger.kernel.org
 W:     http://www.chelsio.com
 S:     Supported
@@ -6198,7 +6197,6 @@ S:        Supported
 F:     drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net)
-M:     Sathya Perla <sathya.perla@broadcom.com>
 M:     Ajit Khaparde <ajit.khaparde@broadcom.com>
 M:     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
 M:     Somnath Kotur <somnath.kotur@broadcom.com>
@@ -11119,7 +11117,7 @@ M:      Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
-Q:     http://patchwork.linux-mips.org/project/linux-mips/list/
+Q:     https://patchwork.kernel.org/project/linux-mips/list/
 S:     Maintained
 F:     Documentation/devicetree/bindings/mips/
 F:     Documentation/mips/
index e25db57..e56bf7e 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc7
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -1804,7 +1804,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 -include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
 
-endif # config-targets
+endif # config-build
 endif # mixed-build
 endif # need-sub-make
 
index ff2a393..7124ab8 100644
@@ -154,7 +154,7 @@ config ARC_CPU_HS
        help
          Support for ARC HS38x Cores based on ARCv2 ISA
          The notable features are:
-           - SMP configurations of upto 4 core with coherency
+           - SMP configurations of up to 4 cores with coherency
            - Optional L2 Cache and IO-Coherency
            - Revised Interrupt Architecture (multiple priorites, reg banks,
                auto stack switch, auto regfile save/restore)
@@ -192,7 +192,7 @@ config ARC_SMP_HALT_ON_RESET
        help
          In SMP configuration cores can be configured as Halt-on-reset
          or they could all start at same time. For Halt-on-reset, non
-         masters are parked until Master kicks them so they can start of
+         masters are parked until Master kicks them so they can start off
          at designated entry point. For other case, all jump to common
          entry point and spin wait for Master's signal.
 
index 07f26ed..f7a978d 100644
@@ -21,8 +21,6 @@ CONFIG_MODULES=y
 CONFIG_MODULE_FORCE_LOAD=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_PLAT_EZNPS=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=4096
index 5dd470b..bf39a00 100644
@@ -20,8 +20,6 @@ CONFIG_ISA_ARCOMPACT=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci"
 # CONFIG_COMPACTION is not set
 CONFIG_NET=y
index 3532e86..7121bd7 100644
@@ -19,8 +19,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs"
 # CONFIG_COMPACTION is not set
index d90448b..f9863b2 100644
@@ -14,8 +14,6 @@ CONFIG_PERF_EVENTS=y
 CONFIG_KPROBES=y
 CONFIG_MODULES=y
 # CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
 # CONFIG_ARC_TIMERS_64BIT is not set
index 6434725..006bcf8 100644
@@ -43,6 +43,8 @@ extern void fpu_init_task(struct pt_regs *regs);
 
 #endif /* !CONFIG_ISA_ARCOMPACT */
 
+struct task_struct;
+
 extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
 
 #else  /* !CONFIG_ARC_FPU_SAVE_RESTORE */
index d9ee43c..fe19f1d 100644
@@ -29,6 +29,8 @@
 .endm
 
 #define ASM_NL          `      /* use '`' to mark new line in macro */
+#define __ALIGN                .align 4
+#define __ALIGN_STR    __stringify(__ALIGN)
 
 /* annotation for data we want in DCCM - if enabled in .config */
 .macro ARCFP_DATA nm
index e1c6474..aa41af6 100644
@@ -8,11 +8,11 @@
 #include <linux/delay.h>
 #include <linux/root_dev.h>
 #include <linux/clk.h>
-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/cpu.h>
+#include <linux/of_clk.h>
 #include <linux/of_fdt.h>
 #include <linux/of.h>
 #include <linux/cache.h>
index b79886a..d299950 100644
@@ -104,8 +104,7 @@ static void show_faulting_vma(unsigned long address)
                        if (IS_ERR(nm))
                                nm = "?";
                }
-               pr_info("    @off 0x%lx in [%s]\n"
-                       "    VMA: 0x%08lx to 0x%08lx\n",
+               pr_info("  @off 0x%lx in [%s]  VMA: 0x%08lx to 0x%08lx\n",
                        vma->vm_start < TASK_UNMAPPED_BASE ?
                                address : address - vma->vm_start,
                        nm, vma->vm_start, vma->vm_end);
@@ -120,8 +119,6 @@ static void show_ecr_verbose(struct pt_regs *regs)
        unsigned int vec, cause_code;
        unsigned long address;
 
-       pr_info("\n[ECR   ]: 0x%08lx => ", regs->event);
-
        /* For Data fault, this is data address not instruction addr */
        address = current->thread.fault_address;
 
@@ -130,10 +127,10 @@ static void show_ecr_verbose(struct pt_regs *regs)
 
        /* For DTLB Miss or ProtV, display the memory involved too */
        if (vec == ECR_V_DTLB_MISS) {
-               pr_cont("Invalid %s @ 0x%08lx by insn @ 0x%08lx\n",
+               pr_cont("Invalid %s @ 0x%08lx by insn @ %pS\n",
                       (cause_code == 0x01) ? "Read" :
                       ((cause_code == 0x02) ? "Write" : "EX"),
-                      address, regs->ret);
+                      address, (void *)regs->ret);
        } else if (vec == ECR_V_ITLB_MISS) {
                pr_cont("Insn could not be fetched\n");
        } else if (vec == ECR_V_MACH_CHK) {
@@ -191,31 +188,31 @@ void show_regs(struct pt_regs *regs)
 
        show_ecr_verbose(regs);
 
-       pr_info("[EFA   ]: 0x%08lx\n[BLINK ]: %pS\n[ERET  ]: %pS\n",
-               current->thread.fault_address,
-               (void *)regs->blink, (void *)regs->ret);
-
        if (user_mode(regs))
                show_faulting_vma(regs->ret); /* faulting code, not data */
 
-       pr_info("[STAT32]: 0x%08lx", regs->status32);
+       pr_info("ECR: 0x%08lx EFA: 0x%08lx ERET: 0x%08lx\n",
+               regs->event, current->thread.fault_address, regs->ret);
+
+       pr_info("STAT32: 0x%08lx", regs->status32);
 
 #define STS_BIT(r, bit)        r->status32 & STATUS_##bit##_MASK ? #bit" " : ""
 
 #ifdef CONFIG_ISA_ARCOMPACT
-       pr_cont(" : %2s%2s%2s%2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s%2s%2s%2s]",
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE),
                        STS_BIT(regs, A2), STS_BIT(regs, A1),
                        STS_BIT(regs, E2), STS_BIT(regs, E1));
 #else
-       pr_cont(" : %2s%2s%2s%2s\n",
+       pr_cont(" [%2s%2s%2s%2s]",
                        STS_BIT(regs, IE),
                        (regs->status32 & STATUS_U_MASK) ? "U " : "K ",
                        STS_BIT(regs, DE), STS_BIT(regs, AE));
 #endif
-       pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n",
-               regs->bta, regs->sp, regs->fp);
+       pr_cont("  BTA: 0x%08lx\n", regs->bta);
+       pr_info("BLK: %pS\n SP: 0x%08lx  FP: 0x%08lx\n",
+               (void *)regs->blink, regs->sp, regs->fp);
        pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
               regs->lp_start, regs->lp_end, regs->lp_count);
 
index db857d0..1fc32b6 100644
@@ -307,13 +307,15 @@ endif
 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
 prepare: stack_protector_prepare
 stack_protector_prepare: prepare0
-       $(eval KBUILD_CFLAGS += \
+       $(eval SSP_PLUGIN_CFLAGS := \
                -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell        \
                        awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
                                include/generated/asm-offsets.h)        \
                -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell     \
                        awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
                                include/generated/asm-offsets.h))
+       $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
+       $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
 endif
 
 all:   $(notdir $(KBUILD_IMAGE))
index da599c3..9c11e74 100644
@@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
                $(libfdt) $(libfdt_hdrs) hyp-stub.S
 
 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
-KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp-flags-y)
 CFLAGS_fdt_rw.o := $(nossp-flags-y)
 CFLAGS_fdt_wip.o := $(nossp-flags-y)
 
-ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
+ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
+            -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
 asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
index c89ac1b..e0330a2 100644
@@ -95,6 +95,8 @@ static bool __init cntvct_functional(void)
         */
        np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
        if (!np)
+               np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+       if (!np)
                goto out_put;
 
        if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
index 95b2e1c..f8016e3 100644
@@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user)
 
 ENDPROC(arm_copy_from_user)
 
-       .pushsection .fixup,"ax"
+       .pushsection .text.fixup,"ax"
        .align 0
        copy_abort_preamble
        ldmfd   sp!, {r1, r2, r3}
index 6082ae0..d237162 100644
@@ -20,6 +20,8 @@
 };
 
 &fman0 {
+       fsl,erratum-a050385;
+
        /* these aliases provide the FMan ports mapping */
        enet0: ethernet@e0000 {
        };
index e4d8624..d79ce6d 100644
@@ -29,11 +29,9 @@ typedef struct {
  */
 #define ASID(mm)       ((mm)->context.id.counter & 0xffff)
 
-extern bool arm64_use_ng_mappings;
-
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
-       return arm64_use_ng_mappings;
+       return cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
 }
 
 typedef void (*bp_hardening_cb_t)(void);
index 6f87839..1305e28 100644
 
 #include <asm/pgtable-types.h>
 
+extern bool arm64_use_ng_mappings;
+
 #define _PROT_DEFAULT          (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define _PROT_SECT_DEFAULT     (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PTE_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
-#define PMD_MAYBE_NG           (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
+#define PTE_MAYBE_NG           (arm64_use_ng_mappings ? PTE_NG : 0)
+#define PMD_MAYBE_NG           (arm64_use_ng_mappings ? PMD_SECT_NG : 0)
 
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
index 1dd22da..803039d 100644
@@ -25,8 +25,8 @@
 #define __NR_compat_gettimeofday       78
 #define __NR_compat_sigreturn          119
 #define __NR_compat_rt_sigreturn       173
-#define __NR_compat_clock_getres       247
 #define __NR_compat_clock_gettime      263
+#define __NR_compat_clock_getres       264
 #define __NR_compat_clock_gettime64    403
 #define __NR_compat_clock_getres_time64        406
 
index d4ed9a1..5407bf5 100644
@@ -958,11 +958,22 @@ void tick_broadcast(const struct cpumask *mask)
 }
 #endif
 
+/*
+ * The number of CPUs online, not counting this CPU (which may not be
+ * fully online and so not counted in num_online_cpus()).
+ */
+static inline unsigned int num_other_online_cpus(void)
+{
+       unsigned int this_cpu_online = cpu_online(smp_processor_id());
+
+       return num_online_cpus() - this_cpu_online;
+}
+
 void smp_send_stop(void)
 {
        unsigned long timeout;
 
-       if (num_online_cpus() > 1) {
+       if (num_other_online_cpus()) {
                cpumask_t mask;
 
                cpumask_copy(&mask, cpu_online_mask);
@@ -975,10 +986,10 @@ void smp_send_stop(void)
 
        /* Wait up to one second for other CPUs to stop */
        timeout = USEC_PER_SEC;
-       while (num_online_cpus() > 1 && timeout--)
+       while (num_other_online_cpus() && timeout--)
                udelay(1);
 
-       if (num_online_cpus() > 1)
+       if (num_other_online_cpus())
                pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
                        cpumask_pr_args(cpu_online_mask));
 
@@ -1001,7 +1012,11 @@ void crash_smp_send_stop(void)
 
        cpus_stopped = 1;
 
-       if (num_online_cpus() == 1) {
+       /*
+        * If this cpu is the only one alive at this point in time, online or
+        * not, there are no stop messages to be sent around, so just back out.
+        */
+       if (num_other_online_cpus() == 0) {
                sdei_mask_local_cpu();
                return;
        }
@@ -1009,7 +1024,7 @@ void crash_smp_send_stop(void)
        cpumask_copy(&mask, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &mask);
 
-       atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+       atomic_set(&waiting_for_crash_ipi, num_other_online_cpus());
 
        pr_crit("SMP: stopping secondary CPUs\n");
        smp_cross_call(&mask, IPI_CPU_CRASH_STOP);
index 37b9316..c340f94 100644
@@ -4,6 +4,8 @@
 #include "jz4780.dtsi"
 #include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/regulator/active-semi,8865-regulator.h>
 
 / {
        compatible = "img,ci20", "ingenic,jz4780";
 
                regulators {
                        vddcore: SUDCDC1 {
-                               regulator-name = "VDDCORE";
+                               regulator-name = "DCDC_REG1";
                                regulator-min-microvolt = <1100000>;
                                regulator-max-microvolt = <1100000>;
                                regulator-always-on;
                        };
                        vddmem: SUDCDC2 {
-                               regulator-name = "VDDMEM";
+                               regulator-name = "DCDC_REG2";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
                        vcc_33: SUDCDC3 {
-                               regulator-name = "VCC33";
+                               regulator-name = "DCDC_REG3";
                                regulator-min-microvolt = <3300000>;
                                regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_50: SUDCDC4 {
-                               regulator-name = "VCC50";
+                               regulator-name = "SUDCDC_REG4";
                                regulator-min-microvolt = <5000000>;
                                regulator-max-microvolt = <5000000>;
                                regulator-always-on;
                        };
                        vcc_25: LDO_REG5 {
-                               regulator-name = "VCC25";
+                               regulator-name = "LDO_REG5";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        wifi_io: LDO_REG6 {
-                               regulator-name = "WIFIIO";
+                               regulator-name = "LDO_REG6";
                                regulator-min-microvolt = <2500000>;
                                regulator-max-microvolt = <2500000>;
                                regulator-always-on;
                        };
                        vcc_28: LDO_REG7 {
-                               regulator-name = "VCC28";
+                               regulator-name = "LDO_REG7";
                                regulator-min-microvolt = <2800000>;
                                regulator-max-microvolt = <2800000>;
                                regulator-always-on;
                        };
                        vcc_15: LDO_REG8 {
-                               regulator-name = "VCC15";
+                               regulator-name = "LDO_REG8";
                                regulator-min-microvolt = <1500000>;
                                regulator-max-microvolt = <1500000>;
                                regulator-always-on;
                        };
-                       vcc_18: LDO_REG9 {
-                               regulator-name = "VCC18";
-                               regulator-min-microvolt = <1800000>;
-                               regulator-max-microvolt = <1800000>;
+                       vrtc_18: LDO_REG9 {
+                               regulator-name = "LDO_REG9";
+                               /* Despite the datasheet stating 3.3V
+                                * for REG9 and the driver expecting that,
+                                * REG9 outputs 1.8V.
+                                * Likely the CI20 uses a proprietary,
+                                * factory-programmed chip variant.
+                                * Since this is a simple on/off LDO, the
+                                * exact values do not matter.
+                                */
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
                                regulator-always-on;
                        };
                        vcc_11: LDO_REG10 {
-                               regulator-name = "VCC11";
-                               regulator-min-microvolt = <1100000>;
-                               regulator-max-microvolt = <1100000>;
+                               regulator-name = "LDO_REG10";
+                               regulator-min-microvolt = <1200000>;
+                               regulator-max-microvolt = <1200000>;
                                regulator-always-on;
                        };
                };
                rtc@51 {
                        compatible = "nxp,pcf8563";
                        reg = <0x51>;
-                       interrupts = <110>;
+
+                       interrupt-parent = <&gpf>;
+                       interrupts = <30 IRQ_TYPE_LEVEL_LOW>;
                };
 };
 
index 1ac2752..a7b469d 100644
@@ -605,7 +605,8 @@ static void __init bootcmdline_init(char **cmdline_p)
         * If we're configured to take boot arguments from DT, look for those
         * now.
         */
-       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB))
+       if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+           IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
                of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
 #endif
 
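For context, IS_ENABLED() expands to a compile-time 1 for options set to y or m and to 0 otherwise, so the OR of the two config checks above is folded away entirely when both options are off. A minimal sketch of the idiom (CONFIG_FOO and foo_init() are illustrative, not from this patch):

        if (IS_ENABLED(CONFIG_FOO))     /* 1 for =y or =m, 0 when unset */
                foo_init();             /* branch discarded at compile time */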
index 729a0f1..db3a873 100644
@@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
 {
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
 
+       kvmppc_mmu_destroy_pr(vcpu);
        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
index 1af96fb..302e9dc 100644
@@ -759,7 +759,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
        return 0;
 
 out_vcpu_uninit:
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
        return err;
 }
@@ -792,7 +791,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
        kvmppc_core_vcpu_free(vcpu);
 
-       kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
 }
 
index db5664d..d2bed3f 100644
@@ -120,12 +120,6 @@ static void __init kasan_unmap_early_shadow_vmalloc(void)
        unsigned long k_cur;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
 
-       if (!early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
-               int ret = kasan_init_shadow_page_tables(k_start, k_end);
-
-               if (ret)
-                       panic("kasan: kasan_init_shadow_page_tables() failed");
-       }
        for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                pte_t *ptep = pte_offset_kernel(pmd, k_cur);
@@ -143,7 +137,8 @@ void __init kasan_mmu_init(void)
        int ret;
        struct memblock_region *reg;
 
-       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE) ||
+           IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
                if (ret)
index d7ff30e..c2e6d4b 100644
@@ -3268,7 +3268,10 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        /* Initial reset is a superset of the normal reset */
        kvm_arch_vcpu_ioctl_normal_reset(vcpu);
 
-       /* this equals initial cpu reset in pop, but we don't switch to ESA */
+       /*
+        * This equals initial cpu reset in PoP, but we don't switch to ESA.
+        * We not only reset the internal data, but also ...
+        */
        vcpu->arch.sie_block->gpsw.mask = 0;
        vcpu->arch.sie_block->gpsw.addr = 0;
        kvm_s390_set_prefix(vcpu, 0);
@@ -3278,6 +3281,19 @@ static void kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, sizeof(vcpu->arch.sie_block->gcr));
        vcpu->arch.sie_block->gcr[0] = CR0_INITIAL_MASK;
        vcpu->arch.sie_block->gcr[14] = CR14_INITIAL_MASK;
+
+       /* ... the data in sync regs */
+       memset(vcpu->run->s.regs.crs, 0, sizeof(vcpu->run->s.regs.crs));
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.crs[0] = CR0_INITIAL_MASK;
+       vcpu->run->s.regs.crs[14] = CR14_INITIAL_MASK;
+       vcpu->run->psw_addr = 0;
+       vcpu->run->psw_mask = 0;
+       vcpu->run->s.regs.todpr = 0;
+       vcpu->run->s.regs.cputm = 0;
+       vcpu->run->s.regs.ckc = 0;
+       vcpu->run->s.regs.pp = 0;
+       vcpu->run->s.regs.gbea = 1;
        vcpu->run->s.regs.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
index 94df086..513a555 100644
@@ -194,9 +194,10 @@ avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1)
 sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1)
 sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1)
+adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr)
+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr)
 
 KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE)
 
index b69e00b..8c2e9ea 100644
@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
 avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no)
 sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no)
 sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no)
+adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no)
 
 obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o
 
@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) += aegis128-aesni.o
 
 obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o
 obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o
-obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+
+# These modules require the assembler to support ADX.
+ifeq ($(adx_supported),yes)
+       obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o
+endif
 
 # These modules require assembler to support AVX.
 ifeq ($(avx_supported),yes)
index a6ea07f..4d867a7 100644
@@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
 
        /*
         * NB and Last level cache counters (MSRs) are shared across all cores
-        * that share the same NB / Last level cache. Interrupts can be directed
-        * to a single target core, however, event counts generated by processes
-        * running on other cores cannot be masked out. So we do not support
-        * sampling and per-thread events.
+        * that share the same NB / Last level cache.  On family 16h and below,
+        * interrupts can be directed to a single target core; however, event
+        * counts generated by processes running on other cores cannot be masked
+        * out. So we do not support sampling and per-thread events via
+        * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
         */
-       if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-               return -EINVAL;
-
-       /* and we do not enable counter overflow interrupts */
        hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
        hwc->idx = -1;
 
@@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct pmu amd_llc_pmu = {
@@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
        .start          = amd_uncore_start,
        .stop           = amd_uncore_stop,
        .read           = amd_uncore_read,
-       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE,
+       .capabilities   = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
 };
 
 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
index 2a8f2bd..c06e835 100644
@@ -360,7 +360,6 @@ struct x86_emulate_ctxt {
        u64 d;
        unsigned long _eip;
        struct operand memop;
-       /* Fields above regs are cleared together. */
        unsigned long _regs[NR_VCPU_REGS];
        struct operand *memopp;
        struct fetch_cache fetch;
index 2c5676b..48293d1 100644
@@ -838,13 +838,15 @@ static void free_moved_vector(struct apic_chip_data *apicd)
        bool managed = apicd->is_managed;
 
        /*
-        * This should never happen. Managed interrupts are not
-        * migrated except on CPU down, which does not involve the
-        * cleanup vector. But try to keep the accounting correct
-        * nevertheless.
+        * Managed interrupts are usually not migrated away
+        * from an online CPU, but CPU isolation 'managed_irq'
+        * can make that happen.
+        * 1) Activation does not take the isolation into account
+        *    to keep the code simple
+        * 2) Migration away from an isolated CPU can happen when
+        *    a non-isolated CPU which is in the calculated
+        *    affinity mask comes online.
         */
-       WARN_ON_ONCE(managed);
-
        trace_vector_free_moved(apicd->irq, cpu, vector, managed);
        irq_matrix_free(vector_matrix, cpu, vector, managed);
        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED;
index 5627b10..f996ffb 100644
@@ -493,17 +493,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
                        return;
 
                if ((val & 3UL) == 1UL) {
-                       /* PPIN available but disabled: */
+                       /* PPIN locked in disabled mode */
                        return;
                }
 
-               /* If PPIN is disabled, but not locked, try to enable: */
-               if (!(val & 3UL)) {
+               /* If PPIN is disabled, try to enable */
+               if (!(val & 2UL)) {
                        wrmsrl_safe(MSR_PPIN_CTL,  val | 2UL);
                        rdmsrl_safe(MSR_PPIN_CTL, &val);
                }
 
-               if ((val & 3UL) == 2UL)
+               /* Is the enable bit set? */
+               if (val & 2UL)
                        set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
        }
 }
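For readers following the MSR arithmetic above: in MSR_PPIN_CTL, bit 0 is the lock bit and bit 1 is the enable bit, so (val & 3UL) == 1UL means "locked while disabled" and setting bit 1 is the enable attempt. A hedged sketch of the three states (the helper name is illustrative, not kernel code):

        /* Illustrative decoder for MSR_PPIN_CTL's low bits:
         * bit 0 = lock, bit 1 = enable.  Mirrors intel_ppin_init() above. */
        static const char *ppin_state(u64 val)
        {
                if ((val & 3UL) == 1UL)
                        return "locked with PPIN disabled";     /* give up */
                if (val & 2UL)
                        return "enabled";               /* set feature bit */
                return "disabled but unlocked";         /* try enabling */
        }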
index 58b4ee3..f36dc07 100644
@@ -486,9 +486,14 @@ static int thermal_throttle_offline(unsigned int cpu)
 {
        struct thermal_state *state = &per_cpu(thermal_state, cpu);
        struct device *dev = get_cpu_device(cpu);
+       u32 l;
+
+       /* Mask the thermal vector before draining possibly pending work */
+       l = apic_read(APIC_LVTTHMR);
+       apic_write(APIC_LVTTHMR, l | APIC_LVT_MASKED);
 
-       cancel_delayed_work(&state->package_throttle.therm_work);
-       cancel_delayed_work(&state->core_throttle.therm_work);
+       cancel_delayed_work_sync(&state->package_throttle.therm_work);
+       cancel_delayed_work_sync(&state->core_throttle.therm_work);
 
        state->package_throttle.rate_control_active = false;
        state->core_throttle.rate_control_active = false;
index 1bb4927..9fea075 100644
@@ -68,7 +68,7 @@ config KVM_WERROR
        depends on (X86_64 && !KASAN) || !COMPILE_TEST
        depends on EXPERT
        help
-         Add -Werror to the build flags for (and only for) i915.ko.
+         Add -Werror to the build flags for KVM.
 
          If in doubt, say "N".
 
index dd19fb3..bc00642 100644
@@ -5173,6 +5173,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        ctxt->fetch.ptr = ctxt->fetch.data;
        ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
+       ctxt->intercept = x86_intercept_none;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
        else {
index 7668fed..750ff0b 100644
@@ -378,12 +378,15 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
                if (e->fields.delivery_mode == APIC_DM_FIXED) {
                        struct kvm_lapic_irq irq;
 
-                       irq.shorthand = APIC_DEST_NOSHORT;
                        irq.vector = e->fields.vector;
                        irq.delivery_mode = e->fields.delivery_mode << 8;
-                       irq.dest_id = e->fields.dest_id;
                        irq.dest_mode =
                            kvm_lapic_irq_dest_mode(!!e->fields.dest_mode);
+                       irq.level = false;
+                       irq.trig_mode = e->fields.trig_mode;
+                       irq.shorthand = APIC_DEST_NOSHORT;
+                       irq.dest_id = e->fields.dest_id;
+                       irq.msi_redir_hint = false;
                        bitmap_zero(&vcpu_bitmap, 16);
                        kvm_bitmap_or_dest_vcpus(ioapic->kvm, &irq,
                                                 &vcpu_bitmap);
index 24c0b2b..9100050 100644
@@ -6312,7 +6312,8 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu,
        enum exit_fastpath_completion *exit_fastpath)
 {
        if (!is_guest_mode(vcpu) &&
-               to_svm(vcpu)->vmcb->control.exit_code == EXIT_REASON_MSR_WRITE)
+           to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
+           to_svm(vcpu)->vmcb->control.exit_info_1)
                *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
 }
 
index e920d78..9750e59 100644
@@ -224,7 +224,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
                return;
 
        kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
-       vmx->nested.hv_evmcs_vmptr = -1ull;
+       vmx->nested.hv_evmcs_vmptr = 0;
        vmx->nested.hv_evmcs = NULL;
 }
 
@@ -1923,7 +1923,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
        if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
                return 1;
 
-       if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
+       if (unlikely(!vmx->nested.hv_evmcs ||
+                    evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
                if (!vmx->nested.hv_evmcs)
                        vmx->nested.current_vmptr = -1ull;
 
index 40b1e61..26f8f31 100644
@@ -2338,6 +2338,17 @@ static void hardware_disable(void)
        kvm_cpu_vmxoff();
 }
 
+/*
+ * There is no X86_FEATURE for SGX yet, so we query CPUID directly
+ * instead of going through cpu_has(), to ensure KVM is trapping
+ * ENCLS whenever it's supported in hardware.  It does not matter whether
+ * the host OS supports or has enabled SGX.
+ */
+static bool cpu_has_sgx(void)
+{
+       return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
+}
+
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
                                      u32 msr, u32 *result)
 {
@@ -2418,8 +2429,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
                        SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
                        SECONDARY_EXEC_PT_USE_GPA |
                        SECONDARY_EXEC_PT_CONCEAL_VMX |
-                       SECONDARY_EXEC_ENABLE_VMFUNC |
-                       SECONDARY_EXEC_ENCLS_EXITING;
+                       SECONDARY_EXEC_ENABLE_VMFUNC;
+               if (cpu_has_sgx())
+                       opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
index 5de2006..3156e25 100644
@@ -7195,10 +7195,12 @@ static void kvm_timer_init(void)
 
                cpu = get_cpu();
                policy = cpufreq_cpu_get(cpu);
-               if (policy && policy->cpuinfo.max_freq)
-                       max_tsc_khz = policy->cpuinfo.max_freq;
+               if (policy) {
+                       if (policy->cpuinfo.max_freq)
+                               max_tsc_khz = policy->cpuinfo.max_freq;
+                       cpufreq_cpu_put(policy);
+               }
                put_cpu();
-               cpufreq_cpu_put(policy);
 #endif
                cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
                                          CPUFREQ_TRANSITION_NOTIFIER);
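cpufreq_cpu_get() returns the policy with a reference held (or NULL), so every successful get must be balanced by cpufreq_cpu_put(); the fix above moves the put inside the NULL check instead of calling it unconditionally. A minimal sketch of the pattern:

        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        if (policy) {
                khz = policy->cpuinfo.max_freq; /* safe while ref is held */
                cpufreq_cpu_put(policy);        /* drop the reference */
        }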
index fa4ea09..629fdf1 100644
@@ -190,7 +190,7 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
        return pmd_k;
 }
 
-void vmalloc_sync_all(void)
+static void vmalloc_sync(void)
 {
        unsigned long address;
 
@@ -217,6 +217,16 @@ void vmalloc_sync_all(void)
        }
 }
 
+void vmalloc_sync_mappings(void)
+{
+       vmalloc_sync();
+}
+
+void vmalloc_sync_unmappings(void)
+{
+       vmalloc_sync();
+}
+
 /*
  * 32-bit:
  *
@@ -319,11 +329,23 @@ out:
 
 #else /* CONFIG_X86_64: */
 
-void vmalloc_sync_all(void)
+void vmalloc_sync_mappings(void)
 {
+       /*
+        * 64-bit mappings might allocate new p4d/pud pages
+        * that need to be propagated to all tasks' PGDs.
+        */
        sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 }
 
+void vmalloc_sync_unmappings(void)
+{
+       /*
+        * Unmappings never allocate or free p4d/pud pages.
+        * No work is required here.
+        */
+}
+
 /*
  * 64-bit:
  *
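The old vmalloc_sync_all() is thus split into one helper for mappings and one for unmappings, letting 64-bit make the unmap side a no-op. The mapping variant is for callers that must make fresh vmalloc mappings visible in every page table before use, as in the GHES change later in this diff; a hedged caller-side sketch:

        /* Sketch: propagate new vmalloc mappings to all PGDs, e.g. before
         * an NMI handler may dereference them. */
        void *buf = vmalloc(len);
        vmalloc_sync_mappings();        /* was: vmalloc_sync_all() */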
index 44e4beb..935a91e 100644
@@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
        return 0;
 }
 
+/*
+ * The EFI runtime services data area is not covered by walk_mem_res(), but must
+ * be mapped encrypted when SEV is active.
+ */
+static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
+{
+       if (!sev_active())
+               return;
+
+       if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
+               desc->flags |= IORES_MAP_ENCRYPTED;
+}
+
 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
 {
        struct ioremap_desc *desc = arg;
@@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
  * To avoid multiple resource walks, this function walks resources marked as
  * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
  * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
+ *
+ * After that, deal with misc other ranges in __ioremap_check_other() which do
+ * not fall into the above category.
  */
 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
                                struct ioremap_desc *desc)
@@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
        memset(desc, 0, sizeof(struct ioremap_desc));
 
        walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
+
+       __ioremap_check_other(addr, desc);
 }
 
 /*
index 9e24445..526f70f 100644
@@ -265,7 +265,7 @@ static void iosf_mbi_reset_semaphore(void)
                            iosf_mbi_sem_address, 0, PUNIT_SEMAPHORE_BIT))
                dev_err(&mbi_pdev->dev, "Error P-Unit semaphore reset failed\n");
 
-       pm_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
 
        blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
                                     MBI_PMIC_BUS_ACCESS_END, NULL);
@@ -301,8 +301,8 @@ static void iosf_mbi_reset_semaphore(void)
  * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
  *    if this happens while the kernel itself is accessing the PMIC I2C bus
  *    the SoC hangs.
- *    As the third step we call pm_qos_update_request() to disallow the CPU
- *    to enter C6 or C7.
+ *    As the third step we call cpu_latency_qos_update_request() to disallow the
+ *    CPU to enter C6 or C7.
  *
  * 5) The P-Unit has a PMIC bus semaphore which we can request to stop
  *    autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
@@ -338,7 +338,7 @@ int iosf_mbi_block_punit_i2c_access(void)
         * requires the P-Unit to talk to the PMIC and if this happens while
         * we're holding the semaphore, the SoC hangs.
         */
-       pm_qos_update_request(&iosf_mbi_pm_qos, 0);
+       cpu_latency_qos_update_request(&iosf_mbi_pm_qos, 0);
 
        /* host driver writes to side band semaphore register */
        ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
@@ -547,8 +547,7 @@ static int __init iosf_mbi_init(void)
 {
        iosf_debugfs_init();
 
-       pm_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
 
        return pci_register_driver(&iosf_mbi_pci_driver);
 }
@@ -561,7 +560,7 @@ static void __exit iosf_mbi_exit(void)
        pci_dev_put(mbi_pdev);
        mbi_pdev = NULL;
 
-       pm_qos_remove_request(&iosf_mbi_pm_qos);
+       cpu_latency_qos_remove_request(&iosf_mbi_pm_qos);
 }
 
 module_init(iosf_mbi_init);
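These hunks are part of the migration from the generic pm_qos_*() calls on PM_QOS_CPU_DMA_LATENCY to the dedicated CPU latency QoS helpers. A hedged sketch of the new calling sequence (the request variable name is illustrative):

        static struct pm_qos_request my_req;    /* illustrative name */

        cpu_latency_qos_add_request(&my_req, PM_QOS_DEFAULT_VALUE);
        cpu_latency_qos_update_request(&my_req, 0); /* forbid deep C-states */
        /* ... latency-critical section ... */
        cpu_latency_qos_update_request(&my_req, PM_QOS_DEFAULT_VALUE);
        cpu_latency_qos_remove_request(&my_req);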
index 27ca686..9a599cc 100644
@@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
                return false;
 
        /* is something in flight? */
-       if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
+       if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
                return false;
 
        return true;
index 856356b..74cedea 100644
@@ -398,6 +398,28 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
        WARN_ON(e && (rq->tag != -1));
 
        if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+               /*
+                * Firstly, a normal IO request is inserted into the scheduler
+                * queue or the sw queue; meanwhile the flush request is added
+                * directly to the dispatch queue (hctx->dispatch). Since there
+                * is at most one in-flight flush request per hw queue, it does
+                * not matter whether the flush request goes to the tail or the
+                * front of the dispatch queue.
+                *
+                * Secondly, in the NCQ case a flush request is a non-NCQ
+                * command, and queueing it fails while any normal IO request
+                * (an NCQ command) is in flight. Adding the flush rq to the
+                * front of hctx->dispatch, rather than the tail, makes it more
+                * likely that S_SCHED_RESTART adds extra wait time to the
+                * flush rq; that increases the chance of flush merging, so
+                * fewer flush requests are issued to the controller. It is
+                * observed that ~10% of the time is saved in blktests
+                * block/004 on a disk attached to an AHCI/NCQ drive when the
+                * flush rq is added to the front of hctx->dispatch.
+                *
+                * So simply queue the flush rq to the front of hctx->dispatch,
+                * which benefits flush-intensive workloads on NCQ hardware.
+                */
+               at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
                blk_mq_request_bypass_insert(rq, at_head, false);
                goto run;
        }
index ff62689..9c2e13c 100644
@@ -301,6 +301,42 @@ struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 }
 EXPORT_SYMBOL_GPL(disk_map_sector_rcu);
 
+/**
+ * disk_has_partitions
+ * @disk: gendisk of interest
+ *
+ * Walk through the partition table and check if valid partition exists.
+ *
+ * CONTEXT:
+ * Don't care.
+ *
+ * RETURNS:
+ * True if the gendisk has at least one valid non-zero size partition.
+ * Otherwise false.
+ */
+bool disk_has_partitions(struct gendisk *disk)
+{
+       struct disk_part_tbl *ptbl;
+       int i;
+       bool ret = false;
+
+       rcu_read_lock();
+       ptbl = rcu_dereference(disk->part_tbl);
+
+       /* Iterate partitions skipping the whole device at index 0 */
+       for (i = 1; i < ptbl->len; i++) {
+               if (rcu_dereference(ptbl->part[i])) {
+                       ret = true;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(disk_has_partitions);
+
 /*
  * Can be deleted altogether. Later.
  *
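A hedged usage sketch for the new helper (the surrounding driver code is illustrative):

        /* True only if at least one valid partition exists; index 0
         * (the whole device) is deliberately skipped. */
        if (disk_has_partitions(disk))
                pr_info("%s: partition table present\n", disk->disk_name);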
index 103acbb..24c9642 100644
@@ -171,7 +171,7 @@ int ghes_estatus_pool_init(int num_ghes)
         * New allocation must be visible in all pgd before it can be found by
         * an NMI allocating from the pool.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
 
        rc = gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
        if (rc)
index 110e41f..f303106 100644
@@ -448,6 +448,7 @@ static int binderfs_binder_ctl_create(struct super_block *sb)
        inode->i_uid = info->root_uid;
        inode->i_gid = info->root_gid;
 
+       refcount_set(&device->ref, 1);
        device->binderfs_inode = inode;
        device->miscdev.minor = minor;
 
index 8db8c0f..7af74fb 100644
@@ -91,7 +91,7 @@
 #ifdef GENERAL_DEBUG
 #define PRINTK(args...) printk(args)
 #else
-#define PRINTK(args...)
+#define PRINTK(args...) do {} while (0)
 #endif /* GENERAL_DEBUG */
 
 #ifdef EXTRA_DEBUG
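Replacing the empty expansion with do {} while (0) keeps the disabled macro statement-shaped: it still demands a trailing semicolon and cannot trigger empty-body warnings or surprises in unbraced if/else chains. A small illustration (err and retry() are placeholders):

        if (err)
                PRINTK("error %d\n", err);  /* expands to a real statement */
        else
                retry();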
index b8313a0..48efa7a 100644
@@ -111,7 +111,7 @@ config CFAG12864B
          If unsure, say N.
 
 config CFAG12864B_RATE
-       int "Refresh rate (hertz)"
+       int "Refresh rate (hertz)"
        depends on CFAG12864B
        default "20"
        ---help---
@@ -329,7 +329,7 @@ config PANEL_LCD_PROTO
 
 config PANEL_LCD_PIN_E
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
        range -17 17
        default 14
        ---help---
@@ -344,7 +344,7 @@ config PANEL_LCD_PIN_E
 
 config PANEL_LCD_PIN_RS
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
        range -17 17
        default 17
        ---help---
@@ -359,7 +359,7 @@ config PANEL_LCD_PIN_RS
 
 config PANEL_LCD_PIN_RW
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
-        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
        range -17 17
        default 16
        ---help---
@@ -374,7 +374,7 @@ config PANEL_LCD_PIN_RW
 
 config PANEL_LCD_PIN_SCL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
        range -17 17
        default 1
        ---help---
@@ -389,7 +389,7 @@ config PANEL_LCD_PIN_SCL
 
 config PANEL_LCD_PIN_SDA
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
-        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
        range -17 17
        default 2
        ---help---
@@ -404,12 +404,12 @@ config PANEL_LCD_PIN_SDA
 
 config PANEL_LCD_PIN_BL
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
-        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
+       int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
        range -17 17
        default 0
        ---help---
          This describes the number of the parallel port pin to which the LCD 'BL' signal
-          has been connected. It can be :
+         has been connected. It can be :
 
                  0 : no connection (eg: connected to ground)
              1..17 : directly connected to any of these pins on the DB25 plug
index 874c259..c0da382 100644
@@ -88,7 +88,7 @@ struct charlcd_priv {
                int len;
        } esc_seq;
 
-       unsigned long long drvdata[0];
+       unsigned long long drvdata[];
 };
 
 #define charlcd_to_priv(p)     container_of(p, struct charlcd_priv, lcd)
index efb928e..1cce409 100644
@@ -356,7 +356,6 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
        const struct of_device_id *match;
        const struct img_ascii_lcd_config *cfg;
        struct img_ascii_lcd_ctx *ctx;
-       struct resource *res;
        int err;
 
        match = of_match_device(img_ascii_lcd_matches, &pdev->dev);
@@ -378,8 +377,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev)
                                         &ctx->offset))
                        return -EINVAL;
        } else {
-               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               ctx->base = devm_ioremap_resource(&pdev->dev, res);
+               ctx->base = devm_platform_ioremap_resource(pdev, 0);
                if (IS_ERR(ctx->base))
                        return PTR_ERR(ctx->base);
        }
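devm_platform_ioremap_resource() is a one-call replacement for the removed two-step pattern; roughly (a sketch of the equivalence, not the helper's exact body):

        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base = devm_ioremap_resource(&pdev->dev, res);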
index 7fa654f..b5ce7b0 100644
@@ -363,10 +363,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
 {
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dma_mask)
-               pdev->dma_mask = DMA_BIT_MASK(32);
-       if (!pdev->dev.dma_mask)
-               pdev->dev.dma_mask = &pdev->dma_mask;
+       if (!pdev->dev.dma_mask) {
+               pdev->platform_dma_mask = DMA_BIT_MASK(32);
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
+       }
 };
 
 /**
@@ -662,20 +662,8 @@ struct platform_device *platform_device_register_full(
        pdev->dev.of_node_reused = pdevinfo->of_node_reused;
 
        if (pdevinfo->dma_mask) {
-               /*
-                * This memory isn't freed when the device is put,
-                * I don't have a nice idea for that though.  Conceptually
-                * dma_mask in struct device should not be a pointer.
-                * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
-                */
-               pdev->dev.dma_mask =
-                       kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
-               if (!pdev->dev.dma_mask)
-                       goto err;
-
-               kmemleak_ignore(pdev->dev.dma_mask);
-
-               *pdev->dev.dma_mask = pdevinfo->dma_mask;
+               pdev->platform_dma_mask = pdevinfo->dma_mask;
+               pdev->dev.dma_mask = &pdev->platform_dma_mask;
                pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
        }
 
@@ -700,7 +688,6 @@ struct platform_device *platform_device_register_full(
        if (ret) {
 err:
                ACPI_COMPANION_SET(&pdev->dev, NULL);
-               kfree(pdev->dev.dma_mask);
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }
index 5415876..0736248 100644
@@ -245,13 +245,20 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
-               blk_mq_stop_hw_queue(hctx);
+               /* Don't stop the queue if -ENOMEM: we may have failed to
+                * bounce the buffer due to global resource outage.
+                */
+               if (err == -ENOSPC)
+                       blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
-               /* Out of mem doesn't actually happen, since we fall back
-                * to direct descriptors */
-               if (err == -ENOMEM || err == -ENOSPC)
+               switch (err) {
+               case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
-               return BLK_STS_IOERR;
+               case -ENOMEM:
+                       return BLK_STS_RESOURCE;
+               default:
+                       return BLK_STS_IOERR;
+               }
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
index c78127c..638c693 100644
@@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
        else
                io.slave_addr = slave_addr;
 
-       io.irq = platform_get_irq(pdev, 0);
+       io.irq = platform_get_irq_optional(pdev, 0);
        if (io.irq > 0)
                io.irq_setup = ipmi_std_irq_setup;
        else
@@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
                io.irq = tmp;
                io.irq_setup = acpi_gpe_irq_setup;
        } else {
-               int irq = platform_get_irq(pdev, 0);
+               int irq = platform_get_irq_optional(pdev, 0);
 
                if (irq > 0) {
                        io.irq = irq;
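platform_get_irq_optional() behaves like platform_get_irq() except that it does not log an error when no IRQ is described, which fits these probe paths where the interrupt is genuinely optional. A hedged usage sketch (the helper names are illustrative):

        int irq = platform_get_irq_optional(pdev, 0);
        if (irq > 0)
                enable_irq_mode(io, irq);       /* illustrative */
        else
                fall_back_to_polling(io);       /* illustrative */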
index f0f2b59..95adf6c 100644
@@ -4713,7 +4713,7 @@ EXPORT_SYMBOL(of_clk_get_by_name);
  *
  * Returns: The number of clocks that are possible parents of this node
  */
-unsigned int of_clk_get_parent_count(struct device_node *np)
+unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        int count;
 
@@ -4725,7 +4725,7 @@ unsigned int of_clk_get_parent_count(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
 
-const char *of_clk_get_parent_name(struct device_node *np, int index)
+const char *of_clk_get_parent_name(const struct device_node *np, int index)
 {
        struct of_phandle_args clkspec;
        struct property *prop;
index dd7af41..0a5d395 100644
@@ -592,24 +592,6 @@ static struct clk_branch disp_cc_mdss_rot_clk = {
        },
 };
 
-static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
-       .halt_reg = 0x400c,
-       .halt_check = BRANCH_HALT,
-       .clkr = {
-               .enable_reg = 0x400c,
-               .enable_mask = BIT(0),
-               .hw.init = &(struct clk_init_data){
-                       .name = "disp_cc_mdss_rscc_ahb_clk",
-                       .parent_data = &(const struct clk_parent_data){
-                               .hw = &disp_cc_mdss_ahb_clk_src.clkr.hw,
-                       },
-                       .num_parents = 1,
-                       .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
-                       .ops = &clk_branch2_ops,
-               },
-       },
-};
-
 static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
        .halt_reg = 0x4008,
        .halt_check = BRANCH_HALT,
@@ -687,7 +669,6 @@ static struct clk_regmap *disp_cc_sc7180_clocks[] = {
        [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
        [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
        [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
-       [DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
        [DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
        [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
index c363c3c..276e5ec 100644
@@ -97,7 +97,7 @@ static struct clk_branch video_cc_vcodec0_axi_clk = {
 
 static struct clk_branch video_cc_vcodec0_core_clk = {
        .halt_reg = 0x890,
-       .halt_check = BRANCH_HALT,
+       .halt_check = BRANCH_HALT_VOTED,
        .clkr = {
                .enable_reg = 0x890,
                .enable_mask = BIT(0),
index b0ce9bc..db124bc 100644
 #include <linux/kvm_para.h>
 #include <linux/cpuidle_haltpoll.h>
 
+static bool force __read_mostly;
+module_param(force, bool, 0444);
+MODULE_PARM_DESC(force, "Load unconditionally");
+
 static struct cpuidle_device __percpu *haltpoll_cpuidle_devices;
 static enum cpuhp_state haltpoll_hp_state;
 
@@ -90,6 +94,11 @@ static void haltpoll_uninit(void)
        haltpoll_cpuidle_devices = NULL;
 }
 
+static bool haltpoll_want(void)
+{
+       return kvm_para_has_hint(KVM_HINTS_REALTIME) || force;
+}
+
 static int __init haltpoll_init(void)
 {
        int ret;
@@ -101,8 +110,7 @@ static int __init haltpoll_init(void)
 
        cpuidle_poll_state_init(drv);
 
-       if (!kvm_para_available() ||
-               !kvm_para_has_hint(KVM_HINTS_REALTIME))
+       if (!kvm_para_available() || !haltpoll_want())
                return -ENODEV;
 
        ret = cpuidle_register_driver(drv);
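The new 'force' parameter lets the driver load on hosts that do not advertise KVM_HINTS_REALTIME. Hypothetical usage, shown as comments since the exact spelling depends on how the driver is built:

        /* As a module:  modprobe cpuidle-haltpoll force=1
         * Built in, on the kernel command line:  cpuidle_haltpoll.force=1 */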
index de81298..c149d9e 100644
@@ -736,53 +736,15 @@ int cpuidle_register(struct cpuidle_driver *drv,
 }
 EXPORT_SYMBOL_GPL(cpuidle_register);
 
-#ifdef CONFIG_SMP
-
-/*
- * This function gets called when a part of the kernel has a new latency
- * requirement.  This means we need to get all processors out of their C-state,
- * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
- * wakes them all right up.
- */
-static int cpuidle_latency_notify(struct notifier_block *b,
-               unsigned long l, void *v)
-{
-       wake_up_all_idle_cpus();
-       return NOTIFY_OK;
-}
-
-static struct notifier_block cpuidle_latency_notifier = {
-       .notifier_call = cpuidle_latency_notify,
-};
-
-static inline void latency_notifier_init(struct notifier_block *n)
-{
-       pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, n);
-}
-
-#else /* CONFIG_SMP */
-
-#define latency_notifier_init(x) do { } while (0)
-
-#endif /* CONFIG_SMP */
-
 /**
  * cpuidle_init - core initializer
  */
 static int __init cpuidle_init(void)
 {
-       int ret;
-
        if (cpuidle_disabled())
                return -ENODEV;
 
-       ret = cpuidle_add_interface(cpu_subsys.dev_root);
-       if (ret)
-               return ret;
-
-       latency_notifier_init(&cpuidle_latency_notifier);
-
-       return 0;
+       return cpuidle_add_interface(cpu_subsys.dev_root);
 }
 
 module_param(off, int, 0444);
index e48271e..29acaf4 100644
@@ -109,9 +109,9 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
  */
 s64 cpuidle_governor_latency_req(unsigned int cpu)
 {
-       int global_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        struct device *device = get_cpu_device(cpu);
        int device_req = dev_pm_qos_raw_resume_latency(device);
+       int global_req = cpu_latency_qos_limit();
 
        if (device_req > global_req)
                device_req = global_req;
index 7576450..aff3dfb 100644
@@ -83,13 +83,16 @@ static ssize_t
 efivar_attr_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
@@ -116,13 +119,16 @@ static ssize_t
 efivar_size_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
        char *str = buf;
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        str += sprintf(str, "0x%lx\n", var->DataSize);
@@ -133,12 +139,15 @@ static ssize_t
 efivar_data_read(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
+       unsigned long size = sizeof(var->Data);
+       int ret;
 
        if (!entry || !buf)
                return -EINVAL;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
+       var->DataSize = size;
+       if (ret)
                return -EIO;
 
        memcpy(buf, var->Data, var->DataSize);
@@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
        u8 *data;
        int err;
 
+       if (!entry || !buf)
+               return -EINVAL;
+
        if (in_compat_syscall()) {
                struct compat_efi_variable *compat;
 
@@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
 {
        struct efi_variable *var = &entry->var;
        struct compat_efi_variable *compat;
+       unsigned long datasize = sizeof(var->Data);
        size_t size;
+       int ret;
 
        if (!entry || !buf)
                return 0;
 
-       var->DataSize = 1024;
-       if (efivar_entry_get(entry, &entry->var.Attributes,
-                            &entry->var.DataSize, entry->var.Data))
+       ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
+       var->DataSize = datasize;
+       if (ret)
                return -EIO;
 
        if (in_compat_syscall()) {
index f24ed9a..337d7cd 100644
@@ -781,11 +781,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        ssize_t result = 0;
        uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
 
-       if (size & 3 || *pos & 3)
+       if (size > 4096 || size & 3 || *pos & 3)
                return -EINVAL;
 
        /* decode offset */
-       offset = *pos & GENMASK_ULL(11, 0);
+       offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
        se = (*pos & GENMASK_ULL(19, 12)) >> 12;
        sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
        cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +823,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
        while (size) {
                uint32_t value;
 
-               value = data[offset++];
+               value = data[result >> 2];
                r = put_user(value, (uint32_t *)buf);
                if (r) {
                        result = r;
index 39cd545..b897585 100644
@@ -3913,6 +3913,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
                                if (r)
                                        goto out;
 
+                               amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
                                /* must succeed. */
                                amdgpu_ras_resume(tmp_adev);
 
@@ -4086,6 +4088,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
                 */
                amdgpu_unregister_gpu_instance(tmp_adev);
 
+               amdgpu_fbdev_set_suspend(adev, 1);
+
                /* disable ras on ALL IPs */
                if (!(in_ras_intr && !use_baco) &&
                      amdgpu_device_ip_need_full_reset(tmp_adev))
index ff2e6e1..6173951 100644
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
        bool enable = (state == AMD_CG_STATE_GATE);
 
        if (enable) {
-               if (jpeg_v2_0_is_idle(handle))
+               if (!jpeg_v2_0_is_idle(handle))
                        return -EBUSY;
                jpeg_v2_0_enable_clock_gating(adev);
        } else {
index c6d046d..c04c207 100644
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
                        continue;
 
                if (enable) {
-                       if (jpeg_v2_5_is_idle(handle))
+                       if (!jpeg_v2_5_is_idle(handle))
                                return -EBUSY;
                        jpeg_v2_5_enable_clock_gating(adev, i);
                } else {
index 2b488df..d8945c3 100644
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK  0x00010000L
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK            0x00020000L
 #define mmHDP_MEM_POWER_CTRL_BASE_IDX  0
+
+/* for Vega20/Arcturus register offset change */
+#define        mmROM_INDEX_VG20                                0x00e4
+#define        mmROM_INDEX_VG20_BASE_IDX                       0
+#define        mmROM_DATA_VG20                                 0x00e5
+#define        mmROM_DATA_VG20_BASE_IDX                        0
+
 /*
  * Indirect registers accessor
  */
@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 {
        u32 *dw_ptr;
        u32 i, length_dw;
+       uint32_t rom_index_offset;
+       uint32_t rom_data_offset;
 
        if (bios == NULL)
                return false;
@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
        dw_ptr = (u32 *)bios;
        length_dw = ALIGN(length_bytes, 4) / 4;
 
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+       case CHIP_ARCTURUS:
+               rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+               rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+               break;
+       default:
+               rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+               rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+               break;
+       }
+
        /* set rom index to 0 */
-       WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+       WREG32(rom_index_offset, 0);
        /* read out the rom data */
        for (i = 0; i < length_dw; i++)
-               dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+               dw_ptr[i] = RREG32(rom_data_offset);
 
        return true;
 }
index 71f61af..09b0572 100644
@@ -1352,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
 
        if (enable) {
                /* wait for STATUS to clear */
-               if (vcn_v1_0_is_idle(handle))
+               if (!vcn_v1_0_is_idle(handle))
                        return -EBUSY;
                vcn_v1_0_enable_clock_gating(adev);
        } else {
index c387c81..b7f1734 100644
@@ -1217,7 +1217,7 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
 
        if (enable) {
                /* wait for STATUS to clear */
-               if (vcn_v2_0_is_idle(handle))
+               if (!vcn_v2_0_is_idle(handle))
                        return -EBUSY;
                vcn_v2_0_enable_clock_gating(adev);
        } else {
index 2d64ba1..678253d 100644
@@ -1672,7 +1672,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
                return 0;
 
        if (enable) {
-               if (vcn_v2_5_is_idle(handle))
+               if (!vcn_v2_5_is_idle(handle))
                        return -EBUSY;
                vcn_v2_5_enable_clock_gating(adev);
        } else {
index e997251..6240259 100644
@@ -522,8 +522,9 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
 
        acrtc_state = to_dm_crtc_state(acrtc->base.state);
 
-       DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
-                               amdgpu_dm_vrr_active(acrtc_state));
+       DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+                        amdgpu_dm_vrr_active(acrtc_state),
+                        acrtc_state->active_planes);
 
        amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
        drm_crtc_handle_vblank(&acrtc->base);
@@ -543,7 +544,18 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
                        &acrtc_state->vrr_params.adjust);
        }
 
-       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED) {
+       /*
+        * If there aren't any active_planes then DCH HUBP may be clock-gated.
+        * In that case, pageflip completion interrupts won't fire and pageflip
+        * completion events won't get delivered. Prevent this by sending
+        * pending pageflip events from here if a flip is still pending.
+        *
+        * If any planes are enabled, use dm_pflip_high_irq() instead, to
+        * avoid race conditions between flip programming and completion,
+        * which could cause too early flip completion events.
+        */
+       if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+           acrtc_state->active_planes == 0) {
                if (acrtc->event) {
                        drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
                        acrtc->event = NULL;
index cb731c1..fd9e696 100644
@@ -3401,6 +3401,17 @@ static bool retrieve_link_cap(struct dc_link *link)
                sink_id.ieee_device_id,
                sizeof(sink_id.ieee_device_id));
 
+       /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+       {
+               uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+               if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+                   !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+                           sizeof(str_mbp_2017))) {
+                       link->reported_link_cap.link_rate = 0x0c;
+               }
+       }
+
        core_link_read_dpcd(
                link,
                DP_SINK_HW_REVISION_START,
index d51e02f..5e640f1 100644
@@ -108,7 +108,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
        .enable_power_gating_plane = dcn20_enable_power_gating_plane,
        .dpp_pg_control = dcn20_dpp_pg_control,
        .hubp_pg_control = dcn20_hubp_pg_control,
-       .dsc_pg_control = NULL,
        .update_odm = dcn20_update_odm,
        .dsc_pg_control = dcn20_dsc_pg_control,
        .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
index 85f90f3..e310d67 100644
@@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
        .use_urgent_burst_bw = 0
 };
 
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+       .clock_limits = {
+                       {
+                               .state = 0,
+                               .dcfclk_mhz = 560.0,
+                               .fabricclk_mhz = 560.0,
+                               .dispclk_mhz = 513.0,
+                               .dppclk_mhz = 513.0,
+                               .phyclk_mhz = 540.0,
+                               .socclk_mhz = 560.0,
+                               .dscclk_mhz = 171.0,
+                               .dram_speed_mts = 8960.0,
+                       },
+                       {
+                               .state = 1,
+                               .dcfclk_mhz = 694.0,
+                               .fabricclk_mhz = 694.0,
+                               .dispclk_mhz = 642.0,
+                               .dppclk_mhz = 642.0,
+                               .phyclk_mhz = 600.0,
+                               .socclk_mhz = 694.0,
+                               .dscclk_mhz = 214.0,
+                               .dram_speed_mts = 11104.0,
+                       },
+                       {
+                               .state = 2,
+                               .dcfclk_mhz = 875.0,
+                               .fabricclk_mhz = 875.0,
+                               .dispclk_mhz = 734.0,
+                               .dppclk_mhz = 734.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 875.0,
+                               .dscclk_mhz = 245.0,
+                               .dram_speed_mts = 14000.0,
+                       },
+                       {
+                               .state = 3,
+                               .dcfclk_mhz = 1000.0,
+                               .fabricclk_mhz = 1000.0,
+                               .dispclk_mhz = 1100.0,
+                               .dppclk_mhz = 1100.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1000.0,
+                               .dscclk_mhz = 367.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+                       {
+                               .state = 4,
+                               .dcfclk_mhz = 1200.0,
+                               .fabricclk_mhz = 1200.0,
+                               .dispclk_mhz = 1284.0,
+                               .dppclk_mhz = 1284.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1200.0,
+                               .dscclk_mhz = 428.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+                       /*Extra state, no dispclk ramping*/
+                       {
+                               .state = 5,
+                               .dcfclk_mhz = 1200.0,
+                               .fabricclk_mhz = 1200.0,
+                               .dispclk_mhz = 1284.0,
+                               .dppclk_mhz = 1284.0,
+                               .phyclk_mhz = 810.0,
+                               .socclk_mhz = 1200.0,
+                               .dscclk_mhz = 428.0,
+                               .dram_speed_mts = 16000.0,
+                       },
+               },
+       .num_states = 5,
+       .sr_exit_time_us = 8.6,
+       .sr_enter_plus_exit_time_us = 10.9,
+       .urgent_latency_us = 4.0,
+       .urgent_latency_pixel_data_only_us = 4.0,
+       .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+       .urgent_latency_vm_data_only_us = 4.0,
+       .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+       .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+       .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+       .max_avg_sdp_bw_use_normal_percent = 40.0,
+       .max_avg_dram_bw_use_normal_percent = 40.0,
+       .writeback_latency_us = 12.0,
+       .ideal_dram_bw_after_urgent_percent = 40.0,
+       .max_request_size_bytes = 256,
+       .dram_channel_width_bytes = 2,
+       .fabric_datapath_to_dcn_data_return_bytes = 64,
+       .dcn_downspread_percent = 0.5,
+       .downspread_percent = 0.38,
+       .dram_page_open_time_ns = 50.0,
+       .dram_rw_turnaround_time_ns = 17.5,
+       .dram_return_buffer_per_channel_bytes = 8192,
+       .round_trip_ping_latency_dcfclk_cycles = 131,
+       .urgent_out_of_order_return_per_channel_bytes = 256,
+       .channel_interleave_bytes = 256,
+       .num_banks = 8,
+       .num_chans = 8,
+       .vmm_page_size_bytes = 4096,
+       .dram_clock_change_latency_us = 404.0,
+       .dummy_pstate_latency_us = 5.0,
+       .writeback_dram_clock_change_latency_us = 23.0,
+       .return_bus_width_bytes = 64,
+       .dispclk_dppclk_vco_speed_mhz = 3850,
+       .xfc_bus_transport_time_us = 20,
+       .xfc_xbuf_latency_tolerance_us = 4,
+       .use_urgent_burst_bw = 0
+};
+
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
 
 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
        uint32_t hw_internal_rev)
 {
+       if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+               return &dcn2_0_nv14_soc;
+
        if (ASICREV_IS_NAVI12_P(hw_internal_rev))
                return &dcn2_0_nv12_soc;
 
index 4861aa5..fddbd59 100644
@@ -116,7 +116,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
        .enable_power_gating_plane = dcn20_enable_power_gating_plane,
        .dpp_pg_control = dcn20_dpp_pg_control,
        .hubp_pg_control = dcn20_hubp_pg_control,
-       .dsc_pg_control = NULL,
        .update_odm = dcn20_update_odm,
        .dsc_pg_control = dcn20_dsc_pg_control,
        .get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
index ad8e9b5..96e81c7 100644 (file)
@@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
                        smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                smu_set_watermarks_table(smu, table, clock_ranges);
-               smu->watermarks_bitmap |= WATERMARKS_EXIST;
-               smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+
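+               /*
+                * Mark the watermarks for upload only the first time they
+                * are set; later updates leave the LOADED state untouched
+                * so the table is not rewritten needlessly.
+                */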
+               if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+                       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+                       smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+               }
        }
 
        mutex_unlock(&smu->mutex);
index 0d73a49..aed4d6e 100644 (file)
@@ -1063,15 +1063,6 @@ static int navi10_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
-               if (ret)
-                       return ret;
-
-               smu->watermarks_bitmap |= WATERMARKS_LOADED;
-       }
-
-       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
                                       *clock_ranges)
 {
        int i;
+       int ret = 0;
        Watermarks_t *table = watermarks;
 
        if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
                                clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
 
+       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+       /* pass data to smu controller */
+       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_write_watermarks_table(smu);
+               if (ret) {
+                       pr_err("Failed to update WMTABLE!\n");
+                       return ret;
+               }
+               smu->watermarks_bitmap |= WATERMARKS_LOADED;
+       }
+
        return 0;
 }
 
index 568c041..3ad0f4a 100644 (file)
@@ -806,9 +806,10 @@ static int renoir_set_watermarks_table(
                                clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
        }
 
+       smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
        /* pass data to smu controller */
-       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-                       !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
                ret = smu_write_watermarks_table(smu);
                if (ret) {
                        pr_err("Failed to update WMTABLE!\n");
index ea5cd1e..e793393 100644 (file)
@@ -146,14 +146,14 @@ static const struct of_device_id komeda_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, komeda_of_match);
 
-static int komeda_rt_pm_suspend(struct device *dev)
+static int __maybe_unused komeda_rt_pm_suspend(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
        return komeda_dev_suspend(mdrv->mdev);
 }
 
-static int komeda_rt_pm_resume(struct device *dev)
+static int __maybe_unused komeda_rt_pm_resume(struct device *dev)
 {
        struct komeda_drv *mdrv = dev_get_drvdata(dev);
 
index b615b7d..a4fc4e6 100644 (file)
@@ -156,10 +156,8 @@ int bochs_hw_init(struct drm_device *dev)
                size = min(size, mem);
        }
 
-       if (pci_request_region(pdev, 0, "bochs-drm") != 0) {
-               DRM_ERROR("Cannot request framebuffer\n");
-               return -EBUSY;
-       }
+       if (pci_request_region(pdev, 0, "bochs-drm") != 0)
+               DRM_WARN("Cannot request framebuffer, boot fb still active?\n");
 
        bochs->fb_map = ioremap(addr, size);
        if (bochs->fb_map == NULL) {
index 67fca43..24965e5 100644 (file)
@@ -1624,28 +1624,34 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
                frame.colorspace = HDMI_COLORSPACE_RGB;
 
        /* Set up colorimetry */
-       switch (hdmi->hdmi_data.enc_out_encoding) {
-       case V4L2_YCBCR_ENC_601:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
+       if (!hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+               switch (hdmi->hdmi_data.enc_out_encoding) {
+               case V4L2_YCBCR_ENC_601:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV601)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               case V4L2_YCBCR_ENC_709:
+                       if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
+                               frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
+                       else
+                               frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
+                       break;
+               default: /* Carries no data */
                        frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
+                       frame.extended_colorimetry =
+                                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
+                       break;
+               }
+       } else {
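+               /* RGB output carries no colorimetry data in the AVI
+                * infoframe, so report "none".
+                */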
+               frame.colorimetry = HDMI_COLORIMETRY_NONE;
                frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
-       case V4L2_YCBCR_ENC_709:
-               if (hdmi->hdmi_data.enc_in_encoding == V4L2_YCBCR_ENC_XV709)
-                       frame.colorimetry = HDMI_COLORIMETRY_EXTENDED;
-               else
-                       frame.colorimetry = HDMI_COLORIMETRY_ITU_709;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_709;
-               break;
-       default: /* Carries no data */
-               frame.colorimetry = HDMI_COLORIMETRY_ITU_601;
-               frame.extended_colorimetry =
-                               HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
-               break;
+                       HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
        }
 
        frame.scan_mode = HDMI_SCAN_MODE_NONE;
index cce0b1b..ed0fea2 100644 (file)
@@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
        return parent_lct + 1;
 }
 
-static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
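+/*
+ * An "end device" here is a peer that terminates the DP link itself
+ * (e.g. an SST sink or a DP-to-legacy converter); MST-capable branching
+ * devices do not count.
+ */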
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
 {
        switch (pdt) {
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
 
        /* Tear down the old pdt, if there is one */
        if (port->pdt != DP_PEER_DEVICE_NONE) {
-               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+               if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                        /*
                         * If the new PDT would also have an i2c bus,
                         * don't bother with reregistering it
                         */
                        if (new_pdt != DP_PEER_DEVICE_NONE &&
-                           drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+                           drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
                                port->pdt = new_pdt;
                                port->mcs = new_mcs;
                                return 0;
@@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
        port->mcs = new_mcs;
 
        if (port->pdt != DP_PEER_DEVICE_NONE) {
-               if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+               if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                        /* add i2c over sideband */
                        ret = drm_dp_mst_register_i2c_bus(&port->aux);
                } else {
@@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
        }
 
        if (port->pdt != DP_PEER_DEVICE_NONE &&
-           drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+           drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
                port->cached_edid = drm_get_edid(port->connector,
                                                 &port->aux.ddc);
                drm_connector_set_tile_property(port->connector);
@@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
                mutex_unlock(&mgr->lock);
        }
 
-       if (old_ddps != port->ddps) {
-               if (port->ddps) {
-                       if (!port->input) {
-                               drm_dp_send_enum_path_resources(mgr, mstb,
-                                                               port);
-                       }
+       /*
+        * Reprobe PBN caps both on hotplug and when re-probing the link
+        * for our parent mstb
+        */
+       if (old_ddps != port->ddps || !created) {
+               if (port->ddps && !port->input) {
+                       ret = drm_dp_send_enum_path_resources(mgr, mstb,
+                                                             port);
+                       if (ret == 1)
+                               changed = true;
                } else {
-                       port->available_pbn = 0;
+                       port->full_pbn = 0;
                }
        }
 
@@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
        port->ddps = conn_stat->displayport_device_plug_status;
 
        if (old_ddps != port->ddps) {
-               if (port->ddps) {
-                       dowork = true;
-               } else {
-                       port->available_pbn = 0;
-               }
+               if (port->ddps && !port->input)
+                       drm_dp_send_enum_path_resources(mgr, mstb, port);
+               else
+                       port->full_pbn = 0;
        }
 
        new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
                if (port->input || !port->ddps)
                        continue;
 
-               if (!port->available_pbn) {
-                       drm_modeset_lock(&mgr->base.lock, NULL);
-                       drm_dp_send_enum_path_resources(mgr, mstb, port);
-                       drm_modeset_unlock(&mgr->base.lock);
-                       changed = true;
-               }
-
                if (port->mstb)
                        mstb_child = drm_dp_mst_topology_get_mstb_validated(
                            mgr, port->mstb);
@@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 
        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {
+               ret = 0;
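+               /* From here on ret is 0 (no change), 1 (resources changed,
+                * the caller should send a hotplug) or a negative error.
+                */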
                path_res = &txmsg->reply.u.path_resources;
 
                if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
                                      path_res->port_number,
                                      path_res->full_payload_bw_number,
                                      path_res->avail_payload_bw_number);
-                       port->available_pbn =
-                               path_res->avail_payload_bw_number;
+
+                       /*
+                        * If something changed, make sure we send a
+                        * hotplug event
+                        */
+                       if (port->full_pbn != path_res->full_payload_bw_number ||
+                           port->fec_capable != path_res->fec_capable)
+                               ret = 1;
+
+                       port->full_pbn = path_res->full_payload_bw_number;
                        port->fec_capable = path_res->fec_capable;
                }
        }
 
        kfree(txmsg);
-       return 0;
+       return ret;
 }
 
 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
        /* The link address will need to be re-sent on resume */
        mstb->link_address_sent = false;
 
-       list_for_each_entry(port, &mstb->ports, next) {
-               /* The PBN for each port will also need to be re-probed */
-               port->available_pbn = 0;
-
+       list_for_each_entry(port, &mstb->ports, next)
                if (port->mstb)
                        drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
-       }
 }
 
 /**
@@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
        return false;
 }
 
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
-                                    struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+                                     struct drm_dp_mst_topology_state *state);
+
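+/*
+ * These two helpers recurse through the topology: a branch sums the PBN
+ * consumed by each of its ports, and a port contributes either its own
+ * VCPI allocation (end device) or the total of the branch behind it,
+ * checked against the port's full_pbn.
+ */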
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+                                     struct drm_dp_mst_topology_state *state)
 {
-       struct drm_dp_mst_port *port;
        struct drm_dp_vcpi_allocation *vcpi;
-       int pbn_limit = 0, pbn_used = 0;
+       struct drm_dp_mst_port *port;
+       int pbn_used = 0, ret;
+       bool found = false;
 
-       list_for_each_entry(port, &branch->ports, next) {
-               if (port->mstb)
-                       if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
-                               return -ENOSPC;
+       /* Check that we have at least one port in our state that's downstream
+        * of this branch; otherwise we can skip this branch
+        */
+       list_for_each_entry(vcpi, &state->vcpis, next) {
+               if (!vcpi->pbn ||
+                   !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+                       continue;
 
-               if (port->available_pbn > 0)
-                       pbn_limit = port->available_pbn;
+               found = true;
+               break;
        }
-       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
-                        branch, pbn_limit);
+       if (!found)
+               return 0;
 
-       list_for_each_entry(vcpi, &mst_state->vcpis, next) {
-               if (!vcpi->pbn)
-                       continue;
+       if (mstb->port_parent)
+               DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+                                mstb->port_parent->parent, mstb->port_parent,
+                                mstb);
+       else
+               DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+                                mstb);
+
+       list_for_each_entry(port, &mstb->ports, next) {
+               ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+               if (ret < 0)
+                       return ret;
 
-               if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
-                       pbn_used += vcpi->pbn;
+               pbn_used += ret;
        }
-       DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
-                        branch, pbn_used);
 
-       if (pbn_used > pbn_limit) {
-               DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
-                                branch);
+       return pbn_used;
+}
+
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+                                     struct drm_dp_mst_topology_state *state)
+{
+       struct drm_dp_vcpi_allocation *vcpi;
+       int pbn_used = 0;
+
+       if (port->pdt == DP_PEER_DEVICE_NONE)
+               return 0;
+
+       if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+               bool found = false;
+
+               list_for_each_entry(vcpi, &state->vcpis, next) {
+                       if (vcpi->port != port)
+                               continue;
+                       if (!vcpi->pbn)
+                               return 0;
+
+                       found = true;
+                       break;
+               }
+               if (!found)
+                       return 0;
+
+               /* This should never happen, as it means we tried to
+                * set a mode before querying the full_pbn
+                */
+               if (WARN_ON(!port->full_pbn))
+                       return -EINVAL;
+
+               pbn_used = vcpi->pbn;
+       } else {
+               pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+                                                                state);
+               if (pbn_used <= 0)
+                       return pbn_used;
+       }
+
+       if (pbn_used > port->full_pbn) {
+               DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+                                port->parent, port, pbn_used,
+                                port->full_pbn);
                return -ENOSPC;
        }
-       return 0;
+
+       DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+                        port->parent, port, pbn_used, port->full_pbn);
+
+       return pbn_used;
 }
 
 static inline int
@@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
                ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
                if (ret)
                        break;
-               ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
-               if (ret)
+
+               mutex_lock(&mgr->lock);
+               ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+                                                           mst_state);
+               mutex_unlock(&mgr->lock);
+               if (ret < 0)
                        break;
+               else
+                       ret = 0;
        }
 
        return ret;
index b481caf..825abe3 100644 (file)
@@ -542,10 +542,12 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
        }
 
        DRM_DEBUG_LEASE("Creating lease\n");
+       /* the lessee takes ownership of the leases on success */
        lessee = drm_lease_create(lessor, &leases);
 
        if (IS_ERR(lessee)) {
                ret = PTR_ERR(lessee);
+               idr_destroy(&leases);
                goto out_leases;
        }
 
@@ -580,7 +582,6 @@ out_lessee:
 
 out_leases:
        put_unused_fd(fd);
-       idr_destroy(&leases);
 
        DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl failed: %d\n", ret);
        return ret;
index 8428ae1..1f79bc2 100644 (file)
@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
 struct decon_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 
        decon_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, dev);
+       return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
        decon_atomic_disable(ctx->crtc);
 
        /* detach this sub driver from iommu mapping if supported. */
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static const struct component_ops decon_component_ops = {
index ff59c64..1eed332 100644 (file)
@@ -40,6 +40,7 @@
 struct decon_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 
        decon_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, ctx->dev);
+       return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static void decon_ctx_remove(struct decon_context *ctx)
 {
        /* detach this sub driver from iommu mapping if supported. */
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
index 9ebc027..619f814 100644 (file)
@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
  * mapping.
  */
 static int drm_iommu_attach_device(struct drm_device *drm_dev,
-                               struct device *subdrv_dev)
+                               struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
        int ret;
@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
                return ret;
 
        if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
-               if (to_dma_iommu_mapping(subdrv_dev))
+               /*
+                * Keep the original DMA mapping of the sub-device and
+                * restore it on Exynos DRM detach; otherwise the DMA
+                * framework considers the device IOMMU-less during the
+                * next probe (in case of deferred probe or modular build).
+                */
+               *dma_priv = to_dma_iommu_mapping(subdrv_dev);
+               if (*dma_priv)
                        arm_iommu_detach_device(subdrv_dev);
 
                ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
  * mapping
  */
 static void drm_iommu_detach_device(struct drm_device *drm_dev,
-                               struct device *subdrv_dev)
+                                   struct device *subdrv_dev, void **dma_priv)
 {
        struct exynos_drm_private *priv = drm_dev->dev_private;
 
-       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
                arm_iommu_detach_device(subdrv_dev);
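+               /* restore the original DMA mapping saved at attach time */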
-       else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+               arm_iommu_attach_device(subdrv_dev, *dma_priv);
+       } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
                iommu_detach_device(priv->mapping, subdrv_dev);
 
        clear_dma_max_seg_size(subdrv_dev);
 }
 
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+                           void **dma_priv)
 {
        struct exynos_drm_private *priv = drm->dev_private;
 
@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
                priv->mapping = mapping;
        }
 
-       return drm_iommu_attach_device(drm, dev);
+       return drm_iommu_attach_device(drm, dev, dma_priv);
 }
 
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+                              void **dma_priv)
 {
        if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
-               drm_iommu_detach_device(drm, dev);
+               drm_iommu_detach_device(drm, dev, dma_priv);
 }
 
 void exynos_drm_cleanup_dma(struct drm_device *drm)
index d4d21d8..6ae9056 100644 (file)
@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
        return priv->mapping ? true : false;
 }
 
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+                           void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+                              void **dma_priv);
 void exynos_drm_cleanup_dma(struct drm_device *drm);
 
 #ifdef CONFIG_DRM_EXYNOS_DPI
index 8ea2e1d..29ab8be 100644 (file)
@@ -97,6 +97,7 @@ struct fimc_scaler {
 struct fimc_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        struct exynos_drm_ipp_task      *task;
        struct exynos_drm_ipp_formats   *formats;
@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
 
        ctx->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &ctx->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(drm_dev, dev);
+       exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static const struct component_ops fimc_component_ops = {
index 21aec38..bb67cad 100644 (file)
@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
 struct fimd_context {
        struct device                   *dev;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct exynos_drm_crtc          *crtc;
        struct exynos_drm_plane         planes[WINDOWS_NR];
        struct exynos_drm_plane_config  configs[WINDOWS_NR];
@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
        if (is_drm_iommu_supported(drm_dev))
                fimd_clear_channels(ctx->crtc);
 
-       return exynos_drm_register_dma(drm_dev, dev);
+       return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 
        fimd_atomic_disable(ctx->crtc);
 
-       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+       exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 
        if (ctx->encoder)
                exynos_dpi_remove(ctx->encoder);
index 2a3382d..fcee33a 100644 (file)
@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
 
 struct g2d_data {
        struct device                   *dev;
+       void                            *dma_priv;
        struct clk                      *gate_clk;
        void __iomem                    *regs;
        int                             irq;
@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
                return ret;
        }
 
-       ret = exynos_drm_register_dma(drm_dev, dev);
+       ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
        if (ret < 0) {
                dev_err(dev, "failed to enable iommu.\n");
                g2d_fini_cmdlist(g2d);
@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
        priv->g2d_dev = NULL;
 
        cancel_work_sync(&g2d->runqueue_work);
-       exynos_drm_unregister_dma(g2d->drm_dev, dev);
+       exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
 }
 
 static const struct component_ops g2d_component_ops = {
index 88b6fca..45e9aee 100644 (file)
@@ -97,6 +97,7 @@ struct gsc_scaler {
 struct gsc_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        struct exynos_drm_ipp_task      *task;
        struct exynos_drm_ipp_formats   *formats;
@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
 
        ctx->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &ctx->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(drm_dev, dev);
+       exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 
 static const struct component_ops gsc_component_ops = {
index b984829..dafa87b 100644 (file)
@@ -56,6 +56,7 @@ struct rot_variant {
 struct rot_context {
        struct exynos_drm_ipp ipp;
        struct drm_device *drm_dev;
+       void            *dma_priv;
        struct device   *dev;
        void __iomem    *regs;
        struct clk      *clock;
@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
 
        rot->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                           DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &rot->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+       exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
 }
 
 static const struct component_ops rotator_component_ops = {
index 497973e..93c43c8 100644 (file)
@@ -39,6 +39,7 @@ struct scaler_data {
 struct scaler_context {
        struct exynos_drm_ipp           ipp;
        struct drm_device               *drm_dev;
+       void                            *dma_priv;
        struct device                   *dev;
        void __iomem                    *regs;
        struct clk                      *clock[SCALER_MAX_CLK];
@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
 
        scaler->drm_dev = drm_dev;
        ipp->drm_dev = drm_dev;
-       exynos_drm_register_dma(drm_dev, dev);
+       exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
 
        exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
                        DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
        struct exynos_drm_ipp *ipp = &scaler->ipp;
 
        exynos_drm_ipp_unregister(dev, ipp);
-       exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+       exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+                                 &scaler->dma_priv);
 }
 
 static const struct component_ops scaler_component_ops = {
index 38ae9c3..21b726b 100644 (file)
@@ -94,6 +94,7 @@ struct mixer_context {
        struct platform_device *pdev;
        struct device           *dev;
        struct drm_device       *drm_dev;
+       void                    *dma_priv;
        struct exynos_drm_crtc  *crtc;
        struct exynos_drm_plane planes[MIXER_WIN_NR];
        unsigned long           flags;
@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
                }
        }
 
-       return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+       return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+                                      &mixer_ctx->dma_priv);
 }
 
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-       exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+       exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+                                 &mixer_ctx->dma_priv);
 }
 
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
index c7424e2..2084570 100644 (file)
@@ -1360,7 +1360,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
-       pm_qos_update_request(&i915->pm_qos, 0);
+       cpu_latency_qos_update_request(&i915->pm_qos, 0);
 
        intel_dp_check_edp(intel_dp);
 
@@ -1488,7 +1488,7 @@ done:
 
        ret = recv_bytes;
 out:
-       pm_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
 
        if (vdd)
                edp_panel_vdd_off(intel_dp, false);
index 60c984e..7643a30 100644 (file)
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
        if (unlikely(entry->flags & eb->invalid_flags))
                return -EINVAL;
 
-       if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+       if (unlikely(entry->alignment &&
+                    !is_power_of_2_u64(entry->alignment)))
                return -EINVAL;
 
        /*
index fe8a59a..31455ec 100644 (file)
@@ -1600,17 +1600,6 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
        spin_unlock(&old->breadcrumbs.irq_lock);
 }
 
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
-       struct i915_request * const *last = READ_ONCE(execlists->active);
-
-       while (*last && i915_request_completed(*last))
-               last++;
-
-       return *last;
-}
-
 #define for_each_waiter(p__, rq__) \
        list_for_each_entry_lockless(p__, \
                                     &(rq__)->sched.waiters_list, \
@@ -1679,11 +1668,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
        if (!intel_engine_has_timeslices(engine))
                return false;
 
-       if (list_is_last(&rq->sched.link, &engine->active.requests))
-               return false;
-
-       hint = max(rq_prio(list_next_entry(rq, sched.link)),
-                  engine->execlists.queue_priority_hint);
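+       /*
+        * Even if rq is the last request on the engine, the queue itself
+        * may still hold a runnable candidate worth a timeslice.
+        */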
+       hint = engine->execlists.queue_priority_hint;
+       if (!list_is_last(&rq->sched.link, &engine->active.requests))
+               hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
        return hint >= effective_prio(rq);
 }
@@ -1725,16 +1712,26 @@ static void set_timeslice(struct intel_engine_cs *engine)
        set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
 
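+/*
+ * Record the priority being sliced for and (re)arm the timeslice timer
+ * unless one is already pending.
+ */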
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+       struct intel_engine_execlists *execlists = &engine->execlists;
+
+       execlists->switch_priority_hint = execlists->queue_priority_hint;
+
+       if (timer_pending(&execlists->timer))
+               return;
+
+       set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
        (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+                                           const struct i915_request *rq)
 {
-       struct i915_request *rq;
-
-       rq = last_active(&engine->execlists);
        if (!rq)
                return 0;
 
@@ -1745,13 +1742,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
        return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+                               const struct i915_request *rq)
 {
        if (!intel_engine_has_preempt_reset(engine))
                return;
 
        set_timer_ms(&engine->execlists.preempt,
-                    active_preempt_timeout(engine));
+                    active_preempt_timeout(engine, rq));
 }
 
 static inline void clear_ports(struct i915_request **ports, int count)
@@ -1764,6 +1762,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_request **port = execlists->pending;
        struct i915_request ** const last_port = port + execlists->port_mask;
+       struct i915_request * const *active;
        struct i915_request *last;
        struct rb_node *rb;
        bool submit = false;
@@ -1818,7 +1817,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * i.e. we will retrigger preemption following the ack in case
         * of trouble.
         */
-       last = last_active(execlists);
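+       /*
+        * Find the request currently executing on HW by skipping those
+        * already completed, keeping the cursor for reuse when setting
+        * the preempt timeout below.
+        */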
+       active = READ_ONCE(execlists->active);
+       while ((last = *active) && i915_request_completed(last))
+               active++;
+
        if (last) {
                if (need_preempt(engine, last, rb)) {
                        ENGINE_TRACE(engine,
@@ -1888,11 +1890,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               if (!execlists->timer.expires &&
-                                   need_timeslice(engine, last))
-                                       set_timer_ms(&execlists->timer,
-                                                    timeslice(engine));
-
+                               start_timeslice(engine);
                                return;
                        }
                }
@@ -1927,7 +1925,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
-                               return; /* leave this for another */
+                               start_timeslice(engine);
+                               return; /* leave this for another sibling */
                        }
 
                        ENGINE_TRACE(engine,
@@ -2103,7 +2102,7 @@ done:
                 * Skip if we ended up with exactly the same set of requests,
                 * e.g. trying to timeslice a pair of ordered contexts
                 */
-               if (!memcmp(execlists->active, execlists->pending,
+               if (!memcmp(active, execlists->pending,
                            (port - execlists->pending + 1) * sizeof(*port))) {
                        do
                                execlists_schedule_out(fetch_and_zero(port));
@@ -2114,7 +2113,7 @@ done:
                clear_ports(port + 1, last_port - port);
 
                execlists_submit_ports(engine);
-               set_preempt_timeout(engine);
+               set_preempt_timeout(engine, *active);
        } else {
 skip_submit:
                ring_set_paused(engine, 0);
@@ -4001,26 +4000,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
 
                *cs++ = preparser_disable(false);
                intel_ring_advance(request, cs);
-
-               /*
-                * Wa_1604544889:tgl
-                */
-               if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
-                       flags = 0;
-                       flags |= PIPE_CONTROL_CS_STALL;
-                       flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
-                       flags |= PIPE_CONTROL_STORE_DATA_INDEX;
-                       flags |= PIPE_CONTROL_QW_WRITE;
-
-                       cs = intel_ring_begin(request, 6);
-                       if (IS_ERR(cs))
-                               return PTR_ERR(cs);
-
-                       cs = gen8_emit_pipe_control(cs, flags,
-                                                   LRC_PPHWSP_SCRATCH_ADDR);
-                       intel_ring_advance(request, cs);
-               }
        }
 
        return 0;
index 8771652..d8d9f11 100644 (file)
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
 
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
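+       /* If the cacheline is already idle, no concurrent user can hold a
+        * reference and it can be freed immediately; otherwise flag it and
+        * let the final release free it.
+        */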
+       if (!i915_active_acquire_if_busy(&cl->active)) {
+               __idle_cacheline_free(cl);
+               return;
+       }
+
        GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
        cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
 
-       if (i915_active_is_idle(&cl->active))
-               __idle_cacheline_free(cl);
+       i915_active_release(&cl->active);
 }
 
 int intel_timeline_init(struct intel_timeline *timeline,
index 173a7f2..6c2f846 100644 (file)
@@ -1529,15 +1529,34 @@ err_obj:
        return ERR_PTR(err);
 }
 
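+/*
+ * MMIO ranges whose CPU reads are steered by the MCR selector on gen8+;
+ * the empty entry terminates the table for the lookup in mcr_range().
+ */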
+static const struct {
+       u32 start;
+       u32 end;
+} mcr_ranges_gen8[] = {
+       { .start = 0x5500, .end = 0x55ff },
+       { .start = 0x7000, .end = 0x7fff },
+       { .start = 0x9400, .end = 0x97ff },
+       { .start = 0xb000, .end = 0xb3ff },
+       { .start = 0xe000, .end = 0xe7ff },
+       {},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
+       int i;
+
+       if (INTEL_GEN(i915) < 8)
+               return false;
+
        /*
-        * Registers in this range are affected by the MCR selector
+        * Registers in these ranges are affected by the MCR selector
         * which only controls CPU initiated MMIO. Routing does not
         * work for CS access so we cannot verify them on this path.
         */
-       if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
-               return true;
+       for (i = 0; mcr_ranges_gen8[i].start; i++)
+               if (offset >= mcr_ranges_gen8[i].start &&
+                   offset <= mcr_ranges_gen8[i].end)
+                       return true;
 
        return false;
 }
index e1c313d..a62bdf9 100644 (file)
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
        /* TODO: add more platforms support */
-       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+               IS_COFFEELAKE(dev_priv)) {
                if (connected) {
                        vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
                                SFUSE_STRAP_DDID_DETECTED;
index 867e762..33569b9 100644 (file)
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
        /* there are features that depend on the version! */
        v->header.version = 155;
        v->header.header_size = sizeof(v->header);
-       v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
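+       /* vbt_size covers the entire blob, including the VBT header itself */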
+       v->header.vbt_size = sizeof(struct vbt);
        v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 
        strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
        v->bdb_header.version = 186; /* child_dev_size = 33 */
        v->bdb_header.header_size = sizeof(v->bdb_header);
 
-       v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-               - sizeof(struct bdb_header);
+       v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 
        /* general features */
        v->general_features_header.id = BDB_GENERAL_FEATURES;
index 487af6e..345c2aa 100644 (file)
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
        struct intel_gvt *gvt = vgpu->gvt;
 
-       mutex_lock(&vgpu->vgpu_lock);
-
        WARN(vgpu->active, "vGPU is still active!\n");
 
+       /*
+        * Remove the vGPU from the idr first so the cleanup below can
+        * tell whether any vGPU is still active and the service needs
+        * stopping.
+        */
+       mutex_lock(&gvt->lock);
+       idr_remove(&gvt->vgpu_idr, vgpu->id);
+       mutex_unlock(&gvt->lock);
+
+       mutex_lock(&vgpu->vgpu_lock);
        intel_gvt_debugfs_remove_vgpu(vgpu);
        intel_vgpu_clean_sched_policy(vgpu);
        intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        mutex_unlock(&vgpu->vgpu_lock);
 
        mutex_lock(&gvt->lock);
-       idr_remove(&gvt->vgpu_idr, vgpu->id);
        if (idr_is_empty(&gvt->vgpu_idr))
                intel_gvt_clean_irq(gvt);
        intel_gvt_update_vgpu_types(gvt);
index 8410330..eec1d6a 100644 (file)
@@ -505,8 +505,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
        mutex_init(&dev_priv->backlight_lock);
 
        mutex_init(&dev_priv->sb_lock);
-       pm_qos_add_request(&dev_priv->sb_qos,
-                          PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
 
        mutex_init(&dev_priv->av_mutex);
        mutex_init(&dev_priv->wm.wm_mutex);
@@ -571,7 +570,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
        vlv_free_s0ix_state(dev_priv);
        i915_workqueues_cleanup(dev_priv);
 
-       pm_qos_remove_request(&dev_priv->sb_qos);
+       cpu_latency_qos_remove_request(&dev_priv->sb_qos);
        mutex_destroy(&dev_priv->sb_lock);
 }
 
@@ -1229,8 +1228,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
                }
        }
 
-       pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
 
        intel_gt_init_workarounds(dev_priv);
 
@@ -1276,7 +1274,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 err_msi:
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
-       pm_qos_remove_request(&dev_priv->pm_qos);
+       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 err_mem_regions:
        intel_memory_regions_driver_release(dev_priv);
 err_ggtt:
@@ -1299,7 +1297,7 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
        if (pdev->msi_enabled)
                pci_disable_msi(pdev);
 
-       pm_qos_remove_request(&dev_priv->pm_qos);
+       cpu_latency_qos_remove_request(&dev_priv->pm_qos);
 }
 
 /**
index dcaa85a..a18b2a2 100644 (file)
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
        return NOTIFY_DONE;
 }
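+/*
+ * The semaphore fence may be signalled from a context already holding
+ * request locks, where taking the scheduler locks for the priority bump
+ * is unsafe, so the bump is deferred to irq_work.
+ */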
 
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+       struct i915_request *rq =
+               container_of(wrk, typeof(*rq), semaphore_work);
+
+       i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+       i915_request_put(rq);
+}
+
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-       struct i915_request *request =
-               container_of(fence, typeof(*request), semaphore);
+       struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 
        switch (state) {
        case FENCE_COMPLETE:
-               i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+               if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+                       i915_request_get(rq);
+                       init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+                       irq_work_queue(&rq->semaphore_work);
+               }
                break;
 
        case FENCE_FREE:
-               i915_request_put(request);
+               i915_request_put(rq);
                break;
        }
 
@@ -776,8 +788,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
        struct dma_fence *fence;
        int err;
 
-       GEM_BUG_ON(i915_request_timeline(rq) ==
-                  rcu_access_pointer(signal->timeline));
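+       /* Requests on the same timeline are already ordered with respect
+        * to each other; there is nothing to await.
+        */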
+       if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+               return 0;
 
        if (i915_request_started(signal))
                return 0;
@@ -821,7 +833,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
                return 0;
 
        err = 0;
-       if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
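+       /* Only install the wait if the timeline has not already advanced
+        * past this fence.
+        */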
+       if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
                err = i915_sw_fence_await_dma_fence(&rq->submit,
                                                    fence, 0,
                                                    I915_FENCE_GFP);
@@ -1318,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
-       i915_sw_fence_commit(&rq->semaphore);
        if (attr && rq->engine->schedule)
                rq->engine->schedule(rq, attr);
+       i915_sw_fence_commit(&rq->semaphore);
        i915_sw_fence_commit(&rq->submit);
 }
 
index f57eadc..fccc339 100644 (file)
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 
 #include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
        };
        struct list_head execute_cb;
        struct i915_sw_fence semaphore;
+       struct irq_work semaphore_work;
 
        /*
         * A list of everyone we wait upon, and everyone who waits upon us.
index b0ade76..d34141f 100644 (file)
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
        __idx;                                                          \
 })
 
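+/*
+ * 64-bit counterpart of is_power_of_2(): n & (n - 1) clears the lowest
+ * set bit, so the result is zero only when at most one bit is set.
+ */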
+static inline bool is_power_of_2_u64(u64 n)
+{
+       return (n != 0 && ((n & (n - 1)) == 0));
+}
+
 static inline void __list_del_many(struct list_head *head,
                                   struct list_head *first)
 {
index cbfb717..0648eda 100644 (file)
@@ -60,7 +60,7 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
         * to the Valleyview P-unit and not all sideband communications.
         */
        if (IS_VALLEYVIEW(i915)) {
-               pm_qos_update_request(&i915->sb_qos, 0);
+               cpu_latency_qos_update_request(&i915->sb_qos, 0);
                on_each_cpu(ping, NULL, 1);
        }
 }
@@ -68,7 +68,8 @@ static void __vlv_punit_get(struct drm_i915_private *i915)
 static void __vlv_punit_put(struct drm_i915_private *i915)
 {
        if (IS_VALLEYVIEW(i915))
-               pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);
+               cpu_latency_qos_update_request(&i915->sb_qos,
+                                              PM_QOS_DEFAULT_VALUE);
 
        iosf_mbi_punit_release();
 }
index 2aa4ed1..85a054f 100644 (file)
@@ -533,6 +533,8 @@ static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
index 3a400ce..9f22134 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
 #define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
+#define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
 #define USB_DEVICE_ID_LENOVO_X1_COVER  0x6085
 #define USB_DEVICE_ID_LENOVO_X1_TAB    0x60a3
 #define USB_DEVICE_ID_LENOVO_X1_TAB3   0x60b5
+#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D     0x608d
 
 #define USB_VENDOR_ID_LG               0x1fd2
 #define USB_DEVICE_ID_LG_MULTITOUCH    0x0064
index a549c42..33c102a 100644 (file)
@@ -458,9 +458,9 @@ static ssize_t picolcd_fb_update_rate_show(struct device *dev,
                if (ret >= PAGE_SIZE)
                        break;
                else if (i == fb_update_rate)
-                       ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
+                       ret += scnprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i);
                else
-                       ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
+                       ret += scnprintf(buf+ret, PAGE_SIZE-ret, "%u ", i);
        if (ret > 0)
                buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n';
        return ret;
index 0e7b2d9..3735546 100644 (file)
@@ -103,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
+       { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
index fb827c2..4d25577 100644 (file)
@@ -313,7 +313,7 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
 
                        while (i < ret) {
                                if (i + attribute->size > ret) {
-                                       len += snprintf(&buf[len],
+                                       len += scnprintf(&buf[len],
                                                        PAGE_SIZE - len,
                                                        "%d ", values[i]);
                                        break;
@@ -336,10 +336,10 @@ static ssize_t show_value(struct device *dev, struct device_attribute *attr,
                                        ++i;
                                        break;
                                }
-                               len += snprintf(&buf[len], PAGE_SIZE - len,
+                               len += scnprintf(&buf[len], PAGE_SIZE - len,
                                                "%lld ", value);
                        }
-                       len += snprintf(&buf[len], PAGE_SIZE - len, "\n");
+                       len += scnprintf(&buf[len], PAGE_SIZE - len, "\n");
 
                        return len;
                } else if (input)
index 9eec970..89869c6 100644 (file)
@@ -965,14 +965,13 @@ static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
 
        if (old_state != hi->iface_state) {
                if (hi->iface_state == CS_STATE_CONFIGURED) {
-                       pm_qos_add_request(&hi->pm_qos_req,
-                               PM_QOS_CPU_DMA_LATENCY,
+                       cpu_latency_qos_add_request(&hi->pm_qos_req,
                                CS_QOS_LATENCY_FOR_DATA_USEC);
                        local_bh_disable();
                        cs_hsi_read_on_data(hi);
                        local_bh_enable();
                } else if (old_state == CS_STATE_CONFIGURED) {
-                       pm_qos_remove_request(&hi->pm_qos_req);
+                       cpu_latency_qos_remove_request(&hi->pm_qos_req);
                }
        }
        return r;
@@ -1075,8 +1074,8 @@ static void cs_hsi_stop(struct cs_hsi_iface *hi)
        WARN_ON(!cs_state_idle(hi->control_state));
        WARN_ON(!cs_state_idle(hi->data_state));
 
-       if (pm_qos_request_active(&hi->pm_qos_req))
-               pm_qos_remove_request(&hi->pm_qos_req);
+       if (cpu_latency_qos_request_active(&hi->pm_qos_req))
+               cpu_latency_qos_remove_request(&hi->pm_qos_req);
 
        spin_lock_bh(&hi->lock);
        cs_hsi_free_data(hi);
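The two hunks above belong to the tree-wide switch from pm_qos_*_request() with PM_QOS_CPU_DMA_LATENCY to the dedicated CPU latency QoS helpers; the class argument disappears because these helpers only manage the CPU wakeup-latency constraint. A hedged usage sketch (the request object and the 20 us bound are illustrative, not from the patch):

    #include <linux/pm_qos.h>

    static struct pm_qos_request my_qos_req;

    /* Keep worst-case CPU exit latency at or below 20 us while busy. */
    cpu_latency_qos_add_request(&my_qos_req, 20);

    /* ... latency-sensitive work ... */

    if (cpu_latency_qos_request_active(&my_qos_req))
            cpu_latency_qos_remove_request(&my_qos_req);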
index 8e48c74..255f8f4 100644
@@ -718,9 +718,6 @@ static int msc_win_set_lockout(struct msc_window *win,
 
        if (old != expect) {
                ret = -EINVAL;
-               dev_warn_ratelimited(msc_dev(win->msc),
-                                    "expected lockout state %d, got %d\n",
-                                    expect, old);
                goto unlock;
        }
 
@@ -741,6 +738,10 @@ unlock:
                /* from intel_th_msc_window_unlock(), don't warn if not locked */
                if (expect == WIN_LOCKED && old == new)
                        return 0;
+
+               dev_warn_ratelimited(msc_dev(win->msc),
+                                    "expected lockout state %d, got %d\n",
+                                    expect, old);
        }
 
        return ret;
@@ -760,7 +761,7 @@ static int msc_configure(struct msc *msc)
        lockdep_assert_held(&msc->buf_mutex);
 
        if (msc->mode > MSC_MODE_MULTI)
-               return -ENOTSUPP;
+               return -EINVAL;
 
        if (msc->mode == MSC_MODE_MULTI) {
                if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
@@ -1294,7 +1295,7 @@ static int msc_buffer_alloc(struct msc *msc, unsigned long *nr_pages,
        } else if (msc->mode == MSC_MODE_MULTI) {
                ret = msc_buffer_multi_alloc(msc, nr_pages, nr_wins);
        } else {
-               ret = -ENOTSUPP;
+               ret = -EINVAL;
        }
 
        if (!ret) {
@@ -1530,7 +1531,7 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
                if (ret >= 0)
                        *ppos = iter->offset;
        } else {
-               ret = -ENOTSUPP;
+               ret = -EINVAL;
        }
 
 put_count:
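All three hunks in this file replace -ENOTSUPP with -EINVAL: ENOTSUPP (524) is a kernel-internal NFS errno, so userspace strerror() cannot decode it when it leaks out of a syscall. Illustrative restatement of the first check:

    /* ENOTSUPP (524) never reaches userspace cleanly; return an errno
     * it can decode (-EINVAL here; -EOPNOTSUPP is the other option). */
    if (msc->mode > MSC_MODE_MULTI)
            return -EINVAL;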
index e9d90b5..86aa6a4 100644
@@ -235,6 +235,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
                .driver_data = (kernel_ulong_t)&intel_th_2x,
        },
        {
+               /* Elkhart Lake CPU */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
+               .driver_data = (kernel_ulong_t)&intel_th_2x,
+       },
+       {
                /* Elkhart Lake */
                PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
                .driver_data = (kernel_ulong_t)&intel_th_2x,
index b178a54..360b5c0 100644
@@ -238,7 +238,7 @@ static struct configfs_attribute *sys_t_policy_attrs[] = {
 static inline bool sys_t_need_ts(struct sys_t_output *op)
 {
        if (op->node.ts_interval &&
-           time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) {
+           time_after(jiffies, op->ts_jiffies + op->node.ts_interval)) {
                op->ts_jiffies = jiffies;
 
                return true;
@@ -250,8 +250,8 @@ static inline bool sys_t_need_ts(struct sys_t_output *op)
 static bool sys_t_need_clock_sync(struct sys_t_output *op)
 {
        if (op->node.clocksync_interval &&
-           time_after(op->clocksync_jiffies + op->node.clocksync_interval,
-                      jiffies)) {
+           time_after(jiffies,
+                      op->clocksync_jiffies + op->node.clocksync_interval)) {
                op->clocksync_jiffies = jiffies;
 
                return true;
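Both fixes above reverse the operand order of time_after(): time_after(a, b) is true when a is later than b, so an "interval elapsed since stamp" test must compare jiffies against stamp + interval, not the other way around. A wrap-safe helper sketch (the name is illustrative):

    #include <linux/jiffies.h>

    /* True once at least interval jiffies have passed since stamp;
     * time_after() handles jiffies wraparound correctly. */
    static bool interval_elapsed(unsigned long stamp, unsigned long interval)
    {
            return time_after(jiffies, stamp + interval);
    }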
index 050adda..05b35ac 100644
@@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
        pm_runtime_get_noresume(&pdev->dev);
 
        i2c_del_adapter(&dev->adapter);
+       devm_free_irq(&pdev->dev, dev->irq, dev);
        pci_free_irq_vectors(pdev);
 }
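The added devm_free_irq() fixes an ordering problem: an IRQ requested with devm_request_irq() is normally released by devres only after remove() returns, which here would be after pci_free_irq_vectors() has torn down the vector it targets. A sketch of the corrected teardown order (struct my_dev and its irq field are assumptions for illustration):

    static void my_pci_remove(struct pci_dev *pdev)
    {
            struct my_dev *d = pci_get_drvdata(pdev);

            /* Release the devm-managed IRQ by hand so it is gone
             * before the vectors backing it are freed. */
            devm_free_irq(&pdev->dev, d->irq, d);
            pci_free_irq_vectors(pdev);
    }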
 
index 3a9e840..a4a6825 100644
@@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
        if (ret == -ENOENT)
                retdesc = ERR_PTR(-EPROBE_DEFER);
 
-       if (ret != -EPROBE_DEFER)
+       if (PTR_ERR(retdesc) != -EPROBE_DEFER)
                dev_err(dev, "error trying to get descriptor: %d\n", ret);
 
        return retdesc;
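The fix above tests the errno encoded in the returned pointer rather than the stale integer: retdesc may have just been rewritten to ERR_PTR(-EPROBE_DEFER) while ret still holds -ENOENT. The general ERR_PTR pattern, sketched with a hypothetical "reset" GPIO:

    struct gpio_desc *desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

    if (IS_ERR(desc)) {
            /* The errno travels inside the pointer, not beside it. */
            if (PTR_ERR(desc) != -EPROBE_DEFER)
                    dev_err(dev, "cannot get reset GPIO: %ld\n",
                            PTR_ERR(desc));
            return PTR_ERR(desc);
    }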
index ca4f096..a9c03f5 100644
 #define TCOBASE                0x050
 #define TCOCTL         0x054
 
-#define ACPIBASE               0x040
-#define ACPIBASE_SMI_OFF       0x030
-#define ACPICTRL               0x044
-#define ACPICTRL_EN            0x080
-
 #define SBREG_BAR              0x10
 #define SBREG_SMBCTRL          0xc6000c
 #define SBREG_SMBCTRL_DNV      0xcf000c
@@ -1553,7 +1548,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
                pci_bus_write_config_byte(pci_dev->bus, devfn, 0xe1, hidden);
        spin_unlock(&p2sb_spinlock);
 
-       res = &tco_res[ICH_RES_MEM_OFF];
+       res = &tco_res[1];
        if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
                res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
        else
@@ -1563,7 +1558,7 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
        res->flags = IORESOURCE_MEM;
 
        return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 3, &spt_tco_platform_data,
+                                       tco_res, 2, &spt_tco_platform_data,
                                        sizeof(spt_tco_platform_data));
 }
 
@@ -1576,17 +1571,16 @@ static struct platform_device *
 i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
                 struct resource *tco_res)
 {
-       return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
-                                       tco_res, 2, &cnl_tco_platform_data,
-                                       sizeof(cnl_tco_platform_data));
+       return platform_device_register_resndata(&pci_dev->dev,
+                       "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
+                       sizeof(cnl_tco_platform_data));
 }
 
 static void i801_add_tco(struct i801_priv *priv)
 {
-       u32 base_addr, tco_base, tco_ctl, ctrl_val;
        struct pci_dev *pci_dev = priv->pci_dev;
-       struct resource tco_res[3], *res;
-       unsigned int devfn;
+       struct resource tco_res[2], *res;
+       u32 tco_base, tco_ctl;
 
        /* If we have an ACPI-based watchdog, use that instead */
        if (acpi_has_watchdog())
@@ -1601,30 +1595,15 @@ static void i801_add_tco(struct i801_priv *priv)
                return;
 
        memset(tco_res, 0, sizeof(tco_res));
-
-       res = &tco_res[ICH_RES_IO_TCO];
-       res->start = tco_base & ~1;
-       res->end = res->start + 32 - 1;
-       res->flags = IORESOURCE_IO;
-
        /*
-        * Power Management registers.
+        * Always populate the main iTCO IO resource here. The second entry
+        * for NO_REBOOT MMIO is filled by the SPT-specific function.
         */
-       devfn = PCI_DEVFN(PCI_SLOT(pci_dev->devfn), 2);
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPIBASE, &base_addr);
-
-       res = &tco_res[ICH_RES_IO_SMI];
-       res->start = (base_addr & ~1) + ACPIBASE_SMI_OFF;
-       res->end = res->start + 3;
+       res = &tco_res[0];
+       res->start = tco_base & ~1;
+       res->end = res->start + 32 - 1;
        res->flags = IORESOURCE_IO;
 
-       /*
-        * Enable the ACPI I/O space.
-        */
-       pci_bus_read_config_dword(pci_dev->bus, devfn, ACPICTRL, &ctrl_val);
-       ctrl_val |= ACPICTRL_EN;
-       pci_bus_write_config_dword(pci_dev->bus, devfn, ACPICTRL, ctrl_val);
-
        if (priv->features & FEATURE_TCO_CNL)
                priv->tco_pdev = i801_add_tco_cnl(priv, pci_dev, tco_res);
        else
index 8f3dbc9..8b0ff78 100644
@@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
 {
        struct device *dev;
+       struct i2c_client *client;
 
        dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
-       return dev ? i2c_verify_client(dev) : NULL;
+       if (!dev)
+               return NULL;
+
+       client = i2c_verify_client(dev);
+       if (!client)
+               put_device(dev);
+
+       return client;
 }
 
 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
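bus_find_device_by_acpi_dev() returns the device with an elevated reference count, and i2c_verify_client() is only a type check, so when the device turns out not to be an I2C client the lookup reference has to be dropped or it leaks. The general rule, sketched with a by-name lookup (the device name is illustrative):

    struct device *dev = bus_find_device_by_name(&platform_bus_type,
                                                 NULL, "my-dev");
    if (dev) {
            /* ... use dev ... */
            put_device(dev);  /* balance the reference the lookup took */
    }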
index d556066..65b84d5 100644
@@ -2,8 +2,9 @@
 /*
  * intel_idle.c - native hardware idle loop for modern Intel processors
  *
- * Copyright (c) 2013, Intel Corporation.
+ * Copyright (c) 2013 - 2020, Intel Corporation.
  * Len Brown <len.brown@intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
 
 /*
 /*
  * Known limitations
  *
- * The driver currently initializes for_each_online_cpu() upon modprobe.
- * It it unaware of subsequent processors hot-added to the system.
- * This means that if you boot with maxcpus=n and later online
- * processors above n, those processors will use C1 only.
- *
  * ACPI has a .suspend hack to turn off deep C-states during suspend
  * to avoid complications with the lapic timer workaround.
  * Have not seen issues with suspend, but may need same workaround here.
@@ -55,7 +51,7 @@
 #include <asm/mwait.h>
 #include <asm/msr.h>
 
-#define INTEL_IDLE_VERSION "0.4.1"
+#define INTEL_IDLE_VERSION "0.5.1"
 
 static struct cpuidle_driver intel_idle_driver = {
        .name = "intel_idle",
@@ -65,11 +61,12 @@ static struct cpuidle_driver intel_idle_driver = {
 static int max_cstate = CPUIDLE_STATE_MAX - 1;
 static unsigned int disabled_states_mask;
 
-static unsigned int mwait_substates;
+static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
+
+static unsigned long auto_demotion_disable_flags;
+static bool disable_promotion_to_c1e;
 
-#define LAPIC_TIMER_ALWAYS_RELIABLE 0xFFFFFFFF
-/* Reliable LAPIC Timer States, bit 1 for C1 etc.  */
-static unsigned int lapic_timer_reliable_states = (1 << 1);     /* Default to only C1 */
+static bool lapic_timer_always_reliable;
 
 struct idle_cpu {
        struct cpuidle_state *state_table;
@@ -84,13 +81,10 @@ struct idle_cpu {
        bool use_acpi;
 };
 
-static const struct idle_cpu *icpu;
-static struct cpuidle_device __percpu *intel_idle_cpuidle_devices;
-static int intel_idle(struct cpuidle_device *dev,
-                       struct cpuidle_driver *drv, int index);
-static void intel_idle_s2idle(struct cpuidle_device *dev,
-                             struct cpuidle_driver *drv, int index);
-static struct cpuidle_state *cpuidle_state_table;
+static const struct idle_cpu *icpu __initdata;
+static struct cpuidle_state *cpuidle_state_table __initdata;
+
+static unsigned int mwait_substates __initdata;
 
 /*
  * Enable this state by default even if the ACPI _CST does not list it.
@@ -103,7 +97,7 @@ static struct cpuidle_state *cpuidle_state_table;
  * If this flag is set, SW flushes the TLB, so even if the
  * HW doesn't do the flushing, this flag is safe to use.
  */
-#define CPUIDLE_FLAG_TLB_FLUSHED       0x10000
+#define CPUIDLE_FLAG_TLB_FLUSHED       BIT(16)
 
 /*
  * MWAIT takes an 8-bit "hint" in EAX "suggesting"
@@ -115,12 +109,87 @@ static struct cpuidle_state *cpuidle_state_table;
 #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF)
 #define MWAIT2flg(eax) ((eax & 0xFF) << 24)
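For orientation in the rewritten functions below: each state's MWAIT hint lives in bits 31:24 of its flags word, and the hint's upper nibble is the C-state number. A hedged sketch of the encoding (the 0x20 hint matches the C6 entries in the state tables further down):

    #include <asm/mwait.h> /* MWAIT_SUBSTATE_SIZE (4), MWAIT_CSTATE_MASK (0xf) */

    unsigned int flags = MWAIT2flg(0x20);       /* store hint 0x20 (C6) */
    unsigned int eax = flg2MWAIT(flags);        /* recover 0x20 */
    unsigned int cstate =
            (eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK;   /* 2 */

    /* intel_idle() arms tick broadcast only when cstate != 0, i.e. when
     * the target state is deeper than C1 and the LAPIC timer may stop. */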
 
+/**
+ * intel_idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * If the local APIC timer is not known to be reliable in the target idle state,
+ * enable one-shot tick broadcasting for the target CPU before executing MWAIT.
+ *
+ * Optionally call leave_mm() for the target CPU upfront to avoid wakeups due to
+ * flushing user TLBs.
+ *
+ * Must be called under local_irq_disable().
+ */
+static __cpuidle int intel_idle(struct cpuidle_device *dev,
+                               struct cpuidle_driver *drv, int index)
+{
+       struct cpuidle_state *state = &drv->states[index];
+       unsigned long eax = flg2MWAIT(state->flags);
+       unsigned long ecx = 1; /* break on interrupt flag */
+       bool uninitialized_var(tick);
+       int cpu = smp_processor_id();
+
+       /*
+        * leave_mm() to avoid costly and often unnecessary wakeups
+        * for flushing the user TLBs associated with the active mm.
+        */
+       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
+               leave_mm(cpu);
+
+       if (!static_cpu_has(X86_FEATURE_ARAT) && !lapic_timer_always_reliable) {
+               /*
+                * Switch over to one-shot tick broadcast if the target C-state
+                * is deeper than C1.
+                */
+               if ((eax >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) {
+                       tick = true;
+                       tick_broadcast_enter();
+               } else {
+                       tick = false;
+               }
+       }
+
+       mwait_idle_with_hints(eax, ecx);
+
+       if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
+               tick_broadcast_exit();
+
+       return index;
+}
+
+/**
+ * intel_idle_s2idle - Ask the processor to enter the given idle state.
+ * @dev: cpuidle device of the target CPU.
+ * @drv: cpuidle driver (assumed to point to intel_idle_driver).
+ * @index: Target idle state index.
+ *
+ * Use the MWAIT instruction to notify the processor that the CPU represented by
+ * @dev is idle and it can try to enter the idle state corresponding to @index.
+ *
+ * Invoked as a suspend-to-idle callback routine with frozen user space, frozen
+ * scheduler tick and suspended scheduler clock on the target CPU.
+ */
+static __cpuidle void intel_idle_s2idle(struct cpuidle_device *dev,
+                                       struct cpuidle_driver *drv, int index)
+{
+       unsigned long eax = flg2MWAIT(drv->states[index].flags);
+       unsigned long ecx = 1; /* break on interrupt flag */
+
+       mwait_idle_with_hints(eax, ecx);
+}
+
 /*
  * States are indexed by the cstate number,
  * which is also the index into the MWAIT hint array.
  * Thus C0 is a dummy.
  */
-static struct cpuidle_state nehalem_cstates[] = {
+static struct cpuidle_state nehalem_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -157,7 +226,7 @@ static struct cpuidle_state nehalem_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state snb_cstates[] = {
+static struct cpuidle_state snb_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -202,7 +271,7 @@ static struct cpuidle_state snb_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state byt_cstates[] = {
+static struct cpuidle_state byt_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -247,7 +316,7 @@ static struct cpuidle_state byt_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state cht_cstates[] = {
+static struct cpuidle_state cht_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -292,7 +361,7 @@ static struct cpuidle_state cht_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivb_cstates[] = {
+static struct cpuidle_state ivb_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -337,7 +406,7 @@ static struct cpuidle_state ivb_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivt_cstates[] = {
+static struct cpuidle_state ivt_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -374,7 +443,7 @@ static struct cpuidle_state ivt_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivt_cstates_4s[] = {
+static struct cpuidle_state ivt_cstates_4s[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -411,7 +480,7 @@ static struct cpuidle_state ivt_cstates_4s[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state ivt_cstates_8s[] = {
+static struct cpuidle_state ivt_cstates_8s[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -448,7 +517,7 @@ static struct cpuidle_state ivt_cstates_8s[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state hsw_cstates[] = {
+static struct cpuidle_state hsw_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -516,7 +585,7 @@ static struct cpuidle_state hsw_cstates[] = {
        {
                .enter = NULL }
 };
-static struct cpuidle_state bdw_cstates[] = {
+static struct cpuidle_state bdw_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -585,7 +654,7 @@ static struct cpuidle_state bdw_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state skl_cstates[] = {
+static struct cpuidle_state skl_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -654,7 +723,7 @@ static struct cpuidle_state skl_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state skx_cstates[] = {
+static struct cpuidle_state skx_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -683,7 +752,7 @@ static struct cpuidle_state skx_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state atom_cstates[] = {
+static struct cpuidle_state atom_cstates[] __initdata = {
        {
                .name = "C1E",
                .desc = "MWAIT 0x00",
@@ -719,7 +788,7 @@ static struct cpuidle_state atom_cstates[] = {
        {
                .enter = NULL }
 };
-static struct cpuidle_state tangier_cstates[] = {
+static struct cpuidle_state tangier_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -763,7 +832,7 @@ static struct cpuidle_state tangier_cstates[] = {
        {
                .enter = NULL }
 };
-static struct cpuidle_state avn_cstates[] = {
+static struct cpuidle_state avn_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -783,7 +852,7 @@ static struct cpuidle_state avn_cstates[] = {
        {
                .enter = NULL }
 };
-static struct cpuidle_state knl_cstates[] = {
+static struct cpuidle_state knl_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -804,7 +873,7 @@ static struct cpuidle_state knl_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state bxt_cstates[] = {
+static struct cpuidle_state bxt_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -865,7 +934,7 @@ static struct cpuidle_state bxt_cstates[] = {
                .enter = NULL }
 };
 
-static struct cpuidle_state dnv_cstates[] = {
+static struct cpuidle_state dnv_cstates[] __initdata = {
        {
                .name = "C1",
                .desc = "MWAIT 0x00",
@@ -894,174 +963,116 @@ static struct cpuidle_state dnv_cstates[] = {
                .enter = NULL }
 };
 
-/**
- * intel_idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: index of cpuidle state
- *
- * Must be called under local_irq_disable().
- */
-static __cpuidle int intel_idle(struct cpuidle_device *dev,
-                               struct cpuidle_driver *drv, int index)
-{
-       unsigned long ecx = 1; /* break on interrupt flag */
-       struct cpuidle_state *state = &drv->states[index];
-       unsigned long eax = flg2MWAIT(state->flags);
-       unsigned int cstate;
-       bool uninitialized_var(tick);
-       int cpu = smp_processor_id();
-
-       /*
-        * leave_mm() to avoid costly and often unnecessary wakeups
-        * for flushing the user TLB's associated with the active mm.
-        */
-       if (state->flags & CPUIDLE_FLAG_TLB_FLUSHED)
-               leave_mm(cpu);
-
-       if (!static_cpu_has(X86_FEATURE_ARAT)) {
-               cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) &
-                               MWAIT_CSTATE_MASK) + 1;
-               tick = false;
-               if (!(lapic_timer_reliable_states & (1 << (cstate)))) {
-                       tick = true;
-                       tick_broadcast_enter();
-               }
-       }
-
-       mwait_idle_with_hints(eax, ecx);
-
-       if (!static_cpu_has(X86_FEATURE_ARAT) && tick)
-               tick_broadcast_exit();
-
-       return index;
-}
-
-/**
- * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle
- * @dev: cpuidle_device
- * @drv: cpuidle driver
- * @index: state index
- */
-static void intel_idle_s2idle(struct cpuidle_device *dev,
-                            struct cpuidle_driver *drv, int index)
-{
-       unsigned long ecx = 1; /* break on interrupt flag */
-       unsigned long eax = flg2MWAIT(drv->states[index].flags);
-
-       mwait_idle_with_hints(eax, ecx);
-}
-
-static const struct idle_cpu idle_cpu_nehalem = {
+static const struct idle_cpu idle_cpu_nehalem __initconst = {
        .state_table = nehalem_cstates,
        .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_nhx = {
+static const struct idle_cpu idle_cpu_nhx __initconst = {
        .state_table = nehalem_cstates,
        .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_atom = {
+static const struct idle_cpu idle_cpu_atom __initconst = {
        .state_table = atom_cstates,
 };
 
-static const struct idle_cpu idle_cpu_tangier = {
+static const struct idle_cpu idle_cpu_tangier __initconst = {
        .state_table = tangier_cstates,
 };
 
-static const struct idle_cpu idle_cpu_lincroft = {
+static const struct idle_cpu idle_cpu_lincroft __initconst = {
        .state_table = atom_cstates,
        .auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE,
 };
 
-static const struct idle_cpu idle_cpu_snb = {
+static const struct idle_cpu idle_cpu_snb __initconst = {
        .state_table = snb_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_snx = {
+static const struct idle_cpu idle_cpu_snx __initconst = {
        .state_table = snb_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_byt = {
+static const struct idle_cpu idle_cpu_byt __initconst = {
        .state_table = byt_cstates,
        .disable_promotion_to_c1e = true,
        .byt_auto_demotion_disable_flag = true,
 };
 
-static const struct idle_cpu idle_cpu_cht = {
+static const struct idle_cpu idle_cpu_cht __initconst = {
        .state_table = cht_cstates,
        .disable_promotion_to_c1e = true,
        .byt_auto_demotion_disable_flag = true,
 };
 
-static const struct idle_cpu idle_cpu_ivb = {
+static const struct idle_cpu idle_cpu_ivb __initconst = {
        .state_table = ivb_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_ivt = {
+static const struct idle_cpu idle_cpu_ivt __initconst = {
        .state_table = ivt_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_hsw = {
+static const struct idle_cpu idle_cpu_hsw __initconst = {
        .state_table = hsw_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_hsx = {
+static const struct idle_cpu idle_cpu_hsx __initconst = {
        .state_table = hsw_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_bdw = {
+static const struct idle_cpu idle_cpu_bdw __initconst = {
        .state_table = bdw_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_bdx = {
+static const struct idle_cpu idle_cpu_bdx __initconst = {
        .state_table = bdw_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_skl = {
+static const struct idle_cpu idle_cpu_skl __initconst = {
        .state_table = skl_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_skx = {
+static const struct idle_cpu idle_cpu_skx __initconst = {
        .state_table = skx_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_avn = {
+static const struct idle_cpu idle_cpu_avn __initconst = {
        .state_table = avn_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_knl = {
+static const struct idle_cpu idle_cpu_knl __initconst = {
        .state_table = knl_cstates,
        .use_acpi = true,
 };
 
-static const struct idle_cpu idle_cpu_bxt = {
+static const struct idle_cpu idle_cpu_bxt __initconst = {
        .state_table = bxt_cstates,
        .disable_promotion_to_c1e = true,
 };
 
-static const struct idle_cpu idle_cpu_dnv = {
+static const struct idle_cpu idle_cpu_dnv __initconst = {
        .state_table = dnv_cstates,
        .disable_promotion_to_c1e = true,
        .use_acpi = true,
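All of the state tables and idle_cpu descriptors above gain __initdata/__initconst because, after this rework, they are only read while intel_idle_init() builds the registered cpuidle driver; the kernel can then return their memory after boot. A minimal sketch of what the annotations mean (names are illustrative):

    #include <linux/init.h>

    /* Placed in .init.rodata / .init.data and freed once boot completes
     * ("Freeing unused kernel memory"); never touch them afterwards. */
    static const int setup_table[] __initconst = { 1, 2, 3 };
    static int setup_scratch __initdata;

    static int __init my_driver_init(void)
    {
            setup_scratch = setup_table[0]; /* OK: still in __init context */
            return 0;
    }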
@@ -1273,11 +1284,11 @@ static inline void intel_idle_init_cstates_acpi(struct cpuidle_driver *drv) { }
 static inline bool intel_idle_off_by_default(u32 mwait_hint) { return false; }
 #endif /* !CONFIG_ACPI_PROCESSOR_CSTATE */
 
-/*
- * ivt_idle_state_table_update(void)
+/**
+ * ivt_idle_state_table_update - Tune the idle states table for Ivy Town.
  *
- * Tune IVT multi-socket targets
- * Assumption: num_sockets == (max_package_num + 1)
+ * Tune IVT multi-socket targets.
+ * Assumption: num_sockets == (max_package_num + 1).
  */
 static void __init ivt_idle_state_table_update(void)
 {
@@ -1323,11 +1334,11 @@ static unsigned long long __init irtl_2_usec(unsigned long long irtl)
        return div_u64((irtl & 0x3FF) * ns, NSEC_PER_USEC);
 }
 
-/*
- * bxt_idle_state_table_update(void)
+/**
+ * bxt_idle_state_table_update - Fix up the Broxton idle states table.
  *
- * On BXT, we trust the IRTL to show the definitive maximum latency
- * We use the same value for target_residency.
+ * On BXT, trust the IRTL (Interrupt Response Time Limit) MSR to show the
+ * definitive maximum latency and use the same value for target_residency.
  */
 static void __init bxt_idle_state_table_update(void)
 {
@@ -1370,11 +1381,11 @@ static void __init bxt_idle_state_table_update(void)
        }
 
 }
-/*
- * sklh_idle_state_table_update(void)
+
+/**
+ * sklh_idle_state_table_update - Fix up the Sky Lake idle states table.
  *
- * On SKL-H (model 0x5e) disable C8 and C9 if:
- * C10 is enabled and SGX disabled
+ * On SKL-H (model 0x5e) skip C8 and C9 if C10 is enabled and SGX disabled.
  */
 static void __init sklh_idle_state_table_update(void)
 {
@@ -1485,9 +1496,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv)
        }
 }
 
-/*
- * intel_idle_cpuidle_driver_init()
- * allocate, initialize cpuidle_states
+/**
+ * intel_idle_cpuidle_driver_init - Create the list of available idle states.
+ * @drv: cpuidle driver structure to initialize.
  */
 static void __init intel_idle_cpuidle_driver_init(struct cpuidle_driver *drv)
 {
@@ -1509,7 +1520,7 @@ static void auto_demotion_disable(void)
        unsigned long long msr_bits;
 
        rdmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
-       msr_bits &= ~(icpu->auto_demotion_disable_flags);
+       msr_bits &= ~auto_demotion_disable_flags;
        wrmsrl(MSR_PKG_CST_CONFIG_CONTROL, msr_bits);
 }
 
@@ -1522,10 +1533,12 @@ static void c1e_promotion_disable(void)
        wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
 }
 
-/*
- * intel_idle_cpu_init()
- * allocate, initialize, register cpuidle_devices
- * @cpu: cpu/core to initialize
+/**
+ * intel_idle_cpu_init - Register the target CPU with the cpuidle core.
+ * @cpu: CPU to initialize.
+ *
+ * Register a cpuidle device object for @cpu and update its MSRs in accordance
+ * with the processor model flags.
  */
 static int intel_idle_cpu_init(unsigned int cpu)
 {
@@ -1539,13 +1552,10 @@ static int intel_idle_cpu_init(unsigned int cpu)
                return -EIO;
        }
 
-       if (!icpu)
-               return 0;
-
-       if (icpu->auto_demotion_disable_flags)
+       if (auto_demotion_disable_flags)
                auto_demotion_disable();
 
-       if (icpu->disable_promotion_to_c1e)
+       if (disable_promotion_to_c1e)
                c1e_promotion_disable();
 
        return 0;
@@ -1555,7 +1565,7 @@ static int intel_idle_cpu_online(unsigned int cpu)
 {
        struct cpuidle_device *dev;
 
-       if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE)
+       if (!lapic_timer_always_reliable)
                tick_broadcast_enable();
 
        /*
@@ -1623,6 +1633,8 @@ static int __init intel_idle_init(void)
        icpu = (const struct idle_cpu *)id->driver_data;
        if (icpu) {
                cpuidle_state_table = icpu->state_table;
+               auto_demotion_disable_flags = icpu->auto_demotion_disable_flags;
+               disable_promotion_to_c1e = icpu->disable_promotion_to_c1e;
                if (icpu->use_acpi || force_use_acpi)
                        intel_idle_acpi_cst_extract();
        } else if (!intel_idle_acpi_cst_extract()) {
@@ -1647,15 +1659,15 @@ static int __init intel_idle_init(void)
        }
 
        if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
-               lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
+               lapic_timer_always_reliable = true;
 
        retval = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "idle/intel:online",
                                   intel_idle_cpu_online, NULL);
        if (retval < 0)
                goto hp_setup_fail;
 
-       pr_debug("lapic_timer_reliable_states 0x%x\n",
-                lapic_timer_reliable_states);
+       pr_debug("Local APIC timer is reliable in %s\n",
+                lapic_timer_always_reliable ? "all C-states" : "C1");
 
        return 0;
 
index 67b8817..60daf04 100644
@@ -237,6 +237,7 @@ static const struct adxl372_axis_lookup adxl372_axis_lookup_table[] = {
                .realbits = 12,                                         \
                .storagebits = 16,                                      \
                .shift = 4,                                             \
+               .endianness = IIO_BE,                                   \
        },                                                              \
 }
 
index 633955d..849cf74 100644
@@ -110,7 +110,7 @@ MODULE_DEVICE_TABLE(of, st_accel_of_match);
 
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id st_accel_acpi_match[] = {
-       {"SMO8840", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
+       {"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
        {"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
        { },
 };
index a5c7771..9d96f7d 100644
@@ -723,6 +723,7 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
 
        for_each_set_bit(bit, indio->active_scan_mask, indio->num_channels) {
                struct iio_chan_spec const *chan = at91_adc_chan_get(indio, bit);
+               u32 cor;
 
                if (!chan)
                        continue;
@@ -732,6 +733,20 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
                        continue;
 
                if (state) {
+                       cor = at91_adc_readl(st, AT91_SAMA5D2_COR);
+
+                       if (chan->differential)
+                               cor |= (BIT(chan->channel) |
+                                       BIT(chan->channel2)) <<
+                                       AT91_SAMA5D2_COR_DIFF_OFFSET;
+                       else
+                               cor &= ~(BIT(chan->channel) <<
+                                      AT91_SAMA5D2_COR_DIFF_OFFSET);
+
+                       at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
+               }
+
+               if (state) {
                        at91_adc_writel(st, AT91_SAMA5D2_CHER,
                                        BIT(chan->channel));
                        /* enable irq only if not using DMA */
index 2aad2cd..76a60d9 100644
@@ -842,31 +842,6 @@ static inline void stm32_dfsdm_process_data(struct stm32_dfsdm_adc *adc,
        }
 }
 
-static irqreturn_t stm32_dfsdm_adc_trigger_handler(int irq, void *p)
-{
-       struct iio_poll_func *pf = p;
-       struct iio_dev *indio_dev = pf->indio_dev;
-       struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
-       int available = stm32_dfsdm_adc_dma_residue(adc);
-
-       while (available >= indio_dev->scan_bytes) {
-               s32 *buffer = (s32 *)&adc->rx_buf[adc->bufi];
-
-               stm32_dfsdm_process_data(adc, buffer);
-
-               iio_push_to_buffers_with_timestamp(indio_dev, buffer,
-                                                  pf->timestamp);
-               available -= indio_dev->scan_bytes;
-               adc->bufi += indio_dev->scan_bytes;
-               if (adc->bufi >= adc->buf_sz)
-                       adc->bufi = 0;
-       }
-
-       iio_trigger_notify_done(indio_dev->trig);
-
-       return IRQ_HANDLED;
-}
-
 static void stm32_dfsdm_dma_buffer_done(void *data)
 {
        struct iio_dev *indio_dev = data;
@@ -874,11 +849,6 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
        int available = stm32_dfsdm_adc_dma_residue(adc);
        size_t old_pos;
 
-       if (indio_dev->currentmode & INDIO_BUFFER_TRIGGERED) {
-               iio_trigger_poll_chained(indio_dev->trig);
-               return;
-       }
-
        /*
         * FIXME: the in-kernel interface does not support cyclic DMA buffers,
         * and offers only an interface to push data one sample at a time.
@@ -906,7 +876,15 @@ static void stm32_dfsdm_dma_buffer_done(void *data)
                        adc->bufi = 0;
                        old_pos = 0;
                }
-               /* regular iio buffer without trigger */
+               /*
+                * In DMA mode the IIO trigger services are not used
+                * (e.g. there is no call to iio_trigger_poll).
+                * Calling the IRQ handler associated with the hardware trigger
+                * is not relevant, as the conversions have already been done;
+                * data transfers are performed directly in the DMA callback
+                * instead. This also avoids calling a trigger IRQ handler that
+                * may sleep from atomic context (the DMA IRQ handler context).
+                */
                if (adc->dev_data->type == DFSDM_IIO)
                        iio_push_to_buffers(indio_dev, buffer);
        }
@@ -1536,8 +1514,7 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
        }
 
        ret = iio_triggered_buffer_setup(indio_dev,
-                                        &iio_pollfunc_store_time,
-                                        &stm32_dfsdm_adc_trigger_handler,
+                                        &iio_pollfunc_store_time, NULL,
                                         &stm32_dfsdm_buffer_setup_ops);
        if (ret) {
                stm32_dfsdm_dma_release(indio_dev);
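With the trigger handler gone, the DMA completion callback is the only data path. A sketch of that shape, assuming a hypothetical struct my_adc holding the DMA receive buffer and registration via iio_triggered_buffer_setup(..., NULL, ...) as in the hunk above:

    struct my_adc {
            u8 rx_buf[128];
    };

    static void my_dma_done(void *data)
    {
            struct iio_dev *indio_dev = data;
            struct my_adc *adc = iio_priv(indio_dev);

            /* DMA callbacks run in atomic context: push the finished
             * samples directly, never through a trigger handler that
             * might sleep. */
            iio_push_to_buffers(indio_dev, adc->rx_buf);
    }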
index 0b91de4..a7e65a5 100644
@@ -91,6 +91,8 @@ config SPS30
        tristate "SPS30 particulate matter sensor"
        depends on I2C
        select CRC8
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say Y here to build support for the Sensirion SPS30 particulate
          matter sensor.
index b0e241a..e5b00a6 100644
@@ -167,16 +167,17 @@ static int vcnl4200_init(struct vcnl4000_data *data)
        data->vcnl4200_ps.reg = VCNL4200_PS_DATA;
        switch (id) {
        case VCNL4200_PROD_ID:
-               /* Integration time is 50ms, but the experiments */
-               /* show 54ms in total. */
-               data->vcnl4200_al.sampling_rate = ktime_set(0, 54000 * 1000);
-               data->vcnl4200_ps.sampling_rate = ktime_set(0, 4200 * 1000);
+               /* Default wait time is 50ms, add 20% tolerance. */
+               data->vcnl4200_al.sampling_rate = ktime_set(0, 60000 * 1000);
+               /* Default wait time is 4.8ms, add 20% tolerance. */
+               data->vcnl4200_ps.sampling_rate = ktime_set(0, 5760 * 1000);
                data->al_scale = 24000;
                break;
        case VCNL4040_PROD_ID:
-               /* Integration time is 80ms, add 10ms. */
-               data->vcnl4200_al.sampling_rate = ktime_set(0, 100000 * 1000);
-               data->vcnl4200_ps.sampling_rate = ktime_set(0, 100000 * 1000);
+               /* Default wait time is 80ms, add 20% tolerance. */
+               data->vcnl4200_al.sampling_rate = ktime_set(0, 96000 * 1000);
+               /* Default wait time is 5ms, add 20% tolerance. */
+               data->vcnl4200_ps.sampling_rate = ktime_set(0, 6000 * 1000);
                data->al_scale = 120000;
                break;
        }
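The new sampling rates are simply the default wait times plus a flat 20% margin, expressed in nanoseconds for ktime_set():

    /* 50   ms * 1.2 = 60   ms -> ktime_set(0, 60000 * 1000)
     * 4.8  ms * 1.2 = 5.76 ms -> ktime_set(0,  5760 * 1000)
     * 80   ms * 1.2 = 96   ms -> ktime_set(0, 96000 * 1000)
     * 5    ms * 1.2 = 6    ms -> ktime_set(0,  6000 * 1000) */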
index fc7e910..d329967 100644
@@ -564,7 +564,7 @@ static int ak8974_read_raw(struct iio_dev *indio_dev,
                 * We read all axes and discard all but one, for optimized
                 * reading, use the triggered buffer.
                 */
-               *val = le16_to_cpu(hw_values[chan->address]);
+               *val = (s16)le16_to_cpu(hw_values[chan->address]);
 
                ret = IIO_VAL_INT;
        }
index 34aff10..12b893c 100644
@@ -269,7 +269,7 @@ static const struct iio_chan_spec ping_chan_spec[] = {
 
 static const struct of_device_id of_ping_match[] = {
        { .compatible = "parallax,ping", .data = &pa_ping_cfg},
-       { .compatible = "parallax,laserping", .data = &pa_ping_cfg},
+       { .compatible = "parallax,laserping", .data = &pa_laser_ping_cfg},
        {},
 };
 
index 2e0d32a..2f82e8c 100644
@@ -161,7 +161,8 @@ static int stm32_timer_start(struct stm32_timer_trigger *priv,
        return 0;
 }
 
-static void stm32_timer_stop(struct stm32_timer_trigger *priv)
+static void stm32_timer_stop(struct stm32_timer_trigger *priv,
+                            struct iio_trigger *trig)
 {
        u32 ccer, cr1;
 
@@ -179,6 +180,12 @@ static void stm32_timer_stop(struct stm32_timer_trigger *priv)
        regmap_write(priv->regmap, TIM_PSC, 0);
        regmap_write(priv->regmap, TIM_ARR, 0);
 
+       /* Force disable master mode */
+       if (stm32_timer_is_trgo2_name(trig->name))
+               regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS2, 0);
+       else
+               regmap_update_bits(priv->regmap, TIM_CR2, TIM_CR2_MMS, 0);
+
        /* Make sure that registers are updated */
        regmap_update_bits(priv->regmap, TIM_EGR, TIM_EGR_UG, TIM_EGR_UG);
 }
@@ -197,7 +204,7 @@ static ssize_t stm32_tt_store_frequency(struct device *dev,
                return ret;
 
        if (freq == 0) {
-               stm32_timer_stop(priv);
+               stm32_timer_stop(priv, trig);
        } else {
                ret = stm32_timer_start(priv, trig, freq);
                if (ret)
index aac132b..20cce36 100644
@@ -3826,7 +3826,7 @@ int amd_iommu_activate_guest_mode(void *data)
        entry->lo.fields_vapic.ga_tag      = ir_data->ga_tag;
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
 
@@ -3852,7 +3852,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
                                APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
 
        return modify_irte_ga(ir_data->irq_2_irte.devid,
-                             ir_data->irq_2_irte.index, entry, NULL);
+                             ir_data->irq_2_irte.index, entry, ir_data);
 }
 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
 
index a2e96a5..ba128d1 100644
@@ -177,15 +177,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
 
-       msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
-       if (!msi_page)
-               return -ENOMEM;
-
        for (i = 0; i < num_pages; i++) {
-               msi_page[i].phys = start;
-               msi_page[i].iova = start;
-               INIT_LIST_HEAD(&msi_page[i].list);
-               list_add(&msi_page[i].list, &cookie->msi_page_list);
+               msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
+               if (!msi_page)
+                       return -ENOMEM;
+
+               msi_page->phys = start;
+               msi_page->iova = start;
+               INIT_LIST_HEAD(&msi_page->list);
+               list_add(&msi_page->list, &cookie->msi_page_list);
                start += iovad->granule;
        }
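The switch from one kcalloc() block to per-entry kmalloc() matters because entries on cookie->msi_page_list are torn down one at a time with kfree(), and freeing individual elements carved out of a single allocation is undefined. The ownership rule, sketched generically (struct and list names are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct page_node {
            struct list_head list;
    };

    static LIST_HEAD(head);

    /* Allocate each node separately ... */
    struct page_node *n = kmalloc(sizeof(*n), GFP_KERNEL), *pos, *tmp;

    if (!n)
            return -ENOMEM;
    INIT_LIST_HEAD(&n->list);
    list_add(&n->list, &head);

    /* ... so that each node can later be freed separately. */
    list_for_each_entry_safe(pos, tmp, &head, list) {
            list_del(&pos->list);
            kfree(pos);
    }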
 
index 071bb42..f77dae7 100644
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/iommu.h>
 #include <linux/numa.h>
+#include <linux/limits.h>
 #include <asm/irq_remapping.h>
 #include <asm/iommu_table.h>
 
@@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 
        BUG_ON(dev->is_virtfn);
 
+       /*
+        * Ignore devices that have a domain number higher than what can
+        * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
+        */
+       if (pci_domain_nr(dev->bus) > U16_MAX)
+               return NULL;
+
        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
@@ -363,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
 {
        struct dmar_drhd_unit *dmaru;
 
-       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
+       list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
+                               dmar_rcu_check())
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;
@@ -440,12 +449,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
 
        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+               pr_warn(FW_BUG
                           "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
@@ -471,14 +481,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
                        return 0;
                }
        }
-       WARN_TAINT(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn(FW_BUG
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
-               drhd->reg_base_addr,
+               rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 
        return 0;
 }
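These DMAR hunks all trade WARN_TAINT*() for pr_warn() plus an explicit add_taint(): the firmware bug still taints the kernel, but without the misleading stack trace a WARN would print. The replacement pattern in isolation (message text illustrative):

    pr_warn(FW_BUG "broken firmware table, applying workaround\n");
    add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);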
@@ -827,14 +837,14 @@ int __init dmar_table_init(void)
 
 static void warn_invalid_dmar(u64 addr, const char *message)
 {
-       WARN_TAINT_ONCE(
-               1, TAINT_FIRMWARE_WORKAROUND,
+       pr_warn_once(FW_BUG
                "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                addr, message,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
 }
 
 static int __ref
index c1257be..3eb1fe2 100644
@@ -33,38 +33,42 @@ struct iommu_regset {
 
 #define IOMMU_REGSET_ENTRY(_reg_)                                      \
        { DMAR_##_reg_##_REG, __stringify(_reg_) }
-static const struct iommu_regset iommu_regs[] = {
+
+static const struct iommu_regset iommu_regs_32[] = {
        IOMMU_REGSET_ENTRY(VER),
-       IOMMU_REGSET_ENTRY(CAP),
-       IOMMU_REGSET_ENTRY(ECAP),
        IOMMU_REGSET_ENTRY(GCMD),
        IOMMU_REGSET_ENTRY(GSTS),
-       IOMMU_REGSET_ENTRY(RTADDR),
-       IOMMU_REGSET_ENTRY(CCMD),
        IOMMU_REGSET_ENTRY(FSTS),
        IOMMU_REGSET_ENTRY(FECTL),
        IOMMU_REGSET_ENTRY(FEDATA),
        IOMMU_REGSET_ENTRY(FEADDR),
        IOMMU_REGSET_ENTRY(FEUADDR),
-       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PMEN),
        IOMMU_REGSET_ENTRY(PLMBASE),
        IOMMU_REGSET_ENTRY(PLMLIMIT),
+       IOMMU_REGSET_ENTRY(ICS),
+       IOMMU_REGSET_ENTRY(PRS),
+       IOMMU_REGSET_ENTRY(PECTL),
+       IOMMU_REGSET_ENTRY(PEDATA),
+       IOMMU_REGSET_ENTRY(PEADDR),
+       IOMMU_REGSET_ENTRY(PEUADDR),
+};
+
+static const struct iommu_regset iommu_regs_64[] = {
+       IOMMU_REGSET_ENTRY(CAP),
+       IOMMU_REGSET_ENTRY(ECAP),
+       IOMMU_REGSET_ENTRY(RTADDR),
+       IOMMU_REGSET_ENTRY(CCMD),
+       IOMMU_REGSET_ENTRY(AFLOG),
        IOMMU_REGSET_ENTRY(PHMBASE),
        IOMMU_REGSET_ENTRY(PHMLIMIT),
        IOMMU_REGSET_ENTRY(IQH),
        IOMMU_REGSET_ENTRY(IQT),
        IOMMU_REGSET_ENTRY(IQA),
-       IOMMU_REGSET_ENTRY(ICS),
        IOMMU_REGSET_ENTRY(IRTA),
        IOMMU_REGSET_ENTRY(PQH),
        IOMMU_REGSET_ENTRY(PQT),
        IOMMU_REGSET_ENTRY(PQA),
-       IOMMU_REGSET_ENTRY(PRS),
-       IOMMU_REGSET_ENTRY(PECTL),
-       IOMMU_REGSET_ENTRY(PEDATA),
-       IOMMU_REGSET_ENTRY(PEADDR),
-       IOMMU_REGSET_ENTRY(PEUADDR),
        IOMMU_REGSET_ENTRY(MTRRCAP),
        IOMMU_REGSET_ENTRY(MTRRDEF),
        IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
@@ -127,10 +131,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
                 * by adding the offset to the pointer (virtual address).
                 */
                raw_spin_lock_irqsave(&iommu->register_lock, flag);
-               for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
-                       value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
+                       value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
+                       seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+                                  iommu_regs_32[i].regs, iommu_regs_32[i].offset,
+                                  value);
+               }
+               for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
+                       value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
                        seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
-                                  iommu_regs[i].regs, iommu_regs[i].offset,
+                                  iommu_regs_64[i].regs, iommu_regs_64[i].offset,
                                   value);
                }
                raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
@@ -272,9 +282,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
 {
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (!(sts & DMA_GSTS_TES)) {
+                       seq_printf(m, "DMA Remapping is not enabled on %s\n",
+                                  iommu->name);
+                       continue;
+               }
                root_tbl_walk(m, iommu);
                seq_putc(m, '\n');
        }
@@ -415,6 +432,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
        struct dmar_drhd_unit *drhd;
        struct intel_iommu *iommu;
        u64 irta;
+       u32 sts;
 
        rcu_read_lock();
        for_each_active_iommu(iommu, drhd) {
@@ -424,7 +442,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
                seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
                           iommu->name);
 
-               if (iommu->ir_table) {
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
                        irta = virt_to_phys(iommu->ir_table->base);
                        seq_printf(m, " IR table address:%llx\n", irta);
                        ir_tbl_remap_entry_show(m, iommu);
index 6fa6de2..4be5494 100644
@@ -4261,10 +4261,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
 
        /* we know that this iommu should be at offset 0xa000 from vtbar */
        drhd = dmar_find_matched_drhd_unit(pdev);
-       if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
-                           TAINT_FIRMWARE_WORKAROUND,
-                           "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
+       if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
+               pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
+       }
 }
 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
 
@@ -4460,14 +4461,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
        struct dmar_rmrr_unit *rmrru;
 
        rmrr = (struct acpi_dmar_reserved_memory *)header;
-       if (rmrr_sanity_check(rmrr))
-               WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
+       if (rmrr_sanity_check(rmrr)) {
+               pr_warn(FW_BUG
                           "Your BIOS is broken; bad RMRR [%#018Lx-%#018Lx]\n"
                           "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                           rmrr->base_address, rmrr->end_address,
                           dmi_get_system_info(DMI_BIOS_VENDOR),
                           dmi_get_system_info(DMI_BIOS_VERSION),
                           dmi_get_system_info(DMI_PRODUCT_VERSION));
+               add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
+       }
 
        rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
        if (!rmrru)
@@ -5130,6 +5133,9 @@ int __init intel_iommu_init(void)
 
        down_write(&dmar_global_lock);
 
+       if (!no_iommu)
+               intel_iommu_debugfs_init();
+
        if (no_iommu || dmar_disabled) {
                /*
                 * We exit the function here to ensure IOMMU's remapping and
@@ -5193,6 +5199,7 @@ int __init intel_iommu_init(void)
 
        init_iommu_pm_ops();
 
+       down_read(&dmar_global_lock);
        for_each_active_iommu(iommu, drhd) {
                iommu_device_sysfs_add(&iommu->iommu, NULL,
                                       intel_iommu_groups,
@@ -5200,6 +5207,7 @@ int __init intel_iommu_init(void)
                iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
                iommu_device_register(&iommu->iommu);
        }
+       up_read(&dmar_global_lock);
 
        bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
        if (si_domain && !hw_pass_through)
@@ -5210,7 +5218,6 @@ int __init intel_iommu_init(void)
        down_read(&dmar_global_lock);
        if (probe_acpi_namespace_devices())
                pr_warn("ACPI name space devices didn't probe correctly\n");
-       up_read(&dmar_global_lock);
 
        /* Finally, we enable the DMA remapping hardware. */
        for_each_iommu(iommu, drhd) {
@@ -5219,10 +5226,11 @@ int __init intel_iommu_init(void)
 
                iommu_disable_protect_mem_regions(iommu);
        }
+       up_read(&dmar_global_lock);
+
        pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
 
        intel_iommu_enabled = 1;
-       intel_iommu_debugfs_init();
 
        return 0;
 
@@ -5700,8 +5708,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
        u64 phys = 0;
 
        pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
-       if (pte)
-               phys = dma_pte_addr(pte);
+       if (pte && dma_pte_present(pte))
+               phys = dma_pte_addr(pte) +
+                       (iova & (BIT_MASK(level_to_offset_bits(level) +
+                                               VTD_PAGE_SHIFT) - 1));
 
        return phys;
 }
index 983b084..04fbd4b 100644
@@ -468,7 +468,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
        arm_lpae_iopte *ptep = data->pgd;
        int ret, lvl = data->start_level;
        arm_lpae_iopte prot;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        /* If no access, then nothing to do */
        if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
@@ -645,7 +645,7 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
        struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
        struct io_pgtable_cfg *cfg = &data->iop.cfg;
        arm_lpae_iopte *ptep = data->pgd;
-       long iaext = (long)iova >> cfg->ias;
+       long iaext = (s64)iova >> cfg->ias;
 
        if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
                return 0;
index c1f7af9..1eec9d4 100644
@@ -34,6 +34,7 @@
 #define GICD_INT_NMI_PRI       (GICD_INT_DEF_PRI & ~0x80)
 
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996    (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539  (1ULL << 1)
 
 struct redist_region {
        void __iomem            *redist_base;
@@ -1464,6 +1465,15 @@ static bool gic_enable_quirk_msm8996(void *data)
        return true;
 }
 
+static bool gic_enable_quirk_cavium_38539(void *data)
+{
+       struct gic_chip_data *d = data;
+
+       d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
+
+       return true;
+}
+
 static bool gic_enable_quirk_hip06_07(void *data)
 {
        struct gic_chip_data *d = data;
@@ -1503,6 +1513,19 @@ static const struct gic_quirk gic_quirks[] = {
                .init   = gic_enable_quirk_hip06_07,
        },
        {
+               /*
+                * Reserved register accesses generate a Synchronous
+                * External Abort. This erratum applies to:
+                * - ThunderX: CN88xx
+                * - OCTEON TX: CN83xx, CN81xx
+                * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
+                */
+               .desc   = "GICv3: Cavium erratum 38539",
+               .iidr   = 0xa000034c,
+               .mask   = 0xe8f00fff,
+               .init   = gic_enable_quirk_cavium_38539,
+       },
+       {
        }
 };
 
@@ -1577,7 +1600,12 @@ static int __init gic_init_bases(void __iomem *dist_base,
        pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
        pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
 
-       gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
+       /*
+        * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
+        * architecture spec (which says that reserved registers are RES0).
+        */
+       if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
+               gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
 
        gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
                                                 &gic_data);
index 1256059..e7dec32 100644
@@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
 
+static const struct of_device_id wf_ad7417_of_id[] = {
+       { .compatible = "ad7417", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
+
 static struct i2c_driver wf_ad7417_driver = {
        .driver = {
                .name   = "wf_ad7417",
+               .of_match_table = wf_ad7417_of_id,
        },
        .probe          = wf_ad7417_probe,
        .remove         = wf_ad7417_remove,
index 67daeec..2470e5a 100644 (file)
@@ -580,9 +580,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
 
+static const struct of_device_id wf_fcu_of_id[] = {
+       { .compatible = "fcu", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
+
 static struct i2c_driver wf_fcu_driver = {
        .driver = {
                .name   = "wf_fcu",
+               .of_match_table = wf_fcu_of_id,
        },
        .probe          = wf_fcu_probe,
        .remove         = wf_fcu_remove,
index 282c28a..1e5fa09 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
+#include <linux/of_device.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/io.h>
@@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {      
        struct wf_lm75_sensor *lm;
-       int rc, ds1775 = id->driver_data;
+       int rc, ds1775;
        const char *name, *loc;
 
+       if (id)
+               ds1775 = id->driver_data;
+       else
+               ds1775 = !!of_device_get_match_data(&client->dev);
+
        DBG("wf_lm75: creating  %s device at address 0x%02x\n",
            ds1775 ? "ds1775" : "lm75", client->addr);
 
@@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
 
+static const struct of_device_id wf_lm75_of_id[] = {
+       { .compatible = "lm75", .data = (void *)0},
+       { .compatible = "ds1775", .data = (void *)1 },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
+
 static struct i2c_driver wf_lm75_driver = {
        .driver = {
                .name   = "wf_lm75",
+               .of_match_table = wf_lm75_of_id,
        },
        .probe          = wf_lm75_probe,
        .remove         = wf_lm75_remove,
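
The probe fallback above works because the ds1775/lm75 distinction travels through the of_device_id .data pointer itself: no allocation, just a cast on the way in and a NULL test on the way out. A user-space sketch of the pattern (match_data() is a hypothetical stand-in for of_device_get_match_data()):

#include <stdio.h>
#include <string.h>

struct of_id { const char *compatible; const void *data; };

static const struct of_id wf_lm75_of_id[] = {
	{ .compatible = "lm75",   .data = (void *)0 },
	{ .compatible = "ds1775", .data = (void *)1 },
	{ NULL, NULL }
};

static const void *match_data(const char *compatible)
{
	for (const struct of_id *id = wf_lm75_of_id; id->compatible; id++)
		if (strcmp(id->compatible, compatible) == 0)
			return id->data;
	return NULL;
}

int main(void)
{
	int ds1775 = !!match_data("ds1775");   /* 1: probe treats the chip as a ds1775 */

	printf("ds1775 = %d\n", ds1775);
	return 0;
}
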
index b03a33b..d011899 100644 (file)
@@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
 
+static const struct of_device_id wf_lm87_of_id[] = {
+       { .compatible = "lm87cimt", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
+
 static struct i2c_driver wf_lm87_driver = {
        .driver = {
                .name   = "wf_lm87",
+               .of_match_table = wf_lm87_of_id,
        },
        .probe          = wf_lm87_probe,
        .remove         = wf_lm87_remove,
index e666cc0..1e7b03d 100644 (file)
@@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
 
+static const struct of_device_id wf_max6690_of_id[] = {
+       { .compatible = "max6690", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
+
 static struct i2c_driver wf_max6690_driver = {
        .driver = {
                .name           = "wf_max6690",
+               .of_match_table = wf_max6690_of_id,
        },
        .probe          = wf_max6690_probe,
        .remove         = wf_max6690_remove,
index c84ec49..cb75dc0 100644 (file)
@@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, wf_sat_id);
 
+static const struct of_device_id wf_sat_of_id[] = {
+       { .compatible = "smu-sat", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wf_sat_of_id);
+
 static struct i2c_driver wf_sat_driver = {
        .driver = {
                .name           = "wf_smu_sat",
+               .of_match_table = wf_sat_of_id,
        },
        .probe          = wf_sat_probe,
        .remove         = wf_sat_remove,
index 342cabf..a8ac94f 100644 (file)
@@ -1008,8 +1008,7 @@ int saa7134_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
         */
        if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
            (dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
-               pm_qos_add_request(&dev->qos_request,
-                       PM_QOS_CPU_DMA_LATENCY, 20);
+               cpu_latency_qos_add_request(&dev->qos_request, 20);
        dmaq->seq_nr = 0;
 
        return 0;
@@ -1024,7 +1023,7 @@ void saa7134_vb2_stop_streaming(struct vb2_queue *vq)
 
        if ((dmaq == &dev->video_q && !vb2_is_streaming(&dev->vbi_vbq)) ||
            (dmaq == &dev->vbi_q && !vb2_is_streaming(&dev->video_vbq)))
-               pm_qos_remove_request(&dev->qos_request);
+               cpu_latency_qos_remove_request(&dev->qos_request);
 }
 
 static const struct vb2_ops vb2_qops = {
index 78841b9..1cd4f7b 100644 (file)
@@ -646,7 +646,7 @@ static int viacam_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
         * requirement which will keep the CPU out of the deeper sleep
         * states.
         */
-       pm_qos_add_request(&cam->qos_request, PM_QOS_CPU_DMA_LATENCY, 50);
+       cpu_latency_qos_add_request(&cam->qos_request, 50);
        viacam_start_engine(cam);
        return 0;
 out:
@@ -662,7 +662,7 @@ static void viacam_vb2_stop_streaming(struct vb2_queue *vq)
        struct via_camera *cam = vb2_get_drv_priv(vq);
        struct via_buffer *buf, *tmp;
 
-       pm_qos_remove_request(&cam->qos_request);
+       cpu_latency_qos_remove_request(&cam->qos_request);
        viacam_stop_engine(cam);
 
        list_for_each_entry_safe(buf, tmp, &cam->buffer_queue, queue) {
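
These two media drivers are part of the tree-wide switch from the old PM QoS calls, pm_qos_add_request() with PM_QOS_CPU_DMA_LATENCY, to the dedicated CPU latency QoS helpers; the sdhci-esdhc-imx and e1000e hunks below make the same substitution. A minimal driver-side sketch of the new interface (the surrounding struct and function names are hypothetical):

#include <linux/pm_qos.h>

struct my_dev {
	struct pm_qos_request qos_request;
};

/* Cap CPU exit latency at 20 us while streaming is active. */
static void my_start_streaming(struct my_dev *dev)
{
	cpu_latency_qos_add_request(&dev->qos_request, 20);
}

/* Relax the bound at runtime without dropping the request. */
static void my_go_idle(struct my_dev *dev)
{
	cpu_latency_qos_update_request(&dev->qos_request, PM_QOS_DEFAULT_VALUE);
}

static void my_stop_streaming(struct my_dev *dev)
{
	cpu_latency_qos_remove_request(&dev->qos_request);
}
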
index 4feed29..423fecc 100644 (file)
@@ -394,7 +394,7 @@ static const struct pcr_ops rts522a_pcr_ops = {
 void rts522a_init_params(struct rtsx_pcr *pcr)
 {
        rts5227_init_params(pcr);
-
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
        pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
 
        pcr->option.ocp_en = 1;
index db936e4..1a81cda 100644 (file)
@@ -618,6 +618,7 @@ static const struct pcr_ops rts524a_pcr_ops = {
 void rts524a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
                LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
@@ -733,6 +734,7 @@ static const struct pcr_ops rts525a_pcr_ops = {
 void rts525a_init_params(struct rtsx_pcr *pcr)
 {
        rts5249_init_params(pcr);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
        pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
        pcr->option.ltr_l1off_snooze_sspwrgate =
                LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
index 4214f02..711054e 100644 (file)
@@ -662,7 +662,7 @@ void rts5260_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
        pcr->ic_version = rts5260_get_ic_version(pcr);
index bc4967a..78c3b1d 100644 (file)
@@ -764,7 +764,7 @@ void rts5261_init_params(struct rtsx_pcr *pcr)
        pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
        pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
        pcr->aspm_en = ASPM_L1_EN;
-       pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 27, 16);
+       pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
        pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
 
        pcr->ic_version = rts5261_get_ic_version(pcr);
index 031eb64..282c9ef 100644 (file)
@@ -712,13 +712,14 @@ static int at24_probe(struct i2c_client *client)
         * chip is functional.
         */
        err = at24_read(at24, 0, &test_byte, 1);
-       pm_runtime_idle(dev);
        if (err) {
                pm_runtime_disable(dev);
                regulator_disable(at24->vcc_reg);
                return -ENODEV;
        }
 
+       pm_runtime_idle(dev);
+
        if (writable)
                dev_info(dev, "%u byte %s EEPROM, writable, %u bytes/write\n",
                         byte_len, client->name, at24->write_max);
index aa54d35..a971c4b 100644 (file)
@@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
         * the erase operation does not exceed the max_busy_timeout, we should
         * use R1B response. Or we need to prevent the host from doing hw busy
         * detection, which is done by converting to a R1 response instead.
+        * Note, some hosts require R1B, which also means they are on their
+        * own when it comes to dealing with the busy timeout.
         */
-       if (card->host->max_busy_timeout &&
+       if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
+           card->host->max_busy_timeout &&
            busy_timeout > card->host->max_busy_timeout) {
                cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
        } else {
index f6912de..de14b58 100644 (file)
@@ -1910,9 +1910,12 @@ static int mmc_sleep(struct mmc_host *host)
         * If the max_busy_timeout of the host is specified, validate it against
         * the sleep cmd timeout. A failure means we need to prevent the host
         * from doing hw busy detection, which is done by converting to a R1
-        * response instead of a R1B.
+        * response instead of an R1B. Note, some hosts require R1B, which
+        * also means they are on their own when it comes to dealing with the
+        * busy timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout)) {
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
        } else {
                cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
index da425ee..e025604 100644 (file)
@@ -542,9 +542,11 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
         * If the max_busy_timeout of the host is specified, make sure it's
         * enough to fit the used timeout_ms. In case it's not, let's instruct
         * the host to avoid HW busy detection, by converting to a R1 response
-        * instead of a R1B.
+        * instead of an R1B. Note, some hosts require R1B, which also means
+        * they are on their own when it comes to dealing with the busy timeout.
         */
-       if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+       if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
+           (timeout_ms > host->max_busy_timeout))
                use_r1b_resp = false;
 
        cmd.opcode = MMC_SWITCH;
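
All three hunks implement the same decision: only downgrade from an R1B to an R1 response when the host both tolerates the downgrade (no MMC_CAP_NEED_RSP_BUSY) and advertises a max busy timeout that the requested timeout exceeds. A user-space sketch of that predicate (the capability bit value is a stand-in):

#include <stdio.h>
#include <stdbool.h>

#define MMC_CAP_NEED_RSP_BUSY (1u << 0)   /* stand-in bit for the sketch */

static bool use_r1b(unsigned int caps, unsigned int max_busy_timeout_ms,
		    unsigned int timeout_ms)
{
	if (!(caps & MMC_CAP_NEED_RSP_BUSY) && max_busy_timeout_ms &&
	    timeout_ms > max_busy_timeout_ms)
		return false;   /* convert to R1, software polls for busy */
	return true;            /* keep R1B, host handles busy detection */
}

int main(void)
{
	printf("%d\n", use_r1b(0, 1000, 5000));                       /* 0 */
	printf("%d\n", use_r1b(MMC_CAP_NEED_RSP_BUSY, 1000, 5000));   /* 1 */
	return 0;
}
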
index bd50935..1108797 100644 (file)
@@ -606,19 +606,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host,
                u8 sample_point, bool rx)
 {
        struct rtsx_pcr *pcr = host->pcr;
-
+       u16 SD_VP_CTL = 0;
        dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n",
                        __func__, rx ? "RX" : "TX", sample_point);
 
        rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK);
-       if (rx)
+       if (rx) {
+               SD_VP_CTL = SD_VPRX_CTL;
                rtsx_pci_write_register(pcr, SD_VPRX_CTL,
                        PHASE_SELECT_MASK, sample_point);
-       else
+       } else {
+               SD_VP_CTL = SD_VPTX_CTL;
                rtsx_pci_write_register(pcr, SD_VPTX_CTL,
                        PHASE_SELECT_MASK, sample_point);
-       rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0);
-       rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET,
+       }
+       rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET, 0);
+       rtsx_pci_write_register(pcr, SD_VP_CTL, PHASE_NOT_RESET,
                                PHASE_NOT_RESET);
        rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0);
        rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
index 9651dca..2a2173d 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/delay.h>
+#include <linux/dmi.h>
 
 #include <linux/mmc/host.h>
 #include <linux/mmc/pm.h>
@@ -72,9 +73,16 @@ struct sdhci_acpi_host {
        const struct sdhci_acpi_slot    *slot;
        struct platform_device          *pdev;
        bool                            use_runtime_pm;
+       bool                            is_intel;
+       bool                            reset_signal_volt_on_suspend;
        unsigned long                   private[0] ____cacheline_aligned;
 };
 
+enum {
+       DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP                  = BIT(0),
+       DMI_QUIRK_SD_NO_WRITE_PROTECT                           = BIT(1),
+};
+
 static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c)
 {
        return (void *)c->private;
@@ -391,6 +399,8 @@ static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *ad
        host->mmc_host_ops.start_signal_voltage_switch =
                                        intel_start_signal_voltage_switch;
 
+       c->is_intel = true;
+
        return 0;
 }
 
@@ -647,6 +657,36 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
 
+static const struct dmi_system_id sdhci_acpi_quirks[] = {
+       {
+               /*
+                * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of
+                * the SHC1 ACPI device, this bug causes it to reprogram the
+                * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the
+                * card is (runtime) suspended + resumed. DLDO3 is used for
+                * the LCD and setting it to 1.8V causes the LCD to go black.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
+               },
+               .driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP,
+       },
+       {
+               /*
+                * The Acer Aspire Switch 10 (SW5-012) microSD slot always
+                * reports the card being write-protected even though microSD
+                * cards do not have a write-protect switch at all.
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"),
+               },
+               .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT,
+       },
+       {} /* Terminating entry */
+};
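
The probe code below consults this table once via dmi_first_match() and unpacks the flags from the matched entry's driver_data pointer. A user-space sketch of the unpacking (the matched value is hypothetical):

#include <stdio.h>

enum {
	DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = 1 << 0,
	DMI_QUIRK_SD_NO_WRITE_PROTECT          = 1 << 1,
};

int main(void)
{
	/* The flags travel through the void *driver_data member, so a
	 * long-sized cast recovers them.
	 */
	void *driver_data = (void *)(long)DMI_QUIRK_SD_NO_WRITE_PROTECT;
	int quirks = (long)driver_data;

	if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
		printf("reset signal voltage on suspend\n");
	if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
		printf("ignore bogus write-protect reporting\n");
	return 0;
}
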
+
 static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev)
 {
        const struct sdhci_acpi_uid_slot *u;
@@ -663,17 +703,23 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        const struct sdhci_acpi_slot *slot;
        struct acpi_device *device, *child;
+       const struct dmi_system_id *id;
        struct sdhci_acpi_host *c;
        struct sdhci_host *host;
        struct resource *iomem;
        resource_size_t len;
        size_t priv_size;
+       int quirks = 0;
        int err;
 
        device = ACPI_COMPANION(dev);
        if (!device)
                return -ENODEV;
 
+       id = dmi_first_match(sdhci_acpi_quirks);
+       if (id)
+               quirks = (long)id->driver_data;
+
        slot = sdhci_acpi_get_slot(device);
 
        /* Power on the SDHCI controller and its children */
@@ -759,6 +805,12 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                        dev_warn(dev, "failed to setup card detect gpio\n");
                        c->use_runtime_pm = false;
                }
+
+               if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP)
+                       c->reset_signal_volt_on_suspend = true;
+
+               if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT)
+                       host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
        }
 
        err = sdhci_setup_host(host);
@@ -823,17 +875,39 @@ static int sdhci_acpi_remove(struct platform_device *pdev)
        return 0;
 }
 
+static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed(
+       struct device *dev)
+{
+       struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+       struct sdhci_host *host = c->host;
+
+       if (c->is_intel && c->reset_signal_volt_on_suspend &&
+           host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) {
+               struct intel_host *intel_host = sdhci_acpi_priv(c);
+               unsigned int fn = INTEL_DSM_V33_SWITCH;
+               u32 result = 0;
+
+               intel_dsm(intel_host, dev, fn, &result);
+       }
+}
+
 #ifdef CONFIG_PM_SLEEP
 
 static int sdhci_acpi_suspend(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
        struct sdhci_host *host = c->host;
+       int ret;
 
        if (host->tuning_mode != SDHCI_TUNING_MODE_3)
                mmc_retune_needed(host->mmc);
 
-       return sdhci_suspend_host(host);
+       ret = sdhci_suspend_host(host);
+       if (ret)
+               return ret;
+
+       sdhci_acpi_reset_signal_voltage_if_needed(dev);
+       return 0;
 }
 
 static int sdhci_acpi_resume(struct device *dev)
@@ -853,11 +927,17 @@ static int sdhci_acpi_runtime_suspend(struct device *dev)
 {
        struct sdhci_acpi_host *c = dev_get_drvdata(dev);
        struct sdhci_host *host = c->host;
+       int ret;
 
        if (host->tuning_mode != SDHCI_TUNING_MODE_3)
                mmc_retune_needed(host->mmc);
 
-       return sdhci_runtime_suspend_host(host);
+       ret = sdhci_runtime_suspend_host(host);
+       if (ret)
+               return ret;
+
+       sdhci_acpi_reset_signal_voltage_if_needed(dev);
+       return 0;
 }
 
 static int sdhci_acpi_runtime_resume(struct device *dev)
index 5827d37..e573495 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/mmc/host.h>
 #include <linux/mmc/mmc.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "sdhci-pltfm.h"
 
@@ -235,6 +236,11 @@ static const struct sdhci_ops sdhci_cdns_ops = {
        .set_uhs_signaling = sdhci_cdns_set_uhs_signaling,
 };
 
+static const struct sdhci_pltfm_data sdhci_cdns_uniphier_pltfm_data = {
+       .ops = &sdhci_cdns_ops,
+       .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
+};
+
 static const struct sdhci_pltfm_data sdhci_cdns_pltfm_data = {
        .ops = &sdhci_cdns_ops,
 };
@@ -334,6 +340,7 @@ static void sdhci_cdns_hs400_enhanced_strobe(struct mmc_host *mmc,
 static int sdhci_cdns_probe(struct platform_device *pdev)
 {
        struct sdhci_host *host;
+       const struct sdhci_pltfm_data *data;
        struct sdhci_pltfm_host *pltfm_host;
        struct sdhci_cdns_priv *priv;
        struct clk *clk;
@@ -350,8 +357,12 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
+       data = of_device_get_match_data(dev);
+       if (!data)
+               data = &sdhci_cdns_pltfm_data;
+
        nr_phy_params = sdhci_cdns_phy_param_count(dev->of_node);
-       host = sdhci_pltfm_init(pdev, &sdhci_cdns_pltfm_data,
+       host = sdhci_pltfm_init(pdev, data,
                                struct_size(priv, phy_params, nr_phy_params));
        if (IS_ERR(host)) {
                ret = PTR_ERR(host);
@@ -431,7 +442,10 @@ static const struct dev_pm_ops sdhci_cdns_pm_ops = {
 };
 
 static const struct of_device_id sdhci_cdns_match[] = {
-       { .compatible = "socionext,uniphier-sd4hc" },
+       {
+               .compatible = "socionext,uniphier-sd4hc",
+               .data = &sdhci_cdns_uniphier_pltfm_data,
+       },
        { .compatible = "cdns,sd4hc" },
        { /* sentinel */ }
 };
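
Only the uniphier compatible carries .data, so probe falls back to the generic platform data when of_device_get_match_data() returns NULL. A user-space sketch of that fallback (names and quirk values are hypothetical):

#include <stdio.h>

struct pltfm_data { const char *name; unsigned int quirks2; };

static const struct pltfm_data uniphier_data = { "uniphier-sd4hc", 0x8 };
static const struct pltfm_data generic_data  = { "cdns-sd4hc",     0x0 };

/* Stand-in for of_device_get_match_data(): entries without .data yield NULL. */
static const struct pltfm_data *get_match_data(int is_uniphier)
{
	return is_uniphier ? &uniphier_data : NULL;
}

int main(void)
{
	const struct pltfm_data *data = get_match_data(0);

	if (!data)
		data = &generic_data;   /* same fallback as the probe hunk */

	printf("using %s (quirks2 = 0x%x)\n", data->name, data->quirks2);
	return 0;
}
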
index 382f25b..b2bdf50 100644 (file)
@@ -1452,8 +1452,7 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                                                  pdev->id_entry->driver_data;
 
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_add_request(&imx_data->pm_qos_req,
-                       PM_QOS_CPU_DMA_LATENCY, 0);
+               cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
 
        imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
        if (IS_ERR(imx_data->clk_ipg)) {
@@ -1572,7 +1571,7 @@ disable_per_clk:
        clk_disable_unprepare(imx_data->clk_per);
 free_sdhci:
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_remove_request(&imx_data->pm_qos_req);
+               cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
        sdhci_pltfm_free(pdev);
        return err;
 }
@@ -1595,7 +1594,7 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
        clk_disable_unprepare(imx_data->clk_ahb);
 
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_remove_request(&imx_data->pm_qos_req);
+               cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
 
        sdhci_pltfm_free(pdev);
 
@@ -1667,7 +1666,7 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
        clk_disable_unprepare(imx_data->clk_ahb);
 
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_remove_request(&imx_data->pm_qos_req);
+               cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
 
        return ret;
 }
@@ -1680,8 +1679,7 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
        int err;
 
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_add_request(&imx_data->pm_qos_req,
-                       PM_QOS_CPU_DMA_LATENCY, 0);
+               cpu_latency_qos_add_request(&imx_data->pm_qos_req, 0);
 
        err = clk_prepare_enable(imx_data->clk_ahb);
        if (err)
@@ -1714,7 +1712,7 @@ disable_ahb_clk:
        clk_disable_unprepare(imx_data->clk_ahb);
 remove_pm_qos_request:
        if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
-               pm_qos_remove_request(&imx_data->pm_qos_req);
+               cpu_latency_qos_remove_request(&imx_data->pm_qos_req);
        return err;
 }
 #endif
index c3a160c..3955fa5 100644 (file)
@@ -1590,7 +1590,7 @@ static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask)
        return 0;
 }
 
-void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
+static void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery)
 {
        struct sdhci_host *host = mmc_priv(mmc);
        unsigned long flags;
index ab2bd31..fcef5c0 100644 (file)
@@ -132,7 +132,8 @@ static void sdhci_at91_reset(struct sdhci_host *host, u8 mask)
 
        sdhci_reset(host, mask);
 
-       if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+       if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+           || mmc_gpio_get_cd(host->mmc) >= 0)
                sdhci_at91_set_force_card_detect(host);
 
        if (priv->cal_always_on && (mask & SDHCI_RESET_ALL))
@@ -427,8 +428,11 @@ static int sdhci_at91_probe(struct platform_device *pdev)
         * detection procedure using the SDMCC_CD signal is bypassed.
         * This bit is reset when a software reset for all command is performed
         * so we need to implement our own reset function to set back this bit.
+        *
+        * WA: SAMA5D2 doesn't drive CMD if using CD GPIO line.
         */
-       if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
+       if ((host->mmc->caps & MMC_CAP_NONREMOVABLE)
+           || mmc_gpio_get_cd(host->mmc) >= 0)
                sdhci_at91_set_force_card_detect(host);
 
        pm_runtime_put_autosuspend(&pdev->dev);
index 8820531..c497817 100644 (file)
@@ -1192,6 +1192,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
        if (of_find_property(dev->of_node, "dmas", NULL))
                sdhci_switch_external_dma(host, true);
 
+       /* An R1B response is required to properly manage HW busy detection. */
+       mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        ret = sdhci_setup_host(host);
        if (ret)
                goto err_put_sync;
index 5eea8d7..ce15a05 100644 (file)
@@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
        return 0;
 }
 
+static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
+{
+       int ret;
+
+       ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
+                                   PCI_IRQ_MSI | PCI_IRQ_MSIX);
+       if (ret < 0) {
+               pr_warn("%s: enable PCI MSI failed, error=%d\n",
+                      mmc_hostname(slot->host->mmc), ret);
+               return;
+       }
+
+       slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
+}
+
 static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
@@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
 {
        struct sdhci_host *host = slot->host;
 
+       gli_pcie_enable_msi(slot);
        slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
        sdhci_enable_v4_mode(host);
 
index 403ac44..a25c3a4 100644 (file)
@@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
        if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
                host->mmc->caps |= MMC_CAP_1_8V_DDR;
 
+       /* An R1B response is required to properly manage HW busy detection. */
+       host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+
        tegra_sdhci_parse_dt(host);
 
        tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
index 1cc2cd8..c816985 100644 (file)
@@ -50,11 +50,6 @@ struct arp_pkt {
 };
 #pragma pack()
 
-static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
-{
-       return (struct arp_pkt *)skb_network_header(skb);
-}
-
 /* Forward declaration */
 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
                                      bool strict_match);
@@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
        spin_unlock(&bond->mode_lock);
 }
 
-static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
+static struct slave *rlb_choose_channel(struct sk_buff *skb,
+                                       struct bonding *bond,
+                                       const struct arp_pkt *arp)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *assigned_slave, *curr_active_slave;
        struct rlb_client_info *client_info;
        u32 hash_index = 0;
@@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
  */
 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 {
-       struct arp_pkt *arp = arp_pkt(skb);
        struct slave *tx_slave = NULL;
+       struct arp_pkt *arp;
+
+       if (!pskb_network_may_pull(skb, sizeof(*arp)))
+               return NULL;
+       arp = (struct arp_pkt *)skb_network_header(skb);
 
        /* Don't modify or load balance ARPs that do not originate locally
         * (e.g.,arrive via a bridge).
@@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
 
        if (arp->op_code == htons(ARPOP_REPLY)) {
                /* the arp must be sent on the selected rx channel */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
                if (tx_slave)
                        bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
                                          tx_slave->dev->addr_len);
@@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 * When the arp reply is received the entry will be updated
                 * with the correct unicast address of the client.
                 */
-               tx_slave = rlb_choose_channel(skb, bond);
+               tx_slave = rlb_choose_channel(skb, bond, arp);
 
                /* The ARP reply packets must be delayed so that
                 * they can cancel out the influence of the ARP request.
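
The point of the fix: rlb_arp_xmit() used to cast the network header to struct arp_pkt before anything had verified that the full ARP header was present in the linear area, so a short or paged skb could be read out of bounds. pskb_network_may_pull() both checks the length and, if needed, pulls the bytes into the head. A toy sketch of the check-before-cast pattern:

#include <stdio.h>
#include <stdint.h>

struct buf { const uint8_t *data; size_t linear_len; };   /* toy skb head */

struct arp_hdr { uint16_t op_code; /* ... */ };

/* Refuse to cast until the whole header is known to be present. */
static const struct arp_hdr *parse_arp(const struct buf *b)
{
	if (b->linear_len < sizeof(struct arp_hdr))
		return NULL;    /* previously: unconditional cast */
	return (const struct arp_hdr *)b->data;
}

int main(void)
{
	uint8_t raw[1] = { 0 };   /* truncated packet */
	struct buf b = { raw, sizeof(raw) };

	printf("arp = %p\n", (void *)parse_arp(&b));   /* NULL, no OOB read */
	return 0;
}
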
index 6ee06a4..68834a2 100644 (file)
@@ -883,6 +883,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
                                = { .len = sizeof(struct can_bittiming) },
        [IFLA_CAN_DATA_BITTIMING_CONST]
                                = { .len = sizeof(struct can_bittiming_const) },
+       [IFLA_CAN_TERMINATION]  = { .type = NLA_U16 },
 };
 
 static int can_validate(struct nlattr *tb[], struct nlattr *data[],
index 8c92895..2f993e6 100644 (file)
@@ -2769,6 +2769,8 @@ static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
                goto unlock;
        }
 
+       occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
 unlock:
        mv88e6xxx_reg_unlock(chip);
 
index 0150301..8fd4830 100644 (file)
@@ -1099,6 +1099,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
 {
        int err, irq, virq;
 
+       chip->g2_irq.masked = ~0;
+       mv88e6xxx_reg_lock(chip);
+       err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
+       mv88e6xxx_reg_unlock(chip);
+       if (err)
+               return err;
+
        chip->g2_irq.domain = irq_domain_add_simple(
                chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
        if (!chip->g2_irq.domain)
@@ -1108,7 +1115,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
                irq_create_mapping(chip->g2_irq.domain, irq);
 
        chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
-       chip->g2_irq.masked = ~0;
 
        chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
                                            MV88E6XXX_G1_STS_IRQ_DEVICE);
index 03ba6d2..7edea57 100644 (file)
@@ -1741,7 +1741,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
                if (!dsa_is_user_port(ds, port))
                        continue;
 
-               kthread_destroy_worker(sp->xmit_worker);
+               if (sp->xmit_worker)
+                       kthread_destroy_worker(sp->xmit_worker);
        }
 
        sja1105_tas_teardown(ds);
index e0611cb..15b31cd 100644 (file)
@@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
                return -ENOSPC;
 
        index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
-       if (index > RXCHK_BRCM_TAG_MAX)
+       if (index >= RXCHK_BRCM_TAG_MAX)
                return -ENOSPC;
 
        /* Location is the classification ID, and index is the position
index f9a8151..c5c8eff 100644 (file)
@@ -10982,13 +10982,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
        struct bnxt *bp = netdev_priv(dev);
 
        if (netif_running(dev))
-               bnxt_close_nic(bp, false, false);
+               bnxt_close_nic(bp, true, false);
 
        dev->mtu = new_mtu;
        bnxt_set_ring_params(bp);
 
        if (netif_running(dev))
-               return bnxt_open_nic(bp, false, false);
+               return bnxt_open_nic(bp, true, false);
 
        return 0;
 }
index e8fc167..1f67e67 100644 (file)
@@ -2007,8 +2007,8 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_nvm_install_update_input install = {0};
        const struct firmware *fw;
-       int rc, hwrm_err = 0;
        u32 item_len;
+       int rc = 0;
        u16 index;
 
        bnxt_hwrm_fw_set_time(bp);
@@ -2052,15 +2052,14 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
                        memcpy(kmem, fw->data, fw->size);
                        modify.host_src_addr = cpu_to_le64(dma_handle);
 
-                       hwrm_err = hwrm_send_message(bp, &modify,
-                                                    sizeof(modify),
-                                                    FLASH_PACKAGE_TIMEOUT);
+                       rc = hwrm_send_message(bp, &modify, sizeof(modify),
+                                              FLASH_PACKAGE_TIMEOUT);
                        dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
                                          dma_handle);
                }
        }
        release_firmware(fw);
-       if (rc || hwrm_err)
+       if (rc)
                goto err_exit;
 
        if ((install_type & 0xffff) == 0)
@@ -2069,20 +2068,19 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
        install.install_type = cpu_to_le32(install_type);
 
        mutex_lock(&bp->hwrm_cmd_lock);
-       hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
-                                     INSTALL_PACKAGE_TIMEOUT);
-       if (hwrm_err) {
+       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                               INSTALL_PACKAGE_TIMEOUT);
+       if (rc) {
                u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
 
                if (resp->error_code && error_code ==
                    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
                        install.flags |= cpu_to_le16(
                               NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
-                       hwrm_err = _hwrm_send_message(bp, &install,
-                                                     sizeof(install),
-                                                     INSTALL_PACKAGE_TIMEOUT);
+                       rc = _hwrm_send_message(bp, &install, sizeof(install),
+                                               INSTALL_PACKAGE_TIMEOUT);
                }
-               if (hwrm_err)
+               if (rc)
                        goto flash_pkg_exit;
        }
 
@@ -2094,7 +2092,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
 flash_pkg_exit:
        mutex_unlock(&bp->hwrm_cmd_lock);
 err_exit:
-       if (hwrm_err == -EACCES)
+       if (rc == -EACCES)
                bnxt_print_admin_err(bp);
        return rc;
 }
index 649842a..97f90ed 100644 (file)
@@ -5381,12 +5381,11 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 static int cfg_queues(struct adapter *adap)
 {
        u32 avail_qsets, avail_eth_qsets, avail_uld_qsets;
+       u32 i, n10g = 0, qidx = 0, n1g = 0;
+       u32 ncpus = num_online_cpus();
        u32 niqflint, neq, num_ulds;
        struct sge *s = &adap->sge;
-       u32 i, n10g = 0, qidx = 0;
-#ifndef CONFIG_CHELSIO_T4_DCB
-       int q10g = 0;
-#endif
+       u32 q10g = 0, q1g;
 
        /* Reduce memory usage in kdump environment, disable all offload. */
        if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
@@ -5424,44 +5423,50 @@ static int cfg_queues(struct adapter *adap)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
 
        avail_eth_qsets = min_t(u32, avail_qsets, MAX_ETH_QSETS);
+
+       /* We default to 1 queue per non-10G port and up to one queue per
+        * CPU core per 10G port.
+        */
+       if (n10g)
+               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
+
+       n1g = adap->params.nports - n10g;
 #ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
         * own TX Queue in order to prevent Head-Of-Line Blocking.
         */
+       q1g = 8;
        if (adap->params.nports * 8 > avail_eth_qsets) {
                dev_err(adap->pdev_dev, "DCB avail_eth_qsets=%d < %d!\n",
                        avail_eth_qsets, adap->params.nports * 8);
                return -ENOMEM;
        }
 
-       for_each_port(adap, i) {
-               struct port_info *pi = adap2pinfo(adap, i);
+       if (adap->params.nports * ncpus < avail_eth_qsets)
+               q10g = max(8U, ncpus);
+       else
+               q10g = max(8U, q10g);
 
-               pi->first_qset = qidx;
-               pi->nqsets = is_kdump_kernel() ? 1 : 8;
-               qidx += pi->nqsets;
-       }
-#else /* !CONFIG_CHELSIO_T4_DCB */
-       /* We default to 1 queue per non-10G port and up to # of cores queues
-        * per 10G port.
-        */
-       if (n10g)
-               q10g = (avail_eth_qsets - (adap->params.nports - n10g)) / n10g;
-       if (q10g > netif_get_num_default_rss_queues())
-               q10g = netif_get_num_default_rss_queues();
+       while ((q10g * n10g) > (avail_eth_qsets - n1g * q1g))
+               q10g--;
 
-       if (is_kdump_kernel())
+#else /* !CONFIG_CHELSIO_T4_DCB */
+       q1g = 1;
+       q10g = min(q10g, ncpus);
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+       if (is_kdump_kernel()) {
                q10g = 1;
+               q1g = 1;
+       }
 
        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);
 
                pi->first_qset = qidx;
-               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
+               pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : q1g;
                qidx += pi->nqsets;
        }
-#endif /* !CONFIG_CHELSIO_T4_DCB */
 
        s->ethqsets = qidx;
        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5473,7 +5478,7 @@ static int cfg_queues(struct adapter *adap)
                 * capped by the number of available cores.
                 */
                num_ulds = adap->num_uld + adap->num_ofld_uld;
-               i = min_t(u32, MAX_OFLD_QSETS, num_online_cpus());
+               i = min_t(u32, MAX_OFLD_QSETS, ncpus);
                avail_uld_qsets = roundup(i, adap->params.nports);
                if (avail_qsets < num_ulds * adap->params.nports) {
                        adap->params.offload = 0;
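
The reworked distribution hands each 10G port an equal share of the remaining qsets, caps it at the CPU count in the non-DCB case, and gives each 1G port q1g qsets. Worked numbers for a hypothetical 2 x 10G adapter with 32 qsets and 8 CPUs:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int nports = 2, n10g = 2, avail_eth_qsets = 32, ncpus = 8;
	unsigned int n1g = nports - n10g;
	unsigned int q10g = 0, q1g = 1;   /* non-DCB defaults */

	if (n10g)
		q10g = (avail_eth_qsets - (nports - n10g)) / n10g;   /* 16 */
	q10g = min_u32(q10g, ncpus);                                 /* capped at 8 */

	printf("per-10G qsets = %u, per-1G qsets = %u, total = %u\n",
	       q10g, q1g, q10g * n10g + q1g * n1g);   /* 8, 1, 16 */
	return 0;
}
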
index fd93d54..ca74a68 100644 (file)
@@ -1,4 +1,5 @@
 /* Copyright 2008 - 2016 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -123,7 +124,22 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define FSL_QMAN_MAX_OAL       127
 
 /* Default alignment for start of data in an Rx FD */
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+/* aligning data start to 64 avoids DMA transaction splits, unless the buffer
+ * is crossing a 4k page boundary
+ */
+#define DPAA_FD_DATA_ALIGNMENT  (fman_has_errata_a050385() ? 64 : 16)
+/* aligning to 256 avoids DMA transaction splits caused by 4k page boundary
+ * crossings; also, all SG fragments except the last must have a size multiple
+ * of 256 to avoid DMA transaction splits
+ */
+#define DPAA_A050385_ALIGN 256
+#define DPAA_FD_RX_DATA_ALIGNMENT (fman_has_errata_a050385() ? \
+                                  DPAA_A050385_ALIGN : 16)
+#else
 #define DPAA_FD_DATA_ALIGNMENT  16
+#define DPAA_FD_RX_DATA_ALIGNMENT DPAA_FD_DATA_ALIGNMENT
+#endif
 
 /* The DPAA requires 256 bytes reserved and mapped for the SGT */
 #define DPAA_SGT_SIZE 256
@@ -158,8 +174,13 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result)
 #define DPAA_TIME_STAMP_SIZE 8
 #define DPAA_HASH_RESULTS_SIZE 8
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\
+        + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE))
+#else
 #define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \
                                        dpaa_rx_extra_headroom)
+#endif
 
 #define DPAA_ETH_PCD_RXQ_NUM   128
 
@@ -180,7 +201,12 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
 #define DPAA_BP_RAW_SIZE 4096
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+#define dpaa_bp_size(raw_size) (SKB_WITH_OVERHEAD(raw_size) & \
+                               ~(DPAA_A050385_ALIGN - 1))
+#else
 #define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
+#endif
 
 static int dpaa_max_frm;
 
@@ -1192,7 +1218,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
        buf_prefix_content.pass_prs_result = true;
        buf_prefix_content.pass_hash_result = true;
        buf_prefix_content.pass_time_stamp = true;
-       buf_prefix_content.data_align = DPAA_FD_DATA_ALIGNMENT;
+       buf_prefix_content.data_align = DPAA_FD_RX_DATA_ALIGNMENT;
 
        rx_p = &params.specific_params.rx_params;
        rx_p->err_fqid = errq->fqid;
@@ -1662,6 +1688,8 @@ static u8 rx_csum_offload(const struct dpaa_priv *priv, const struct qm_fd *fd)
        return CHECKSUM_NONE;
 }
 
+#define PTR_IS_ALIGNED(x, a) (IS_ALIGNED((unsigned long)(x), (a)))
+
 /* Build a linear skb around the received buffer.
  * We are guaranteed there is enough room at the end of the data buffer to
  * accommodate the shared info area of the skb.
@@ -1733,8 +1761,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 
                sg_addr = qm_sg_addr(&sgt[i]);
                sg_vaddr = phys_to_virt(sg_addr);
-               WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr,
-                                   SMP_CACHE_BYTES));
+               WARN_ON(!PTR_IS_ALIGNED(sg_vaddr, SMP_CACHE_BYTES));
 
                dma_unmap_page(priv->rx_dma_dev, sg_addr,
                               DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
@@ -2022,6 +2049,75 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
        return 0;
 }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+int dpaa_a050385_wa(struct net_device *net_dev, struct sk_buff **s)
+{
+       struct dpaa_priv *priv = netdev_priv(net_dev);
+       struct sk_buff *new_skb, *skb = *s;
+       unsigned char *start, i;
+
+       /* check linear buffer alignment */
+       if (!PTR_IS_ALIGNED(skb->data, DPAA_A050385_ALIGN))
+               goto workaround;
+
+       /* linear buffers just need to have an aligned start */
+       if (!skb_is_nonlinear(skb))
+               return 0;
+
+       /* linear data size for nonlinear skbs needs to be aligned */
+       if (!IS_ALIGNED(skb_headlen(skb), DPAA_A050385_ALIGN))
+               goto workaround;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+               /* all fragments need to have aligned start addresses */
+               if (!IS_ALIGNED(skb_frag_off(frag), DPAA_A050385_ALIGN))
+                       goto workaround;
+
+               /* all but last fragment need to have aligned sizes */
+               if (!IS_ALIGNED(skb_frag_size(frag), DPAA_A050385_ALIGN) &&
+                   (i < skb_shinfo(skb)->nr_frags - 1))
+                       goto workaround;
+       }
+
+       return 0;
+
+workaround:
+       /* copy all the skb content into a new linear buffer */
+       new_skb = netdev_alloc_skb(net_dev, skb->len + DPAA_A050385_ALIGN - 1 +
+                                               priv->tx_headroom);
+       if (!new_skb)
+               return -ENOMEM;
+
+       /* NET_SKB_PAD bytes already reserved, adding up to tx_headroom */
+       skb_reserve(new_skb, priv->tx_headroom - NET_SKB_PAD);
+
+       /* Workaround for DPAA_A050385 requires data start to be aligned */
+       start = PTR_ALIGN(new_skb->data, DPAA_A050385_ALIGN);
+       if (start - new_skb->data != 0)
+               skb_reserve(new_skb, start - new_skb->data);
+
+       skb_put(new_skb, skb->len);
+       skb_copy_bits(skb, 0, new_skb->data, skb->len);
+       skb_copy_header(new_skb, skb);
+       new_skb->dev = skb->dev;
+
+       /* We move the headroom when we align it so we have to reset the
+        * network and transport header offsets relative to the new data
+        * pointer. The checksum offload relies on these offsets.
+        */
+       skb_set_network_header(new_skb, skb_network_offset(skb));
+       skb_set_transport_header(new_skb, skb_transport_offset(skb));
+
+       /* TODO: does timestamping need the result in the old skb? */
+       dev_kfree_skb(skb);
+       *s = new_skb;
+
+       return 0;
+}
+#endif
+
 static netdev_tx_t
 dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 {
@@ -2068,6 +2164,14 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
                nonlinear = skb_is_nonlinear(skb);
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       if (unlikely(fman_has_errata_a050385())) {
+               if (dpaa_a050385_wa(net_dev, &skb))
+                       goto enomem;
+               nonlinear = skb_is_nonlinear(skb);
+       }
+#endif
+
        if (nonlinear) {
                /* Just create a S/G fd based on the skb */
                err = skb_to_sg_fd(priv, skb, &fd);
@@ -2741,9 +2845,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
        headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
                DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
 
-       return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
-                                             DPAA_FD_DATA_ALIGNMENT) :
-                                       headroom;
+       return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
 }
 
 static int dpaa_eth_probe(struct platform_device *pdev)
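
The workaround path over-allocates by DPAA_A050385_ALIGN - 1 bytes precisely so that an aligned data start can always be carved out of the new buffer with PTR_ALIGN(). A user-space twin of that alignment step (PTR_ALIGN here reimplements the kernel macro for the sketch):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define DPAA_A050385_ALIGN 256

/* Round a pointer up to the next a-byte boundary (a must be a power of two). */
#define PTR_ALIGN(p, a) \
	((void *)(((uintptr_t)(p) + ((a) - 1)) & ~(uintptr_t)((a) - 1)))

int main(void)
{
	/* Over-allocate by align - 1 bytes, as the netdev_alloc_skb() call
	 * in the workaround does, so the aligned start always fits.
	 */
	static uint8_t buf[1500 + DPAA_A050385_ALIGN - 1];
	uint8_t *start = PTR_ALIGN(buf, DPAA_A050385_ALIGN);

	printf("skew absorbed: %td bytes\n", (ptrdiff_t)(start - buf));
	return 0;
}
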
index 4432a59..23c5fef 100644 (file)
@@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
        if (cycle > 0xFFFF) {
                dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
-       cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
+       cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
        if (cycle > 0xFFFF) {
-               dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+               dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }
 
index 0139cb9..3415018 100644 (file)
@@ -8,3 +8,31 @@ config FSL_FMAN
        help
                Freescale Data-Path Acceleration Architecture Frame Manager
                (FMan) support
+
+config DPAA_ERRATUM_A050385
+       bool
+       depends on ARM64 && FSL_DPAA
+       default y
+       help
+               DPAA FMan erratum A050385 software workaround implementation:
+               align buffers, data start and SG fragment length to avoid FMan
+               DMA splits.
+               FMAN DMA reads or writes under heavy traffic load may cause an
+               FMAN internal resource leak, thus stopping further packet
+               processing.
+               The FMAN internal queue can overflow when FMAN splits single
+               read or write transactions into multiple smaller transactions
+               such that more than 17 AXI transactions are in flight from FMAN
+               to interconnect. When the FMAN internal queue overflows, it can
+               stall further packet processing. The issue can occur with any
+               one of the following three conditions:
+               1. FMAN AXI transaction crosses 4K address boundary (Errata
+               A010022)
+               2. FMAN DMA address for an AXI transaction is not 16 byte
+               aligned, i.e. the last 4 bits of an address are non-zero
+               3. Scatter Gather (SG) frames have more than one SG buffer in
+               the SG list and any one of the buffers, except the last
+               buffer in the SG list has data size that is not a multiple
+               of 16 bytes, i.e., other than 16, 32, 48, 64, etc.
+               With any one of the above three conditions present, there is a
+               likelihood of stalled FMAN packet processing, especially under
+               stress with multiple ports injecting line-rate traffic.
index 934111d..f151d6e 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -566,6 +567,10 @@ struct fman_cfg {
        u32 qmi_def_tnums_thresh;
 };
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+static bool fman_has_err_a050385;
+#endif
+
 static irqreturn_t fman_exceptions(struct fman *fman,
                                   enum fman_exceptions exception)
 {
@@ -2518,6 +2523,14 @@ struct fman *fman_bind(struct device *fm_dev)
 }
 EXPORT_SYMBOL(fman_bind);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void)
+{
+       return fman_has_err_a050385;
+}
+EXPORT_SYMBOL(fman_has_errata_a050385);
+#endif
+
 static irqreturn_t fman_err_irq(int irq, void *handle)
 {
        struct fman *fman = (struct fman *)handle;
@@ -2845,6 +2858,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
                goto fman_free;
        }
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+       fman_has_err_a050385 =
+               of_property_read_bool(fm_node, "fsl,erratum-a050385");
+#endif
+
        return fman;
 
 fman_node_put:
index 935c317..f2ede13 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2020 NXP
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
@@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
 
 int fman_get_rx_extra_headroom(void);
 
+#ifdef CONFIG_DPAA_ERRATUM_A050385
+bool fman_has_errata_a050385(void);
+#endif
+
 struct fman *fman_bind(struct device *dev);
 
 #endif /* __FM_H */
index 1b03139..d87158a 100644 (file)
@@ -46,6 +46,7 @@ enum HCLGE_MBX_OPCODE {
        HCLGE_MBX_PUSH_VLAN_INFO,       /* (PF -> VF) push port base vlan */
        HCLGE_MBX_GET_MEDIA_TYPE,       /* (VF -> PF) get media type */
        HCLGE_MBX_PUSH_PROMISC_INFO,    /* (PF -> VF) push vf promisc info */
+       HCLGE_MBX_VF_UNINIT,            /* (VF -> PF) vf is uninitializing */
 
        HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
        HCLGE_MBX_PUSH_LINK_STATUS,     /* (M7 -> PF) get port link status */
index acb796c..a7f40aa 100644 (file)
@@ -1711,7 +1711,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
        netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
 
        return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
-               kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
+               kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
 }
 
 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
index 492bc94..d3b0cd7 100644 (file)
@@ -2446,10 +2446,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
 
 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
 {
+       struct hclge_mac *mac = &hdev->hw.mac;
        int ret;
 
        duplex = hclge_check_speed_dup(duplex, speed);
-       if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
+       if (!mac->support_autoneg && mac->speed == speed &&
+           mac->duplex == duplex)
                return 0;
 
        ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
@@ -7743,16 +7745,27 @@ static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
        struct hclge_desc desc;
        int ret;
 
-       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, false);
-
+       /* read current vlan filter parameter */
+       hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
        req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
        req->vlan_type = vlan_type;
-       req->vlan_fe = filter_en ? fe_type : 0;
        req->vf_id = vf_id;
 
        ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+       if (ret) {
+               dev_err(&hdev->pdev->dev,
+                       "failed to get vlan filter config, ret = %d.\n", ret);
+               return ret;
+       }
+
+       /* modify and write new config parameter */
+       hclge_cmd_reuse_desc(&desc, false);
+       req->vlan_fe = filter_en ?
+                       (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+       ret = hclge_cmd_send(&hdev->hw, &desc, 1);
        if (ret)
-               dev_err(&hdev->pdev->dev, "set vlan filter fail, ret =%d.\n",
+               dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
                        ret);
 
        return ret;
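
The hunk turns a blind register write into a read-modify-write, so enabling one VLAN filter type no longer clobbers the types firmware already enabled. A toy model of the before/after behaviour (register layout hypothetical):

#include <stdio.h>
#include <stdint.h>

static uint8_t hw_vlan_fe = 0x5;   /* pretend register: some types already on */

static uint8_t read_reg(void)       { return hw_vlan_fe; }
static void    write_reg(uint8_t v) { hw_vlan_fe = v; }

/* Read-modify-write as in the fixed code: touch only fe_type's bits. */
static void set_vlan_filter(uint8_t fe_type, int enable)
{
	uint8_t cur = read_reg();

	write_reg(enable ? (cur | fe_type) : (cur & ~fe_type));
}

int main(void)
{
	set_vlan_filter(0x2, 1);
	printf("vlan_fe = 0x%x\n", hw_vlan_fe);   /* 0x7: old 0x5 preserved */
	set_vlan_filter(0x4, 0);
	printf("vlan_fe = 0x%x\n", hw_vlan_fe);   /* 0x3 */
	return 0;
}
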
@@ -8270,6 +8283,7 @@ void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
                        kfree(vlan);
                }
        }
+       clear_bit(vport->vport_id, hdev->vf_vlan_full);
 }
 
 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
@@ -8486,6 +8500,28 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
        }
 }
 
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+       struct hclge_vlan_info *vlan_info;
+       struct hclge_vport *vport;
+       int ret;
+       int vf;
+
+       /* clear port base vlan for all vf */
+       for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+               vport = &hdev->vport[vf];
+               vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+               ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+                                              vport->vport_id,
+                                              vlan_info->vlan_tag, true);
+               if (ret)
+                       dev_err(&hdev->pdev->dev,
+                               "failed to clear vf vlan for vf%d, ret = %d\n",
+                               vf - HCLGE_VF_VPORT_START_NUM, ret);
+       }
+}
+
 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
                          u16 vlan_id, bool is_kill)
 {
@@ -9895,6 +9931,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
        struct hclge_mac *mac = &hdev->hw.mac;
 
        hclge_reset_vf_rate(hdev);
+       hclge_clear_vf_vlan(hdev);
        hclge_misc_affinity_teardown(hdev);
        hclge_state_uninit(hdev);
 
index a3c0822..3d850f6 100644 (file)
@@ -799,6 +799,7 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
                        hclge_get_link_mode(vport, req);
                        break;
                case HCLGE_MBX_GET_VF_FLR_STATUS:
+               case HCLGE_MBX_VF_UNINIT:
                        hclge_rm_vport_all_mac_table(vport, true,
                                                     HCLGE_MAC_ADDR_UC);
                        hclge_rm_vport_all_mac_table(vport, true,
index d659720..0510d85 100644 (file)
@@ -2803,6 +2803,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 {
        hclgevf_state_uninit(hdev);
 
+       hclgevf_send_mbx_msg(hdev, HCLGE_MBX_VF_UNINIT, 0, NULL, 0,
+                            false, NULL, 0);
+
        if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
index c75239d..4bd3324 100644 (file)
@@ -2142,6 +2142,8 @@ static void __ibmvnic_reset(struct work_struct *work)
 {
        struct ibmvnic_rwi *rwi;
        struct ibmvnic_adapter *adapter;
+       bool saved_state = false;
+       unsigned long flags;
        u32 reset_state;
        int rc = 0;
 
@@ -2153,17 +2155,25 @@ static void __ibmvnic_reset(struct work_struct *work)
                return;
        }
 
-       reset_state = adapter->state;
-
        rwi = get_next_rwi(adapter);
        while (rwi) {
+               spin_lock_irqsave(&adapter->state_lock, flags);
+
                if (adapter->state == VNIC_REMOVING ||
                    adapter->state == VNIC_REMOVED) {
+                       spin_unlock_irqrestore(&adapter->state_lock, flags);
                        kfree(rwi);
                        rc = EBUSY;
                        break;
                }
 
+               if (!saved_state) {
+                       reset_state = adapter->state;
+                       adapter->state = VNIC_RESETTING;
+                       saved_state = true;
+               }
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+
                if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
                        /* CHANGE_PARAM requestor holds rtnl_lock */
                        rc = do_change_param_reset(adapter, rwi, reset_state);
@@ -5091,6 +5101,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
                          __ibmvnic_delayed_reset);
        INIT_LIST_HEAD(&adapter->rwi_list);
        spin_lock_init(&adapter->rwi_lock);
+       spin_lock_init(&adapter->state_lock);
        mutex_init(&adapter->fw_lock);
        init_completion(&adapter->init_done);
        init_completion(&adapter->fw_done);
@@ -5163,8 +5174,17 @@ static int ibmvnic_remove(struct vio_dev *dev)
 {
        struct net_device *netdev = dev_get_drvdata(&dev->dev);
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->state_lock, flags);
+       if (adapter->state == VNIC_RESETTING) {
+               spin_unlock_irqrestore(&adapter->state_lock, flags);
+               return -EBUSY;
+       }
 
        adapter->state = VNIC_REMOVING;
+       spin_unlock_irqrestore(&adapter->state_lock, flags);
+
        rtnl_lock();
        unregister_netdevice(netdev);
 
index 60eccaf..f8416e1 100644 (file)
@@ -941,7 +941,8 @@ enum vnic_state {VNIC_PROBING = 1,
                 VNIC_CLOSING,
                 VNIC_CLOSED,
                 VNIC_REMOVING,
-                VNIC_REMOVED};
+                VNIC_REMOVED,
+                VNIC_RESETTING};
 
 enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
                           VNIC_RESET_MOBILITY,
@@ -1090,4 +1091,7 @@ struct ibmvnic_adapter {
 
        struct ibmvnic_tunables desired;
        struct ibmvnic_tunables fallback;
+
+       /* Used for serialization of the state field */
+       spinlock_t state_lock;
 };
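The new state_lock closes a race between the reset worker and device removal: both paths now inspect and transition adapter->state under the lock. A minimal sketch of the remove-side pattern, using a hypothetical helper name for illustration only:

/* Sketch only: vnic_try_remove() is not a function in this driver. */
static int vnic_try_remove(struct ibmvnic_adapter *adapter)
{
	unsigned long flags;

	spin_lock_irqsave(&adapter->state_lock, flags);
	if (adapter->state == VNIC_RESETTING) {
		/* a reset is in flight; removal must back off */
		spin_unlock_irqrestore(&adapter->state_lock, flags);
		return -EBUSY;
	}
	adapter->state = VNIC_REMOVING;	/* reset worker sees this and bails */
	spin_unlock_irqrestore(&adapter->state_lock, flags);
	return 0;
}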
index db4ea58..0f02c7a 100644 (file)
@@ -3280,10 +3280,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
 
                dev_info(&adapter->pdev->dev,
                         "Some CPU C-states have been disabled in order to enable jumbo frames\n");
-               pm_qos_update_request(&adapter->pm_qos_req, lat);
+               cpu_latency_qos_update_request(&adapter->pm_qos_req, lat);
        } else {
-               pm_qos_update_request(&adapter->pm_qos_req,
-                                     PM_QOS_DEFAULT_VALUE);
+               cpu_latency_qos_update_request(&adapter->pm_qos_req,
+                                              PM_QOS_DEFAULT_VALUE);
        }
 
        /* Enable Receives */
@@ -4636,8 +4636,7 @@ int e1000e_open(struct net_device *netdev)
                e1000_update_mng_vlan(adapter);
 
        /* DMA latency requirement to workaround jumbo issue */
-       pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(&adapter->pm_qos_req, PM_QOS_DEFAULT_VALUE);
 
        /* before we allocate an interrupt, we must be ready to handle it.
         * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -4679,7 +4678,7 @@ int e1000e_open(struct net_device *netdev)
        return 0;
 
 err_req_irq:
-       pm_qos_remove_request(&adapter->pm_qos_req);
+       cpu_latency_qos_remove_request(&adapter->pm_qos_req);
        e1000e_release_hw_control(adapter);
        e1000_power_down_phy(adapter);
        e1000e_free_rx_resources(adapter->rx_ring);
@@ -4743,7 +4742,7 @@ int e1000e_close(struct net_device *netdev)
            !test_bit(__E1000_TESTING, &adapter->state))
                e1000e_release_hw_control(adapter);
 
-       pm_qos_remove_request(&adapter->pm_qos_req);
+       cpu_latency_qos_remove_request(&adapter->pm_qos_req);
 
        pm_runtime_put_sync(&pdev->dev);
 
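The e1000e hunks above are a mechanical conversion to the CPU latency QoS helpers that replace open-coded use of the PM_QOS_CPU_DMA_LATENCY class; the class argument disappears because the new API is specific to CPU latency. The mapping, as a sketch:

/* Old API                                         New API
 * pm_qos_add_request(r, PM_QOS_CPU_DMA_LATENCY, v)  cpu_latency_qos_add_request(r, v)
 * pm_qos_update_request(r, v)                       cpu_latency_qos_update_request(r, v)
 * pm_qos_remove_request(r)                          cpu_latency_qos_remove_request(r)
 */
struct pm_qos_request req;

cpu_latency_qos_add_request(&req, 0);	/* 0 us: forbid deep C-states */
/* ... latency-sensitive window ... */
cpu_latency_qos_remove_request(&req);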
index 0b9e851..d2e2dc5 100644 (file)
@@ -347,7 +347,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
        }
 
 
-       dev->err_interrupt = platform_get_irq(pdev, 0);
+       dev->err_interrupt = platform_get_irq_optional(pdev, 0);
        if (dev->err_interrupt > 0 &&
            resource_size(r) < MVMDIO_ERR_INT_MASK + 4) {
                dev_err(&pdev->dev,
@@ -364,8 +364,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
                writel(MVMDIO_ERR_INT_SMI_DONE,
                        dev->regs + MVMDIO_ERR_INT_MASK);
 
-       } else if (dev->err_interrupt == -EPROBE_DEFER) {
-               ret = -EPROBE_DEFER;
+       } else if (dev->err_interrupt < 0) {
+               ret = dev->err_interrupt;
                goto out_mdio;
        }
 
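platform_get_irq_optional() fits here because the error interrupt genuinely is optional, and the old code propagated only -EPROBE_DEFER while silently swallowing other failures. The resulting pattern, sketched:

int irq = platform_get_irq_optional(pdev, 0);	/* no error message if absent */

if (irq > 0) {
	/* IRQ provided: unmask and use the error interrupt */
} else if (irq < 0) {
	/* propagate every failure, -EPROBE_DEFER included */
	return irq;
}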
index 86d543a..d3b7373 100644 (file)
@@ -2176,24 +2176,29 @@ static int ocelot_init_timestamp(struct ocelot *ocelot)
        return 0;
 }
 
-static void ocelot_port_set_mtu(struct ocelot *ocelot, int port, size_t mtu)
+/* Configure the maximum SDU (L2 payload) on RX to the value specified in @sdu.
+ * The length of VLAN tags is accounted for automatically via DEV_MAC_TAGS_CFG.
+ */
+static void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu)
 {
        struct ocelot_port *ocelot_port = ocelot->ports[port];
+       int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;
        int atop_wm;
 
-       ocelot_port_writel(ocelot_port, mtu, DEV_MAC_MAXLEN_CFG);
+       ocelot_port_writel(ocelot_port, maxlen, DEV_MAC_MAXLEN_CFG);
 
        /* Set Pause WM hysteresis
-        * 152 = 6 * mtu / OCELOT_BUFFER_CELL_SZ
-        * 101 = 4 * mtu / OCELOT_BUFFER_CELL_SZ
+        * 152 = 6 * maxlen / OCELOT_BUFFER_CELL_SZ
+        * 101 = 4 * maxlen / OCELOT_BUFFER_CELL_SZ
         */
        ocelot_write_rix(ocelot, SYS_PAUSE_CFG_PAUSE_ENA |
                         SYS_PAUSE_CFG_PAUSE_STOP(101) |
                         SYS_PAUSE_CFG_PAUSE_START(152), SYS_PAUSE_CFG, port);
 
        /* Tail dropping watermark */
-       atop_wm = (ocelot->shared_queue_sz - 9 * mtu) / OCELOT_BUFFER_CELL_SZ;
-       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * mtu),
+       atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) /
+                  OCELOT_BUFFER_CELL_SZ;
+       ocelot_write_rix(ocelot, ocelot_wm_enc(9 * maxlen),
                         SYS_ATOP, port);
        ocelot_write(ocelot, ocelot_wm_enc(atop_wm), SYS_ATOP_TOT_CFG);
 }
@@ -2222,9 +2227,10 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
                           DEV_MAC_HDX_CFG);
 
        /* Set Max Length and maximum tags allowed */
-       ocelot_port_set_mtu(ocelot, port, VLAN_ETH_FRAME_LEN);
+       ocelot_port_set_maxlen(ocelot, port, ETH_DATA_LEN);
        ocelot_port_writel(ocelot_port, DEV_MAC_TAGS_CFG_TAG_ID(ETH_P_8021AD) |
                           DEV_MAC_TAGS_CFG_VLAN_AWR_ENA |
+                          DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA |
                           DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA,
                           DEV_MAC_TAGS_CFG);
 
@@ -2310,18 +2316,18 @@ void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
         * Only one port can be an NPI at the same time.
         */
        if (cpu < ocelot->num_phys_ports) {
-               int mtu = VLAN_ETH_FRAME_LEN + OCELOT_TAG_LEN;
+               int sdu = ETH_DATA_LEN + OCELOT_TAG_LEN;
 
                ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
                             QSYS_EXT_CPU_CFG_EXT_CPU_PORT(cpu),
                             QSYS_EXT_CPU_CFG);
 
                if (injection == OCELOT_TAG_PREFIX_SHORT)
-                       mtu += OCELOT_SHORT_PREFIX_LEN;
+                       sdu += OCELOT_SHORT_PREFIX_LEN;
                else if (injection == OCELOT_TAG_PREFIX_LONG)
-                       mtu += OCELOT_LONG_PREFIX_LEN;
+                       sdu += OCELOT_LONG_PREFIX_LEN;
 
-               ocelot_port_set_mtu(ocelot, cpu, mtu);
+               ocelot_port_set_maxlen(ocelot, cpu, sdu);
        }
 
        /* CPU port Injection/Extraction configuration */
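The rename from "mtu" to "maxlen" matters because DEV_MAC_MAXLEN_CFG takes a full frame length, while callers naturally think in terms of the L2 payload (SDU). With the helper doing the conversion, a standard frame works out as:

/* maxlen = SDU + L2 header + FCS; VLAN tag lengths are added by the
 * hardware via DEV_MAC_TAGS_CFG and therefore not counted here.
 */
int sdu    = ETH_DATA_LEN;                   /* 1500-byte payload */
int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN;   /* 1500 + 14 + 4 = 1518 */

/* The NPI (CPU) port additionally reserves room for the injection
 * header: sdu = ETH_DATA_LEN + OCELOT_TAG_LEN (+ optional prefix).
 */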
index 191271f..c2f5b69 100644 (file)
@@ -1688,7 +1688,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
                return -EINVAL;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1698,7 +1698,7 @@ static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
                        ether_addr_copy(ionic->vfs[vf].macaddr, mac);
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
@@ -1719,7 +1719,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
        if (proto != htons(ETH_P_8021Q))
                return -EPROTONOSUPPORT;
 
-       down_read(&ionic->vf_op_lock);
+       down_write(&ionic->vf_op_lock);
 
        if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
                ret = -EINVAL;
@@ -1730,7 +1730,7 @@ static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
                        ionic->vfs[vf].vlanid = vlan;
        }
 
-       up_read(&ionic->vf_op_lock);
+       up_write(&ionic->vf_op_lock);
        return ret;
 }
 
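Both ndo callbacks modify ionic->vfs[], so taking vf_op_lock as a reader allowed two configuration requests to race on the same entry; the fix is simply to take the writer side. The intended discipline:

down_read(&ionic->vf_op_lock);	/* pure readers, e.g. get_vf_config */
/* ... copy fields out of ionic->vfs[vf] ... */
up_read(&ionic->vf_op_lock);

down_write(&ionic->vf_op_lock);	/* mutators: set_vf_mac, set_vf_vlan */
/* ... update firmware, then ionic->vfs[vf] ... */
up_write(&ionic->vf_op_lock);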
index c705743..2cc8184 100644 (file)
@@ -2277,7 +2277,7 @@ static int __init sxgbe_cmdline_opt(char *str)
        if (!str || !*str)
                return -EINVAL;
        while ((opt = strsep(&str, ",")) != NULL) {
-               if (!strncmp(opt, "eee_timer:", 6)) {
+               if (!strncmp(opt, "eee_timer:", 10)) {
                        if (kstrtoint(opt + 10, 0, &eee_timer))
                                goto err;
                }
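With a length of 6 the comparison only checked "eee_ti", so any option sharing those six characters would have been parsed as eee_timer. The length passed to strncmp() must cover the whole literal prefix; a sketch that avoids the hand-counted magic number:

/* Sketch: match a literal prefix without a hand-counted length. */
if (!strncmp(opt, "eee_timer:", strlen("eee_timer:")))	/* n == 10 */
	ret = kstrtoint(opt + strlen("eee_timer:"), 0, &eee_timer);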
index 52113b7..3f16bd8 100644 (file)
@@ -2853,11 +2853,24 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
        }
 
        /* Transmit timestamps are only available for 8XXX series. They result
-        * in three events per packet. These occur in order, and are:
-        *  - the normal completion event
+        * in up to three events per packet. These occur in order, and are:
+        *  - the normal completion event (may be omitted)
         *  - the low part of the timestamp
         *  - the high part of the timestamp
         *
+        * It's possible for multiple completion events to appear before the
+        * corresponding timestamps. So we can for example get:
+        *  COMP N
+        *  COMP N+1
+        *  TS_LO N
+        *  TS_HI N
+        *  TS_LO N+1
+        *  TS_HI N+1
+        *
+        * In addition, adjacent completions may be merged, so we may not
+        * see COMP N above. As such, the completion
+        * events are not very useful here.
+        *
         * Each part of the timestamp is itself split across two 16 bit
         * fields in the event.
         */
@@ -2865,17 +2878,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
 
        switch (tx_ev_type) {
        case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
-               /* In case of Queue flush or FLR, we might have received
-                * the previous TX completion event but not the Timestamp
-                * events.
-                */
-               if (tx_queue->completed_desc_ptr != tx_queue->ptr_mask)
-                       efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-
-               tx_ev_desc_ptr = EFX_QWORD_FIELD(*event,
-                                                ESF_DZ_TX_DESCR_INDX);
-               tx_queue->completed_desc_ptr =
-                                       tx_ev_desc_ptr & tx_queue->ptr_mask;
+               /* Ignore this event - see above. */
                break;
 
        case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
@@ -2887,8 +2890,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
                ts_part = efx_ef10_extract_event_ts(event);
                tx_queue->completed_timestamp_major = ts_part;
 
-               efx_xmit_done(tx_queue, tx_queue->completed_desc_ptr);
-               tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
+               efx_xmit_done_single(tx_queue);
                break;
 
        default:
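Since completion events can be merged away, the TS_HI event, which always arrives last for a timestamped packet, becomes the only reliable completion signal, and efx_xmit_done_single() (added further below) retires exactly one skb per TS_HI. Condensed, the handler now behaves as:

switch (tx_ev_type) {
case TX_TIMESTAMP_EVENT_TX_EV_COMPLETION:
	break;	/* may be merged or early: carries no usable state */
case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_LO:
	tx_queue->completed_timestamp_minor = efx_ef10_extract_event_ts(event);
	break;
case TX_TIMESTAMP_EVENT_TX_EV_TSTAMP_HI:
	tx_queue->completed_timestamp_major = efx_ef10_extract_event_ts(event);
	efx_xmit_done_single(tx_queue);	/* retire exactly one skb */
	break;
}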
index f1bdb04..95395d6 100644 (file)
@@ -20,6 +20,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *net_dev);
 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue);
 int efx_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
                 void *type_data);
 extern unsigned int efx_piobuf_size;
index aeb5e8a..73d4e39 100644 (file)
@@ -583,6 +583,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
                if (tx_queue->channel)
                        tx_queue->channel = channel;
                tx_queue->buffer = NULL;
+               tx_queue->cb_page = NULL;
                memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
        }
 
index 9f9886f..8164f0e 100644 (file)
@@ -208,8 +208,6 @@ struct efx_tx_buffer {
  *     avoid cache-line ping-pong between the xmit path and the
  *     completion path.
  * @merge_events: Number of TX merged completion events
- * @completed_desc_ptr: Most recent completed pointer - only used with
- *      timestamping.
  * @completed_timestamp_major: Top part of the most recent tx timestamp.
  * @completed_timestamp_minor: Low part of the most recent tx timestamp.
  * @insert_count: Current insert pointer
@@ -269,7 +267,6 @@ struct efx_tx_queue {
        unsigned int merge_events;
        unsigned int bytes_compl;
        unsigned int pkts_compl;
-       unsigned int completed_desc_ptr;
        u32 completed_timestamp_major;
        u32 completed_timestamp_minor;
 
index 04d7f41..8aafc54 100644 (file)
@@ -535,6 +535,44 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
        return efx_enqueue_skb(tx_queue, skb);
 }
 
+void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
+{
+       unsigned int pkts_compl = 0, bytes_compl = 0;
+       unsigned int read_ptr;
+       bool finished = false;
+
+       read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+
+       while (!finished) {
+               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+
+               if (!efx_tx_buffer_in_use(buffer)) {
+                       struct efx_nic *efx = tx_queue->efx;
+
+                       netif_err(efx, hw, efx->net_dev,
+                                 "TX queue %d spurious single TX completion\n",
+                                 tx_queue->queue);
+                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                       return;
+               }
+
+               /* Need to check the flag before dequeueing. */
+               if (buffer->flags & EFX_TX_BUF_SKB)
+                       finished = true;
+               efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
+
+               ++tx_queue->read_count;
+               read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
+       }
+
+       tx_queue->pkts_compl += pkts_compl;
+       tx_queue->bytes_compl += bytes_compl;
+
+       EFX_WARN_ON_PARANOID(pkts_compl != 1);
+
+       efx_xmit_done_check_empty(tx_queue);
+}
+
 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
 {
        struct efx_nic *efx = tx_queue->efx;
index b1571e9..70876df 100644 (file)
@@ -80,7 +80,6 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
        tx_queue->xmit_more_available = false;
        tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_ptp_channel(efx));
-       tx_queue->completed_desc_ptr = tx_queue->ptr_mask;
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;
 
@@ -210,10 +209,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
 
-               if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
-                   unlikely(buffer->len == 0)) {
+               if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
-                                 "TX queue %d spurious TX completion id %x\n",
+                                 "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
@@ -226,6 +224,19 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
        }
 }
 
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
+{
+       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
+               if (tx_queue->read_count == tx_queue->old_write_count) {
+                       /* Ensure that read_count is flushed. */
+                       smp_mb();
+                       tx_queue->empty_read_count =
+                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
+               }
+       }
+}
+
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
 {
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
@@ -256,15 +267,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }
 
-       /* Check whether the hardware queue is now empty */
-       if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
-               tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
-               if (tx_queue->read_count == tx_queue->old_write_count) {
-                       smp_mb();
-                       tx_queue->empty_read_count =
-                               tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
-               }
-       }
+       efx_xmit_done_check_empty(tx_queue);
 }
 
 /* Remove buffers put into a tx_queue for the current packet.
index f92f1fe..99cf7ce 100644 (file)
@@ -21,6 +21,12 @@ void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                        unsigned int *pkts_compl,
                        unsigned int *bytes_compl);
 
+static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
+{
+       return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
+}
+
+void efx_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
 
 void efx_enqueue_unwind(struct efx_tx_queue *tx_queue,
index d0356fb..5427843 100644 (file)
@@ -24,6 +24,7 @@
 static void dwmac1000_core_init(struct mac_device_info *hw,
                                struct net_device *dev)
 {
+       struct stmmac_priv *priv = netdev_priv(dev);
        void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
        int mtu = dev->mtu;
@@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
         * Broadcom tags can look like invalid LLC/SNAP packets and cause the
         * hardware to truncate packets on reception.
         */
-       if (netdev_uses_dsa(dev))
+       if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
                value &= ~GMAC_CONTROL_ACS;
 
        if (mtu > 1500)
index 30cd0c4..8801d09 100644 (file)
@@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
                }
                if (dev)
                        dev_put(dev);
+               cond_resched();
        }
 }
 
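Multicast work items can drain an arbitrarily long backlog in a single run, so yielding once per iteration keeps the worker from monopolizing the CPU (the macvlan hunk further below applies the same remedy). The general shape, with hypothetical helpers:

static void process_backlog_work(struct work_struct *work)
{
	struct sk_buff *skb;

	while ((skb = dequeue_next_skb()) != NULL) {	/* hypothetical */
		deliver_one_skb(skb);			/* hypothetical */
		cond_resched();	/* let other tasks run between packets */
	}
}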
@@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
        struct ethhdr *ethh = eth_hdr(skb);
        int ret = NET_XMIT_DROP;
 
-       /* In this mode we dont care about multicast and broadcast traffic */
-       if (is_multicast_ether_addr(ethh->h_dest)) {
-               pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
-                                    ntohs(skb->protocol));
-               kfree_skb(skb);
-               goto out;
-       }
-
        /* The ipvlan is a pseudo-L2 device, so the packets that we receive
         * will have L2, which needs to be discarded and processed further
         * in the net-ns of the main-device.
         */
        if (skb_mac_header_was_set(skb)) {
+               /* In this mode we don't care about
+                * multicast and broadcast traffic */
+               if (is_multicast_ether_addr(ethh->h_dest)) {
+                       pr_debug_ratelimited(
+                               "Dropped {multi|broad}cast of type=[%x]\n",
+                               ntohs(skb->protocol));
+                       kfree_skb(skb);
+                       goto out;
+               }
+
                skb_pull(skb, sizeof(*ethh));
                skb->mac_header = (typeof(skb->mac_header))~0U;
                skb_reset_network_header(skb);
index a706622..f195f27 100644 (file)
@@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
 static int ipvlan_open(struct net_device *dev)
 {
        struct ipvl_dev *ipvlan = netdev_priv(dev);
-       struct net_device *phy_dev = ipvlan->phy_dev;
        struct ipvl_addr *addr;
 
        if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
@@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
                ipvlan_ht_addr_add(ipvlan, addr);
        rcu_read_unlock();
 
-       return dev_uc_add(phy_dev, phy_dev->dev_addr);
+       return 0;
 }
 
 static int ipvlan_stop(struct net_device *dev)
@@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
        dev_uc_unsync(phy_dev, dev);
        dev_mc_unsync(phy_dev, dev);
 
-       dev_uc_del(phy_dev, phy_dev->dev_addr);
-
        rcu_read_lock();
        list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
                ipvlan_ht_addr_del(addr);
index 45bfd99..6ec6fc1 100644 (file)
@@ -424,6 +424,11 @@ static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
        return (struct macsec_eth_header *)skb_mac_header(skb);
 }
 
+static sci_t dev_to_sci(struct net_device *dev, __be16 port)
+{
+       return make_sci(dev->dev_addr, port);
+}
+
 static void __macsec_pn_wrapped(struct macsec_secy *secy,
                                struct macsec_tx_sa *tx_sa)
 {
@@ -3268,6 +3273,20 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
 
 out:
        ether_addr_copy(dev->dev_addr, addr->sa_data);
+       macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
+
+       /* If h/w offloading is available, propagate to the device */
+       if (macsec_is_offloaded(macsec)) {
+               const struct macsec_ops *ops;
+               struct macsec_context ctx;
+
+               ops = macsec_get_ops(macsec, &ctx);
+               if (ops) {
+                       ctx.secy = &macsec->secy;
+                       macsec_offload(ops->mdo_upd_secy, &ctx);
+               }
+       }
+
        return 0;
 }
 
@@ -3342,6 +3361,7 @@ static const struct device_type macsec_type = {
 
 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
        [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
+       [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
        [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
        [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
        [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
@@ -3592,11 +3612,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
        return false;
 }
 
-static sci_t dev_to_sci(struct net_device *dev, __be16 port)
-{
-       return make_sci(dev->dev_addr, port);
-}
-
 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
index 81aa7ad..e7289d6 100644 (file)
@@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
                if (src)
                        dev_put(src->dev);
                consume_skb(skb);
+
+               cond_resched();
        }
 }
 
index 23f1958..459fb20 100644 (file)
@@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
        /* same phy as above, with just a different OUI */
        .phy_id         = 0x002bdc00,
        .phy_id_mask    = 0xfffffc00,
+       .name           = "Broadcom BCM63XX (2)",
        /* PHY_BASIC_FEATURES */
        .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm63xx_config_init,
index d76e038..355bfde 100644 (file)
@@ -727,7 +727,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
                phy_trigger_machine(phydev);
        }
 
-       if (phy_clear_interrupt(phydev))
+       /* did_interrupt() may have cleared the interrupt already */
+       if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
                goto phy_err;
        return IRQ_HANDLED;
 
index c8b0c34..28e3c5c 100644 (file)
@@ -286,6 +286,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
        if (!mdio_bus_phy_may_suspend(phydev))
                return 0;
 
+       phydev->suspended_by_mdio_bus = 1;
+
        return phy_suspend(phydev);
 }
 
@@ -294,9 +296,11 @@ static int mdio_bus_phy_resume(struct device *dev)
        struct phy_device *phydev = to_phy_device(dev);
        int ret;
 
-       if (!mdio_bus_phy_may_suspend(phydev))
+       if (!phydev->suspended_by_mdio_bus)
                goto no_resume;
 
+       phydev->suspended_by_mdio_bus = 0;
+
        ret = phy_resume(phydev);
        if (ret < 0)
                return ret;
index 70b9a14..6e66b8e 100644 (file)
@@ -761,8 +761,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
                config.interface = interface;
 
        ret = phylink_validate(pl, supported, &config);
-       if (ret)
+       if (ret) {
+               phylink_warn(pl, "validation of %s with support %*pb and advertisement %*pb failed: %d\n",
+                            phy_modes(config.interface),
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, phy->supported,
+                            __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising,
+                            ret);
                return ret;
+       }
 
        phy->phylink = pl;
        phy->phy_link_change = phylink_phy_change;
index 58a69f8..f78ceba 100644 (file)
@@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
        struct cstate *cs = lcs->next;
        unsigned long deltaS, deltaA;
        short changes = 0;
-       int hlen;
+       int nlen, hlen;
        unsigned char new_seq[16];
        unsigned char *cp = new_seq;
        struct iphdr *ip;
@@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                return isize;
 
        ip = (struct iphdr *) icp;
+       if (ip->version != 4 || ip->ihl < 5)
+               return isize;
 
        /* Bail if this packet isn't TCP, or is an IP fragment */
        if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
@@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
                        comp->sls_o_tcp++;
                return isize;
        }
-       /* Extract TCP header */
+       nlen = ip->ihl * 4;
+       if (isize < nlen + sizeof(*th))
+               return isize;
 
-       th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
-       hlen = ip->ihl*4 + th->doff*4;
+       th = (struct tcphdr *)(icp + nlen);
+       if (th->doff < sizeof(struct tcphdr) / 4)
+               return isize;
+       hlen = nlen + th->doff * 4;
 
        /*  Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
         *  some other control bit is set). Also uncompressible if
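The added checks make slhc_compress() safe against malformed packets: every header field is validated before it is used to compute an offset into the buffer. The required ordering, sketched under the assumption that icp points at isize bytes:

struct iphdr *ip = (struct iphdr *)icp;

if (ip->version != 4 || ip->ihl < 5)		/* sane IPv4 header? */
	return isize;				/* pass through as-is */
nlen = ip->ihl * 4;				/* 20..60 bytes */
if (isize < nlen + sizeof(struct tcphdr))	/* room for TCP header? */
	return isize;
th = (struct tcphdr *)(icp + nlen);
if (th->doff < sizeof(struct tcphdr) / 4)	/* doff in 32-bit words */
	return isize;
hlen = nlen + th->doff * 4;			/* only now trust hlen */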
index ca70a1d..4004f98 100644 (file)
@@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
        [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
        [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
        [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
+       [TEAM_ATTR_OPTION_PORT_IFINDEX]         = { .type = NLA_U32 },
+       [TEAM_ATTR_OPTION_ARRAY_INDEX]          = { .type = NLA_U32 },
 };
 
 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
index 78ddbaf..95b19ce 100644 (file)
@@ -3221,6 +3221,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
                }
 
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        return data;
@@ -5402,7 +5404,10 @@ static void r8153_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
@@ -5539,7 +5544,10 @@ static void r8153b_init(struct r8152 *tp)
                if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
                    AUTOLOAD_DONE)
                        break;
+
                msleep(20);
+               if (test_bit(RTL8152_UNPLUG, &tp->flags))
+                       break;
        }
 
        data = r8153_phy_status(tp, 0);
index 8cdc441..d4cbb9e 100644 (file)
@@ -328,7 +328,7 @@ static void veth_get_stats64(struct net_device *dev,
        rcu_read_lock();
        peer = rcu_dereference(priv->peer);
        if (peer) {
-               tot->rx_dropped += veth_stats_tx(peer, &packets, &bytes);
+               veth_stats_tx(peer, &packets, &bytes);
                tot->rx_bytes += bytes;
                tot->rx_packets += packets;
 
index 5ec16ce..a202a4e 100644 (file)
@@ -1052,11 +1052,11 @@ static int ath10k_download_fw(struct ath10k *ar)
        }
 
        memset(&latency_qos, 0, sizeof(latency_qos));
-       pm_qos_add_request(&latency_qos, PM_QOS_CPU_DMA_LATENCY, 0);
+       cpu_latency_qos_add_request(&latency_qos, 0);
 
        ret = ath10k_bmi_fast_download(ar, address, data, data_len);
 
-       pm_qos_remove_request(&latency_qos);
+       cpu_latency_qos_remove_request(&latency_qos);
 
        return ret;
 }
index 536cd72..5dfcce7 100644 (file)
@@ -1730,7 +1730,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
        /* the ipw2100 hardware really doesn't want power management delays
         * longer than 175usec
         */
-       pm_qos_update_request(&ipw2100_pm_qos_req, 175);
+       cpu_latency_qos_update_request(&ipw2100_pm_qos_req, 175);
 
        /* If the interrupt is enabled, turn it off... */
        spin_lock_irqsave(&priv->low_lock, flags);
@@ -1875,7 +1875,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
        ipw2100_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->low_lock, flags);
 
-       pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(&ipw2100_pm_qos_req,
+                                      PM_QOS_DEFAULT_VALUE);
 
        /* We have to signal any supplicant if we are disassociating */
        if (associated)
@@ -6566,8 +6567,7 @@ static int __init ipw2100_init(void)
        printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
        printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
 
-       pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
-                          PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
 
        ret = pci_register_driver(&ipw2100_pci_driver);
        if (ret)
@@ -6594,7 +6594,7 @@ static void __exit ipw2100_exit(void)
                           &driver_attr_debug_level);
 #endif
        pci_unregister_driver(&ipw2100_pci_driver);
-       pm_qos_remove_request(&ipw2100_pm_qos_req);
+       cpu_latency_qos_remove_request(&ipw2100_pm_qos_req);
 }
 
 module_init(ipw2100_init);
index 70b29bf..60296a7 100644 (file)
@@ -308,7 +308,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                }
 
                /* PHY_SKU section is mandatory in B0 */
-               if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+               if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
+                   !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
                        IWL_ERR(mvm,
                                "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
index 6173c80..1847f55 100644 (file)
@@ -447,10 +447,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
        struct page *page = virt_to_head_page(data);
        int offset = data - page_address(page);
        struct sk_buff *skb = q->rx_head;
+       struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-       offset += q->buf_offset;
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
-                       q->buf_size);
+       if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
+               offset += q->buf_offset;
+               skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
+                               q->buf_size);
+       }
 
        if (more)
                return;
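skb_shinfo(skb)->frags is a fixed-size array of MAX_SKB_FRAGS entries, so a frame fragmented into more rx buffers than that would previously write past the array. The guard is the standard idiom:

if (skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)	/* == ARRAY_SIZE(frags) */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
			page, offset, len, buf_size);
/* else: silently drop the excess fragment (buffer recycling is
 * driver-specific and not shown here)
 */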
index 3e85c5c..0fe08c4 100644 (file)
@@ -850,9 +850,11 @@ out_free_tagset:
        if (new)
                blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
 out_free_async_qe:
-       nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
-               sizeof(struct nvme_command), DMA_TO_DEVICE);
-       ctrl->async_event_sqe.data = NULL;
+       if (ctrl->async_event_sqe.data) {
+               nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+                       sizeof(struct nvme_command), DMA_TO_DEVICE);
+               ctrl->async_event_sqe.data = NULL;
+       }
 out_free_queue:
        nvme_rdma_free_queue(&ctrl->queues[0]);
        return error;
index af674fc..5bb5342 100644 (file)
@@ -515,7 +515,7 @@ static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
        return 1;
 }
 
-static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
+static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
 {
        struct nvmet_tcp_queue *queue = cmd->queue;
        int ret;
@@ -523,9 +523,15 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
        while (cmd->cur_sg) {
                struct page *page = sg_page(cmd->cur_sg);
                u32 left = cmd->cur_sg->length - cmd->offset;
+               int flags = MSG_DONTWAIT;
+
+               if ((!last_in_batch && cmd->queue->send_list_len) ||
+                   cmd->wbytes_done + left < cmd->req.transfer_len ||
+                   queue->data_digest || !queue->nvme_sq.sqhd_disabled)
+                       flags |= MSG_MORE;
 
                ret = kernel_sendpage(cmd->queue->sock, page, cmd->offset,
-                                       left, MSG_DONTWAIT | MSG_MORE);
+                                       left, flags);
                if (ret <= 0)
                        return ret;
 
@@ -660,7 +666,7 @@ static int nvmet_tcp_try_send_one(struct nvmet_tcp_queue *queue,
        }
 
        if (cmd->state == NVMET_TCP_SEND_DATA) {
-               ret = nvmet_try_send_data(cmd);
+               ret = nvmet_try_send_data(cmd, last_in_batch);
                if (ret <= 0)
                        goto done_send;
        }
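MSG_MORE tells the TCP stack to hold data back because more follows; omitting it on the final fragment lets the response go out immediately instead of waiting for the next send. The rewritten condition enumerates the three 'more is coming' cases, schematically:

int flags = MSG_DONTWAIT;

if ((!last_in_batch && queue->send_list_len) ||	     /* more cmds queued */
    cmd->wbytes_done + left < cmd->req.transfer_len ||   /* more data PDUs */
    queue->data_digest || !queue->nvme_sq.sqhd_disabled) /* trailer follows */
	flags |= MSG_MORE;
/* else: last fragment of the response - push it out now */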
index 8270bbf..9f982c0 100644 (file)
@@ -306,6 +306,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
                                rc = of_mdiobus_register_phy(mdio, child, addr);
                                if (rc && rc != -ENODEV)
                                        goto unregister;
+                               break;
                        }
                }
        }
index 7b6409e..dce2626 100644 (file)
@@ -1073,13 +1073,26 @@ static int madera_pin_probe(struct platform_device *pdev)
                return ret;
        }
 
+       platform_set_drvdata(pdev, priv);
+
        dev_dbg(priv->dev, "pinctrl probed ok\n");
 
        return 0;
 }
 
+static int madera_pin_remove(struct platform_device *pdev)
+{
+       struct madera_pin_private *priv = platform_get_drvdata(pdev);
+
+       if (priv->madera->pdata.gpio_configs)
+               pinctrl_unregister_mappings(priv->madera->pdata.gpio_configs);
+
+       return 0;
+}
+
 static struct platform_driver madera_pin_driver = {
        .probe = madera_pin_probe,
+       .remove = madera_pin_remove,
        .driver = {
                .name = "madera-pinctrl",
        },
index 446d84f..f23c55e 100644 (file)
@@ -2021,7 +2021,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
                return PTR_ERR(pctldev->p);
        }
 
-       kref_get(&pctldev->p->users);
        pctldev->hog_default =
                pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
        if (IS_ERR(pctldev->hog_default)) {
index 73bf1d9..23cf04b 100644 (file)
@@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set {
        struct imx_sc_rpc_msg hdr;
        u32 val;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_req_pad_get {
        struct imx_sc_rpc_msg hdr;
        u16 pad;
-} __packed;
+} __packed __aligned(4);
 
 struct imx_sc_msg_resp_pad_get {
        struct imx_sc_rpc_msg hdr;
index 1b6e864..2ac921c 100644 (file)
@@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[]    = { GPIOX_0 };
 static const unsigned int sdio_d1_pins[]       = { GPIOX_1 };
 static const unsigned int sdio_d2_pins[]       = { GPIOX_2 };
 static const unsigned int sdio_d3_pins[]       = { GPIOX_3 };
-static const unsigned int sdio_cmd_pins[]      = { GPIOX_4 };
-static const unsigned int sdio_clk_pins[]      = { GPIOX_5 };
+static const unsigned int sdio_clk_pins[]      = { GPIOX_4 };
+static const unsigned int sdio_cmd_pins[]      = { GPIOX_5 };
 static const unsigned int sdio_irq_pins[]      = { GPIOX_7 };
 
 static const unsigned int nand_ce0_pins[]      = { BOOT_8 };
index a454f57..62c02b9 100644 (file)
@@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
                falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
                if (IS_ERR(falcon_info.clk[*bank])) {
                        dev_err(&ppdev->dev, "failed to get clock\n");
-                       of_node_put(np)
+                       of_node_put(np);
                        return PTR_ERR(falcon_info.clk[*bank]);
                }
                falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
index 9a8daa2..1a948c3 100644 (file)
@@ -1104,7 +1104,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
        pctrl->irq_chip.irq_mask = msm_gpio_irq_mask;
        pctrl->irq_chip.irq_unmask = msm_gpio_irq_unmask;
        pctrl->irq_chip.irq_ack = msm_gpio_irq_ack;
-       pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
        pctrl->irq_chip.irq_set_type = msm_gpio_irq_set_type;
        pctrl->irq_chip.irq_set_wake = msm_gpio_irq_set_wake;
        pctrl->irq_chip.irq_request_resources = msm_gpio_irq_reqres;
@@ -1118,7 +1117,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
                if (!chip->irq.parent_domain)
                        return -EPROBE_DEFER;
                chip->irq.child_to_parent_hwirq = msm_gpio_wakeirq;
-
+               pctrl->irq_chip.irq_eoi = irq_chip_eoi_parent;
                /*
                 * Let's skip handling the GPIOs, if the parent irqchip
                 * is handling the direct connect IRQ of the GPIO.
index fba1d41..338a15d 100644 (file)
@@ -794,7 +794,7 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
        girq->fwnode = of_node_to_fwnode(pctrl->dev->of_node);
        girq->parent_domain = parent_domain;
        girq->child_to_parent_hwirq = pm8xxx_child_to_parent_hwirq;
-       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+       girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_twocell;
        girq->child_offset_to_irq = pm8xxx_child_offset_to_irq;
        girq->child_irq_domain_ops.translate = pm8xxx_domain_translate;
 
index 34c8b6c..8e50388 100644 (file)
@@ -327,6 +327,7 @@ config RTC_DRV_MAX6900
 config RTC_DRV_MAX8907
        tristate "Maxim MAX8907"
        depends on MFD_MAX8907 || COMPILE_TEST
+       select REGMAP_IRQ
        help
          If you say yes here you will get support for the
          RTC of Maxim MAX8907 PMIC.
index 6cca727..cf87eb2 100644 (file)
@@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void)
                     (unsigned long) block);
        INIT_LIST_HEAD(&block->ccw_queue);
        spin_lock_init(&block->queue_lock);
+       INIT_LIST_HEAD(&block->format_list);
+       spin_lock_init(&block->format_lock);
        timer_setup(&block->timer, dasd_block_timeout, 0);
        spin_lock_init(&block->profile.lock);
 
@@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 
        if (dasd_ese_needs_format(cqr->block, irb)) {
                if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
-                       device->discipline->ese_read(cqr);
+                       device->discipline->ese_read(cqr, irb);
                        cqr->status = DASD_CQR_SUCCESS;
                        cqr->stopclk = now;
                        dasd_device_clear_timer(device);
                        dasd_schedule_device_bh(device);
                        return;
                }
-               fcqr = device->discipline->ese_format(device, cqr);
+               fcqr = device->discipline->ese_format(device, cqr, irb);
                if (IS_ERR(fcqr)) {
+                       if (PTR_ERR(fcqr) == -EINVAL) {
+                               cqr->status = DASD_CQR_ERROR;
+                               return;
+                       }
                        /*
                         * If we can't format now, let the request go
                         * one extra round. Maybe we can format later.
                         */
                        cqr->status = DASD_CQR_QUEUED;
+                       dasd_schedule_device_bh(device);
+                       return;
                } else {
                        fcqr->status = DASD_CQR_QUEUED;
                        cqr->status = DASD_CQR_QUEUED;
@@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
 {
        struct request *req;
        blk_status_t error = BLK_STS_OK;
+       unsigned int proc_bytes;
        int status;
 
        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
 
+       proc_bytes = cqr->proc_bytes;
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status < 0)
                error = errno_to_blk_status(status);
@@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
                blk_mq_end_request(req, error);
                blk_mq_run_hw_queues(req->q, true);
        } else {
-               blk_mq_complete_request(req);
+               /*
+                * Partial completed requests can happen with ESE devices.
+                * During read we might have gotten a NRF error and have to
+                * complete a request partially.
+                */
+               if (proc_bytes) {
+                       blk_update_request(req, BLK_STS_OK,
+                                          blk_rq_bytes(req) - proc_bytes);
+                       blk_mq_requeue_request(req, true);
+               } else {
+                       blk_mq_complete_request(req);
+               }
        }
 }
 
index a28b9ff..ad44d22 100644 (file)
@@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
        geo->head |= head;
 }
 
+/*
+ * calculate the failing track from sense data, depending on whether
+ * it is an EAV device or not
+ */
+static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
+                                   sector_t *track)
+{
+       struct dasd_eckd_private *private = device->private;
+       u8 *sense = NULL;
+       u32 cyl;
+       u8 head;
+
+       sense = dasd_get_sense(irb);
+       if (!sense) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no sense data\n");
+               return -EINVAL;
+       }
+       if (!(sense[27] & DASD_SENSE_BIT_2)) {
+               DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                             "ESE error no valid track data\n");
+               return -EINVAL;
+       }
+
+       if (sense[27] & DASD_SENSE_BIT_3) {
+               /* enhanced addressing */
+               cyl = sense[30] << 20;
+               cyl |= (sense[31] & 0xF0) << 12;
+               cyl |= sense[28] << 8;
+               cyl |= sense[29];
+       } else {
+               cyl = sense[29] << 8;
+               cyl |= sense[30];
+       }
+       head = sense[31] & 0x0F;
+       *track = cyl * private->rdc_data.trk_per_cyl + head;
+       return 0;
+}
+
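With DASD_SENSE_BIT_3 set (extended address volume) the failing cylinder is 28 bits wide, scattered across sense bytes 28-31. A worked example with hypothetical sense values:

/* sense[28..31] = 0x12, 0x34, 0x05, 0x76 (hypothetical) */
u32 cyl;
u8 head;

cyl  = 0x05 << 20;		/* sense[30]             -> bits 27..20 */
cyl |= (0x76 & 0xF0) << 12;	/* sense[31] high nibble -> bits 19..16 */
cyl |= 0x12 << 8;		/* sense[28]             -> bits 15..8  */
cyl |= 0x34;			/* sense[29]             -> bits 7..0   */
head = 0x76 & 0x0F;		/* sense[31] low nibble                 */

/* cyl == 0x571234, head == 6;
 * track = cyl * trk_per_cyl + head (e.g. 15 tracks per cylinder)
 */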
 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
                     struct dasd_device *device)
 {
@@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base,
                                             0, NULL);
 }
 
+static bool test_and_set_format_track(struct dasd_format_entry *to_format,
+                                     struct dasd_block *block)
+{
+       struct dasd_format_entry *format;
+       unsigned long flags;
+       bool rc = false;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_for_each_entry(format, &block->format_list, list) {
+               if (format->track == to_format->track) {
+                       rc = true;
+                       goto out;
+               }
+       }
+       list_add_tail(&to_format->list, &block->format_list);
+
+out:
+       spin_unlock_irqrestore(&block->format_lock, flags);
+       return rc;
+}
+
+static void clear_format_track(struct dasd_format_entry *format,
+                             struct dasd_block *block)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&block->format_lock, flags);
+       list_del_init(&format->list);
+       spin_unlock_irqrestore(&block->format_lock, flags);
+}
+
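These two helpers give each dasd_block a lock-protected test-and-set over the tracks currently being formatted, so concurrent requests hitting the same unformatted track issue only one format I/O. The caller-side flow (seen in dasd_eckd_ese_format() below), condensed:

format->track = curr_trk;
if (test_and_set_format_track(format, block))
	return ERR_PTR(-EEXIST);	/* another thread formats this track */
/* ... build and start the format request; its callback runs
 * clear_format_track(), allowing the track to be claimed again ...
 */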
 /*
  * Callback function to free ESE format requests.
  */
@@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
 {
        struct dasd_device *device = cqr->startdev;
        struct dasd_eckd_private *private = device->private;
+       struct dasd_format_entry *format = data;
 
+       clear_format_track(format, cqr->basedev->block);
        private->count--;
        dasd_ffree_request(cqr, device);
 }
 
 static struct dasd_ccw_req *
-dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
+dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
+                    struct irb *irb)
 {
        struct dasd_eckd_private *private;
+       struct dasd_format_entry *format;
        struct format_data_t fdata;
        unsigned int recs_per_trk;
        struct dasd_ccw_req *fcqr;
@@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
        struct request *req;
        sector_t first_trk;
        sector_t last_trk;
+       sector_t curr_trk;
        int rc;
 
        req = cqr->callback_data;
-       base = cqr->block->base;
+       block = cqr->block;
+       base = block->base;
        private = base->private;
-       block = base->block;
        blksize = block->bp_block;
        recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       format = &startdev->format_entry;
 
        first_trk = blk_rq_pos(req) >> block->s2b_shift;
        sector_div(first_trk, recs_per_trk);
        last_trk =
                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
        sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return ERR_PTR(rc);
 
-       fdata.start_unit = first_trk;
-       fdata.stop_unit = last_trk;
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, startdev,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return ERR_PTR(-EINVAL);
+       }
+       format->track = curr_trk;
+       /* test if the track is already being formatted by another thread */
+       if (test_and_set_format_track(format, block))
+               return ERR_PTR(-EEXIST);
+
+       fdata.start_unit = curr_trk;
+       fdata.stop_unit = curr_trk;
        fdata.blksize = blksize;
        fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
 
@@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
                return fcqr;
 
        fcqr->callback = dasd_eckd_ese_format_cb;
+       fcqr->callback_data = (void *) format;
 
        return fcqr;
 }
@@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
 /*
  * When data is read from an unformatted area of an ESE volume, this function
  * returns zeroed data and thereby mimics a read of zero data.
+ *
+ * The first unformatted track is the one that got the NRF error; its
+ * address is encoded in the sense data.
+ *
+ * All tracks before have returned valid data and should not be touched.
+ * All tracks after the unformatted track might be formatted or not. Since
+ * this is currently not known, remember the processed data and return the
+ * remainder of the request to the block layer in __dasd_cleanup_cqr().
  */
-static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
+static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
 {
+       struct dasd_eckd_private *private;
+       sector_t first_trk, last_trk;
+       sector_t first_blk, last_blk;
        unsigned int blksize, off;
+       unsigned int recs_per_trk;
        struct dasd_device *base;
        struct req_iterator iter;
+       struct dasd_block *block;
+       unsigned int skip_block;
+       unsigned int blk_count;
        struct request *req;
        struct bio_vec bv;
+       sector_t curr_trk;
+       sector_t end_blk;
        char *dst;
+       int rc;
 
        req = (struct request *) cqr->callback_data;
        base = cqr->block->base;
        blksize = base->block->bp_block;
+       block =  cqr->block;
+       private = base->private;
+       skip_block = 0;
+       blk_count = 0;
+
+       recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
+       first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
+       sector_div(first_trk, recs_per_trk);
+       last_trk = last_blk =
+               (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
+       sector_div(last_trk, recs_per_trk);
+       rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
+       if (rc)
+               return rc;
+
+       /* sanity check if the current track from sense data is valid */
+       if (curr_trk < first_trk || curr_trk > last_trk) {
+               DBF_DEV_EVENT(DBF_WARNING, base,
+                             "ESE error track %llu not within range %llu - %llu\n",
+                             curr_trk, first_trk, last_trk);
+               return -EINVAL;
+       }
+
+       /*
+        * If the NRF error did not occur on the first track, we have to skip
+        * over the valid blocks that precede it
+        */
+       if (curr_trk != first_trk)
+               skip_block = curr_trk * recs_per_trk - first_blk;
+
+       /* we have no information beyond the current track */
+       end_blk = (curr_trk + 1) * recs_per_trk;
 
        rq_for_each_segment(bv, req, iter) {
                dst = page_address(bv.bv_page) + bv.bv_offset;
                for (off = 0; off < bv.bv_len; off += blksize) {
-                       if (dst && rq_data_dir(req) == READ) {
+                       if (first_blk + blk_count >= end_blk) {
+                               cqr->proc_bytes = blk_count * blksize;
+                               return 0;
+                       }
+                       if (dst && !skip_block) {
                                dst += off;
                                memset(dst, 0, blksize);
+                       } else {
+                               skip_block--;
                        }
+                       blk_count++;
                }
        }
+       return 0;
 }
 
 /*
index 91c9f95..fa552f9 100644 (file)
@@ -187,6 +187,7 @@ struct dasd_ccw_req {
 
        void (*callback)(struct dasd_ccw_req *, void *data);
        void *callback_data;
+       unsigned int proc_bytes;        /* bytes for partial completion */
 };
 
 /*
@@ -387,8 +388,9 @@ struct dasd_discipline {
        int (*ext_pool_warn_thrshld)(struct dasd_device *);
        int (*ext_pool_oos)(struct dasd_device *);
        int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
-       struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
-       void (*ese_read)(struct dasd_ccw_req *);
+       struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
+                                          struct dasd_ccw_req *, struct irb *);
+       int (*ese_read)(struct dasd_ccw_req *, struct irb *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -474,6 +476,11 @@ struct dasd_profile {
        spinlock_t lock;
 };
 
+struct dasd_format_entry {
+       struct list_head list;
+       sector_t track;
+};
+
 struct dasd_device {
        /* Block device stuff. */
        struct dasd_block *block;
@@ -539,6 +546,7 @@ struct dasd_device {
        struct dentry *debugfs_dentry;
        struct dentry *hosts_dentry;
        struct dasd_profile profile;
+       struct dasd_format_entry format_entry;
 };
 
 struct dasd_block {
@@ -564,6 +572,9 @@ struct dasd_block {
 
        struct dentry *debugfs_dentry;
        struct dasd_profile profile;
+
+       struct list_head format_list;
+       spinlock_t format_lock;
 };
 
 struct dasd_attention_data {
index 9575a62..468cada 100644 (file)
@@ -369,7 +369,7 @@ enum qeth_qdio_info_states {
 struct qeth_buffer_pool_entry {
        struct list_head list;
        struct list_head init_list;
-       void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
+       struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER];
 };
 
 struct qeth_qdio_buffer_pool {
@@ -983,7 +983,7 @@ extern const struct attribute_group qeth_device_blkt_group;
 extern const struct device_type qeth_generic_devtype;
 
 const char *qeth_get_cardname_short(struct qeth_card *);
-int qeth_realloc_buffer_pool(struct qeth_card *, int);
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count);
 int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id);
 void qeth_core_free_discipline(struct qeth_card *);
 
index 8ca85c8..6d3f2f1 100644 (file)
@@ -65,7 +65,6 @@ static struct lock_class_key qdio_out_skb_queue_key;
 static void qeth_issue_next_read_cb(struct qeth_card *card,
                                    struct qeth_cmd_buffer *iob,
                                    unsigned int data_length);
-static void qeth_free_buffer_pool(struct qeth_card *);
 static int qeth_qdio_establish(struct qeth_card *);
 static void qeth_free_qdio_queues(struct qeth_card *card);
 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
@@ -212,49 +211,121 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
 
+static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(entry->elements); i++) {
+               if (entry->elements[i])
+                       __free_page(entry->elements[i]);
+       }
+
+       kfree(entry);
+}
+
+static void qeth_free_buffer_pool(struct qeth_card *card)
+{
+       struct qeth_buffer_pool_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list,
+                                init_list) {
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+       }
+}
+
+static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
+{
+       struct qeth_buffer_pool_entry *entry;
+       unsigned int i;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return NULL;
+
+       for (i = 0; i < pages; i++) {
+               entry->elements[i] = alloc_page(GFP_KERNEL);
+
+               if (!entry->elements[i]) {
+                       qeth_free_pool_entry(entry);
+                       return NULL;
+               }
+       }
+
+       return entry;
+}
+
 static int qeth_alloc_buffer_pool(struct qeth_card *card)
 {
-       struct qeth_buffer_pool_entry *pool_entry;
-       void *ptr;
-       int i, j;
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       unsigned int i;
 
        QETH_CARD_TEXT(card, 5, "alocpool");
        for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
-               pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
-               if (!pool_entry) {
+               struct qeth_buffer_pool_entry *entry;
+
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
                        qeth_free_buffer_pool(card);
                        return -ENOMEM;
                }
-               for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
-                       ptr = (void *) __get_free_page(GFP_KERNEL);
-                       if (!ptr) {
-                               while (j > 0)
-                                       free_page((unsigned long)
-                                                 pool_entry->elements[--j]);
-                               kfree(pool_entry);
-                               qeth_free_buffer_pool(card);
-                               return -ENOMEM;
-                       }
-                       pool_entry->elements[j] = ptr;
-               }
-               list_add(&pool_entry->init_list,
-                        &card->qdio.init_pool.entry_list);
+
+               list_add(&entry->init_list, &card->qdio.init_pool.entry_list);
        }
        return 0;
 }
 
-int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
+int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count)
 {
+       unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+       struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool;
+       struct qeth_buffer_pool_entry *entry, *tmp;
+       int delta = count - pool->buf_count;
+       LIST_HEAD(entries);
+
        QETH_CARD_TEXT(card, 2, "realcbp");
 
-       /* TODO: steel/add buffers from/to a running card's buffer pool (?) */
-       qeth_clear_working_pool_list(card);
-       qeth_free_buffer_pool(card);
-       card->qdio.in_buf_pool.buf_count = bufcnt;
-       card->qdio.init_pool.buf_count = bufcnt;
-       return qeth_alloc_buffer_pool(card);
+       /* Defer until queue is allocated: */
+       if (!card->qdio.in_q)
+               goto out;
+
+       /* Remove entries from the pool: */
+       while (delta < 0) {
+               entry = list_first_entry(&pool->entry_list,
+                                        struct qeth_buffer_pool_entry,
+                                        init_list);
+               list_del(&entry->init_list);
+               qeth_free_pool_entry(entry);
+
+               delta++;
+       }
+
+       /* Allocate additional entries: */
+       while (delta > 0) {
+               entry = qeth_alloc_pool_entry(buf_elements);
+               if (!entry) {
+                       list_for_each_entry_safe(entry, tmp, &entries,
+                                                init_list) {
+                               list_del(&entry->init_list);
+                               qeth_free_pool_entry(entry);
+                       }
+
+                       return -ENOMEM;
+               }
+
+               list_add(&entry->init_list, &entries);
+
+               delta--;
+       }
+
+       list_splice(&entries, &pool->entry_list);
+
+out:
+       card->qdio.in_buf_pool.buf_count = count;
+       pool->buf_count = count;
+       return 0;
 }
-EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
+EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool);
 
 static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
 {
@@ -1170,19 +1241,6 @@ void qeth_drain_output_queues(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
 
-static void qeth_free_buffer_pool(struct qeth_card *card)
-{
-       struct qeth_buffer_pool_entry *pool_entry, *tmp;
-       int i = 0;
-       list_for_each_entry_safe(pool_entry, tmp,
-                                &card->qdio.init_pool.entry_list, init_list){
-               for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
-                       free_page((unsigned long)pool_entry->elements[i]);
-               list_del(&pool_entry->init_list);
-               kfree(pool_entry);
-       }
-}
-
 static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
 {
        unsigned int count = single ? 1 : card->dev->num_tx_queues;
@@ -1204,7 +1262,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
        if (count == 1)
                dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
 
-       card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
        card->qdio.no_out_queues = count;
        return 0;
 }
@@ -2393,7 +2450,6 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
                return;
 
        qeth_free_cq(card);
-       cancel_delayed_work_sync(&card->buffer_reclaim_work);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (card->qdio.in_q->bufs[j].rx_skb)
                        dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
@@ -2575,7 +2631,6 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        struct list_head *plh;
        struct qeth_buffer_pool_entry *entry;
        int i, free;
-       struct page *page;
 
        if (list_empty(&card->qdio.in_buf_pool.entry_list))
                return NULL;
@@ -2584,7 +2639,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
                entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
                free = 1;
                for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-                       if (page_count(virt_to_page(entry->elements[i])) > 1) {
+                       if (page_count(entry->elements[i]) > 1) {
                                free = 0;
                                break;
                        }
@@ -2599,15 +2654,15 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
        entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
                        struct qeth_buffer_pool_entry, list);
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
-               if (page_count(virt_to_page(entry->elements[i])) > 1) {
-                       page = alloc_page(GFP_ATOMIC);
-                       if (!page) {
+               if (page_count(entry->elements[i]) > 1) {
+                       struct page *page = alloc_page(GFP_ATOMIC);
+
+                       if (!page)
                                return NULL;
-                       } else {
-                               free_page((unsigned long)entry->elements[i]);
-                               entry->elements[i] = page_address(page);
-                               QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
-                       }
+
+                       __free_page(entry->elements[i]);
+                       entry->elements[i] = page;
+                       QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
                }
        }
        list_del_init(&entry->list);
@@ -2625,12 +2680,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
                                               ETH_HLEN +
                                               sizeof(struct ipv6hdr));
                if (!buf->rx_skb)
-                       return 1;
+                       return -ENOMEM;
        }
 
        pool_entry = qeth_find_free_buffer_pool_entry(card);
        if (!pool_entry)
-               return 1;
+               return -ENOBUFS;
 
        /*
         * since the buffer is accessed only from the input_tasklet
@@ -2643,7 +2698,7 @@ static int qeth_init_input_buffer(struct qeth_card *card,
        for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
                buf->buffer->element[i].length = PAGE_SIZE;
                buf->buffer->element[i].addr =
-                       virt_to_phys(pool_entry->elements[i]);
+                       page_to_phys(pool_entry->elements[i]);
                if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
                        buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
                else
@@ -2675,10 +2730,15 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
        /* inbound queue */
        qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
        memset(&card->rx, 0, sizeof(struct qeth_rx));
+
        qeth_initialize_working_pool_list(card);
 	/* give only as many buffers to hardware as we have buffer pool entries */
-       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
-               qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+       for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
+               rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
+               if (rc)
+                       return rc;
+       }
+
        card->qdio.in_q->next_buf_to_init =
                card->qdio.in_buf_pool.buf_count - 1;
        rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
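
qeth_resize_buffer_pool() above grows the pool onto a private list and splices it into the live pool only after every allocation has succeeded, so a mid-way -ENOMEM leaves the running card untouched. Below is a minimal userspace sketch of that all-or-nothing idiom using a hand-rolled singly linked list; all names and sizes here are illustrative, not qeth's.

#include <stdio.h>
#include <stdlib.h>

struct pool_entry {
	struct pool_entry *next;
	void *page;			/* stands in for the DMA buffer page */
};

struct pool {
	struct pool_entry *head;
	unsigned int count;
};

static void free_entry(struct pool_entry *e)
{
	free(e->page);
	free(e);
}

static struct pool_entry *alloc_entry(void)
{
	struct pool_entry *e = calloc(1, sizeof(*e));

	if (!e)
		return NULL;
	e->page = malloc(4096);
	if (!e->page) {
		free(e);
		return NULL;
	}
	return e;
}

static int pool_resize(struct pool *p, unsigned int count)
{
	int delta = (int)count - (int)p->count;
	struct pool_entry *staged = NULL;

	while (delta < 0) {		/* shrinking cannot fail */
		struct pool_entry *e = p->head;

		p->head = e->next;
		free_entry(e);
		delta++;
	}

	while (delta > 0) {		/* grow onto a staging list */
		struct pool_entry *e = alloc_entry();

		if (!e) {		/* roll back the staged entries only */
			while (staged) {
				struct pool_entry *n = staged->next;

				free_entry(staged);
				staged = n;
			}
			return -1;	/* live pool is untouched */
		}
		e->next = staged;
		staged = e;
		delta--;
	}

	while (staged) {		/* success: splice into the pool */
		struct pool_entry *n = staged->next;

		staged->next = p->head;
		p->head = staged;
		staged = n;
	}
	p->count = count;
	return 0;
}

int main(void)
{
	struct pool p = { NULL, 0 };

	printf("grow to 8: %d\n", pool_resize(&p, 8));
	printf("shrink to 3: %d\n", pool_resize(&p, 3));
	return 0;
}

The staging list is what LIST_HEAD(entries) plus list_splice() buys in the kernel version: the error path frees only the entries it staged itself.
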
index 2bd9993..78cae61 100644 (file)
@@ -247,8 +247,8 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct qeth_card *card = dev_get_drvdata(dev);
+       unsigned int cnt;
        char *tmp;
-       int cnt, old_cnt;
        int rc = 0;
 
        mutex_lock(&card->conf_mutex);
@@ -257,13 +257,12 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
                goto out;
        }
 
-       old_cnt = card->qdio.in_buf_pool.buf_count;
        cnt = simple_strtoul(buf, &tmp, 10);
        cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
                ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-       if (old_cnt != cnt) {
-               rc = qeth_realloc_buffer_pool(card, cnt);
-       }
+
+       rc = qeth_resize_buffer_pool(card, cnt);
+
 out:
        mutex_unlock(&card->conf_mutex);
        return rc ? rc : count;
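
The store handler keeps the nested-ternary clamp of the user-supplied count into [QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX] before handing it to qeth_resize_buffer_pool(). The same bounds check, written out as a compilable sketch with made-up limits:

#include <stdio.h>
#include <stdlib.h>

#define IN_BUF_COUNT_MIN 8	/* illustrative, not the real qeth limits */
#define IN_BUF_COUNT_MAX 128

static unsigned int clamp_bufcnt(unsigned int cnt)
{
	if (cnt < IN_BUF_COUNT_MIN)
		return IN_BUF_COUNT_MIN;
	if (cnt > IN_BUF_COUNT_MAX)
		return IN_BUF_COUNT_MAX;
	return cnt;
}

int main(int argc, char **argv)
{
	unsigned int cnt = argc > 1 ? strtoul(argv[1], NULL, 10) : 0;

	printf("%u -> %u\n", cnt, clamp_bufcnt(cnt));
	return 0;
}

Note the handler now calls qeth_resize_buffer_pool() unconditionally; the no-change case is cheap because the delta inside the resize helper is zero.
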
index 9972d96..8fb2937 100644 (file)
@@ -284,6 +284,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
        if (card->state == CARD_STATE_SOFTSETUP) {
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 317d566..82f800d 100644 (file)
@@ -1178,6 +1178,7 @@ static void qeth_l3_stop_card(struct qeth_card *card)
                qeth_l3_clear_ip_htable(card, 1);
                qeth_clear_ipacmd_list(card);
                qeth_drain_output_queues(card);
+               cancel_delayed_work_sync(&card->buffer_reclaim_work);
                card->state = CARD_STATE_DOWN;
        }
 
index 29f2517..a3d1c3b 100644 (file)
@@ -206,12 +206,11 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
                qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
                if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) {
                        card->options.sniffer = i;
-                       if (card->qdio.init_pool.buf_count !=
-                                       QETH_IN_BUF_COUNT_MAX)
-                               qeth_realloc_buffer_pool(card,
-                                       QETH_IN_BUF_COUNT_MAX);
-               } else
+                       qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX);
+               } else {
                        rc = -EPERM;
+               }
+
                break;
        default:
                rc = -EINVAL;
index ae45cbe..cd8db13 100644 (file)
@@ -9950,6 +9950,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
        ioa_cfg->max_devs_supported = ipr_max_devs;
 
        if (ioa_cfg->sis64) {
+               host->max_channel = IPR_MAX_SIS64_BUSES;
                host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
@@ -9958,6 +9959,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                           + ((sizeof(struct ipr_config_table_entry64)
                                               * ioa_cfg->max_devs_supported)));
        } else {
+               host->max_channel = IPR_VSET_BUS;
                host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
                host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
                if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
@@ -9967,7 +9969,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                               * ioa_cfg->max_devs_supported)));
        }
 
-       host->max_channel = IPR_VSET_BUS;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
index a67baeb..b97aa9a 100644 (file)
@@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
 #define IPR_ARRAY_VIRTUAL_BUS                  0x1
 #define IPR_VSET_VIRTUAL_BUS                   0x2
 #define IPR_IOAFP_VIRTUAL_BUS                  0x3
+#define IPR_MAX_SIS64_BUSES                    0x4
 
 #define IPR_GET_RES_PHYS_LOC(res) \
        (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
index abd0e6b..2d70569 100644 (file)
@@ -3884,18 +3884,25 @@ EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
 {
        unsigned long flags;
+       bool update = false;
 
-       if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT))
+       if (!ufshcd_is_auto_hibern8_supported(hba))
                return;
 
        spin_lock_irqsave(hba->host->host_lock, flags);
-       if (hba->ahit == ahit)
-               goto out_unlock;
-       hba->ahit = ahit;
-       if (!pm_runtime_suspended(hba->dev))
-               ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
-out_unlock:
+       if (hba->ahit != ahit) {
+               hba->ahit = ahit;
+               update = true;
+       }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       if (update && !pm_runtime_suspended(hba->dev)) {
+               pm_runtime_get_sync(hba->dev);
+               ufshcd_hold(hba, false);
+               ufshcd_auto_hibern8_enable(hba);
+               ufshcd_release(hba);
+               pm_runtime_put(hba->dev);
+       }
 }
 EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
 
index e3f5ebc..fc2575f 100644 (file)
@@ -1320,6 +1320,9 @@ static const struct of_device_id qcom_slim_ngd_dt_match[] = {
        {
                .compatible = "qcom,slim-ngd-v1.5.0",
                .data = &ngd_v1_5_offset_info,
+	}, {
+               .compatible = "qcom,slim-ngd-v2.1.0",
+               .data = &ngd_v1_5_offset_info,
        },
        {}
 };
index e8a499c..02e5cba 100644 (file)
@@ -484,7 +484,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
        }
 
        if (needs_wakeup_wait_mode(q))
-               pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
+               cpu_latency_qos_add_request(&q->pm_qos_req, 0);
 
        return 0;
 }
@@ -492,7 +492,7 @@ static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
 static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
 {
        if (needs_wakeup_wait_mode(q))
-               pm_qos_remove_request(&q->pm_qos_req);
+               cpu_latency_qos_remove_request(&q->pm_qos_req);
 
        clk_disable_unprepare(q->clk);
        clk_disable_unprepare(q->clk_en);
index ba6f905..69c6dce 100644 (file)
@@ -19,6 +19,7 @@
 #include <signal.h>
 
 #define MAX_NUM_DEVICES 10
+#define MAX_SYSFS_PREFIX 0x80
 #define MAX_SYSFS_PATH 0x200
 #define CSV_MAX_LINE   0x1000
 #define SYSFS_MAX_INT  0x20
@@ -67,7 +68,7 @@ struct loopback_results {
 };
 
 struct loopback_device {
-       char name[MAX_SYSFS_PATH];
+       char name[MAX_STR_LEN];
        char sysfs_entry[MAX_SYSFS_PATH];
        char debugfs_entry[MAX_SYSFS_PATH];
        struct loopback_results results;
@@ -93,8 +94,8 @@ struct loopback_test {
        int stop_all;
        int poll_count;
        char test_name[MAX_STR_LEN];
-       char sysfs_prefix[MAX_SYSFS_PATH];
-       char debugfs_prefix[MAX_SYSFS_PATH];
+       char sysfs_prefix[MAX_SYSFS_PREFIX];
+       char debugfs_prefix[MAX_SYSFS_PREFIX];
        struct timespec poll_timeout;
        struct loopback_device devices[MAX_NUM_DEVICES];
        struct loopback_results aggregate_results;
@@ -637,7 +638,7 @@ baddir:
 static int open_poll_files(struct loopback_test *t)
 {
        struct loopback_device *dev;
-       char buf[MAX_STR_LEN];
+       char buf[MAX_SYSFS_PATH + MAX_STR_LEN];
        char dummy;
        int fds_idx = 0;
        int i;
@@ -655,7 +656,7 @@ static int open_poll_files(struct loopback_test *t)
                        goto err;
                }
                read(t->fds[fds_idx].fd, &dummy, 1);
-               t->fds[fds_idx].events = EPOLLERR|EPOLLPRI;
+               t->fds[fds_idx].events = POLLERR | POLLPRI;
                t->fds[fds_idx].revents = 0;
                fds_idx++;
        }
@@ -748,7 +749,7 @@ static int wait_for_complete(struct loopback_test *t)
                }
 
                for (i = 0; i < t->poll_count; i++) {
-                       if (t->fds[i].revents & EPOLLPRI) {
+                       if (t->fds[i].revents & POLLPRI) {
                                /* Dummy read to clear the event */
                                read(t->fds[i].fd, &dummy, 1);
                                number_of_events++;
@@ -907,10 +908,10 @@ int main(int argc, char *argv[])
                        t.iteration_max = atoi(optarg);
                        break;
                case 'S':
-                       snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
                        break;
                case 'D':
-                       snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", optarg);
+                       snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", optarg);
                        break;
                case 'm':
                        t.mask = atol(optarg);
@@ -961,10 +962,10 @@ int main(int argc, char *argv[])
        }
 
        if (!strcmp(t.sysfs_prefix, ""))
-               snprintf(t.sysfs_prefix, MAX_SYSFS_PATH, "%s", sysfs_prefix);
+               snprintf(t.sysfs_prefix, MAX_SYSFS_PREFIX, "%s", sysfs_prefix);
 
        if (!strcmp(t.debugfs_prefix, ""))
-               snprintf(t.debugfs_prefix, MAX_SYSFS_PATH, "%s", debugfs_prefix);
+               snprintf(t.debugfs_prefix, MAX_SYSFS_PREFIX, "%s", debugfs_prefix);
 
        ret = find_loopback_devices(&t);
        if (ret)
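
The loopback_test fixes are mostly about buffer sizing: a path built from a bounded prefix plus a bounded name needs a destination sized for their sum, and snprintf() must be given the destination's own capacity. A small compilable illustration with made-up constants:

#include <stdio.h>

#define MAX_PREFIX 0x80
#define MAX_NAME   0x20

int main(void)
{
	char prefix[MAX_PREFIX] = "/sys/bus/example/devices/";
	char name[MAX_NAME] = "dev0";
	char path[MAX_PREFIX + MAX_NAME];	/* sized for both parts */

	snprintf(path, sizeof(path), "%s%s", prefix, name);
	puts(path);
	return 0;
}

The EPOLLPRI to POLLPRI change in the same file is the matching API fix: this tool uses poll(2), whose struct pollfd events take POLL* constants, not the epoll flags.
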
index b5d42f4..845c881 100644 (file)
@@ -38,6 +38,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
        {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
        {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
        {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+       {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
        {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
        {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
        {}      /* Terminating entry */
index 488f253..81ecfd1 100644 (file)
@@ -561,7 +561,7 @@ static u_long get_word(struct vc_data *vc)
                return 0;
        } else if (tmpx < vc->vc_cols - 2 &&
                   (ch == SPACE || ch == 0 || (ch < 0x100 && IS_WDLM(ch))) &&
-                  get_char(vc, (u_short *)&tmp_pos + 1, &temp) > SPACE) {
+                  get_char(vc, (u_short *)tmp_pos + 1, &temp) > SPACE) {
                tmp_pos += 2;
                tmpx++;
        } else {
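
The one-character speakup fix drops a stray '&': the old expression stepped past the address of the local variable tmp_pos rather than past the screen position stored in it. A didactic, compilable illustration of the difference (it compares addresses only and never dereferences the bad pointer):

#include <stdio.h>

typedef unsigned short u_short;

int main(void)
{
	u_short cells[4] = { 10, 20, 30, 40 };
	unsigned long tmp_pos = (unsigned long)&cells[1];

	u_short *next  = (u_short *)tmp_pos + 1;	/* fixed: next cell */
	u_short *wrong = (u_short *)&tmp_pos + 1;	/* old: into tmp_pos */

	printf("next cell = %u\n", *next);		/* prints 30 */
	printf("wrong points %s\n",
	       (void *)wrong == (void *)((char *)&tmp_pos + sizeof(u_short)) ?
	       "into the local variable itself" : "elsewhere");
	return 0;
}
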
index 2428363..77bca43 100644 (file)
@@ -140,6 +140,7 @@ int hif_shutdown(struct wfx_dev *wdev)
        else
                control_reg_write(wdev, 0);
        mutex_unlock(&wdev->hif_cmd.lock);
+       mutex_unlock(&wdev->hif_cmd.key_renew_lock);
        kfree(hif);
        return ret;
 }
@@ -289,7 +290,7 @@ int hif_stop_scan(struct wfx_vif *wvif)
 }
 
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
-            const struct ieee80211_channel *channel, const u8 *ssidie)
+            struct ieee80211_channel *channel, const u8 *ssid, int ssidlen)
 {
        int ret;
        struct hif_msg *hif;
@@ -307,9 +308,9 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
        body->basic_rate_set =
                cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
        memcpy(body->bssid, conf->bssid, sizeof(body->bssid));
-       if (!conf->ibss_joined && ssidie) {
-               body->ssid_length = cpu_to_le32(ssidie[1]);
-               memcpy(body->ssid, &ssidie[2], ssidie[1]);
+       if (!conf->ibss_joined && ssid) {
+               body->ssid_length = cpu_to_le32(ssidlen);
+               memcpy(body->ssid, ssid, ssidlen);
        }
        wfx_fill_header(hif, wvif->id, HIF_REQ_ID_JOIN, sizeof(*body));
        ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -427,9 +428,9 @@ int hif_start(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
        struct hif_msg *hif;
        struct hif_req_start *body = wfx_alloc_hif(sizeof(*body), &hif);
 
-       body->dtim_period = conf->dtim_period,
-       body->short_preamble = conf->use_short_preamble,
-       body->channel_number = cpu_to_le16(channel->hw_value),
+       body->dtim_period = conf->dtim_period;
+       body->short_preamble = conf->use_short_preamble;
+       body->channel_number = cpu_to_le16(channel->hw_value);
        body->beacon_interval = cpu_to_le32(conf->beacon_int);
        body->basic_rate_set =
                cpu_to_le32(wfx_rate_mask_to_hw(wvif->wdev, conf->basic_rates));
index 20977e4..f8520a1 100644 (file)
@@ -46,7 +46,7 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
             int chan_start, int chan_num);
 int hif_stop_scan(struct wfx_vif *wvif);
 int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
-            const struct ieee80211_channel *channel, const u8 *ssidie);
+            struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
 int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout);
 int hif_set_bss_params(struct wfx_vif *wvif,
                       const struct hif_req_set_bss_params *arg);
index bf3769c..26b1406 100644 (file)
@@ -191,10 +191,10 @@ static inline int hif_set_block_ack_policy(struct wfx_vif *wvif,
 }
 
 static inline int hif_set_association_mode(struct wfx_vif *wvif,
-                                          struct ieee80211_bss_conf *info,
-                                          struct ieee80211_sta_ht_cap *ht_cap)
+                                          struct ieee80211_bss_conf *info)
 {
        int basic_rates = wfx_rate_mask_to_hw(wvif->wdev, info->basic_rates);
+       struct ieee80211_sta *sta = NULL;
        struct hif_mib_set_association_mode val = {
                .preambtype_use = 1,
                .mode = 1,
@@ -204,12 +204,17 @@ static inline int hif_set_association_mode(struct wfx_vif *wvif,
                .basic_rate_set = cpu_to_le32(basic_rates)
        };
 
+       rcu_read_lock(); // protect sta
+       if (info->bssid && !info->ibss_joined)
+               sta = ieee80211_find_sta(wvif->vif, info->bssid);
+
        // FIXME: it is strange to not retrieve all information from bss_info
-       if (ht_cap && ht_cap->ht_supported) {
-               val.mpdu_start_spacing = ht_cap->ampdu_density;
+       if (sta && sta->ht_cap.ht_supported) {
+               val.mpdu_start_spacing = sta->ht_cap.ampdu_density;
                if (!(info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT))
-                       val.greenfield = !!(ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD);
+                       val.greenfield = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
        }
+       rcu_read_unlock();
 
        return hif_write_mib(wvif->wdev, wvif->id,
                             HIF_MIB_ID_SET_ASSOCIATION_MODE, &val, sizeof(val));
index 03d0f22..af4f4bb 100644 (file)
@@ -491,9 +491,11 @@ static void wfx_set_mfp(struct wfx_vif *wvif,
 static void wfx_do_join(struct wfx_vif *wvif)
 {
        int ret;
-       const u8 *ssidie;
        struct ieee80211_bss_conf *conf = &wvif->vif->bss_conf;
        struct cfg80211_bss *bss = NULL;
+       u8 ssid[IEEE80211_MAX_SSID_LEN];
+       const u8 *ssidie = NULL;
+       int ssidlen = 0;
 
        wfx_tx_lock_flush(wvif->wdev);
 
@@ -514,11 +516,14 @@ static void wfx_do_join(struct wfx_vif *wvif)
        if (!wvif->beacon_int)
                wvif->beacon_int = 1;
 
-       rcu_read_lock();
+       rcu_read_lock(); // protect ssidie
        if (!conf->ibss_joined)
                ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
-       else
-               ssidie = NULL;
+       if (ssidie) {
+               ssidlen = ssidie[1];
+               memcpy(ssid, &ssidie[2], ssidie[1]);
+       }
+       rcu_read_unlock();
 
        wfx_tx_flush(wvif->wdev);
 
@@ -527,10 +532,8 @@ static void wfx_do_join(struct wfx_vif *wvif)
 
        wfx_set_mfp(wvif, bss);
 
-       /* Perform actual join */
        wvif->wdev->tx_burst_idx = -1;
-       ret = hif_join(wvif, conf, wvif->channel, ssidie);
-       rcu_read_unlock();
+       ret = hif_join(wvif, conf, wvif->channel, ssid, ssidlen);
        if (ret) {
                ieee80211_connection_loss(wvif->vif);
                wvif->join_complete_status = -1;
@@ -605,7 +608,9 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sta_priv->buffered); i++)
-               WARN(sta_priv->buffered[i], "release station while Tx is in progress");
+               if (sta_priv->buffered[i])
+			dev_warn(wvif->wdev->dev, "release station while %d pending frames on queue %d",
+				 sta_priv->buffered[i], i);
        // FIXME: see note in wfx_sta_add()
        if (vif->type == NL80211_IFTYPE_STATION)
                return 0;
@@ -689,6 +694,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
                        wfx_rate_mask_to_hw(wvif->wdev, sta->supp_rates[wvif->channel->band]);
        else
                wvif->bss_params.operational_rate_set = -1;
+       rcu_read_unlock();
        if (sta &&
            info->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT)
                hif_dual_cts_protection(wvif, true);
@@ -701,8 +707,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
        wvif->bss_params.beacon_lost_count = 20;
        wvif->bss_params.aid = info->aid;
 
-       hif_set_association_mode(wvif, info, sta ? &sta->ht_cap : NULL);
-       rcu_read_unlock();
+       hif_set_association_mode(wvif, info);
 
        if (!info->ibss_joined) {
                hif_keep_alive_period(wvif, 30 /* sec */);
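
The wfx join rework copies the SSID out of the RCU-protected IE into a stack buffer while the read lock is held, so the lock can be dropped before hif_join() sleeps. A userspace sketch of the same snapshot-then-unlock pattern, with a pthread rwlock standing in for rcu_read_lock() (compile with -lpthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_SSID_LEN 32

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;
static char shared_ssid[MAX_SSID_LEN + 1] = "example-net";

static void send_join(const char *ssid, size_t len)
{
	/* stands in for the sleeping firmware command */
	printf("join \"%.*s\" (%zu bytes)\n", (int)len, ssid, len);
}

int main(void)
{
	char ssid[MAX_SSID_LEN + 1];
	size_t len;

	pthread_rwlock_rdlock(&lk);
	len = strlen(shared_ssid);	/* snapshot under the lock */
	memcpy(ssid, shared_ssid, len + 1);
	pthread_rwlock_unlock(&lk);

	send_join(ssid, len);		/* protected data no longer needed */
	return 0;
}

The hif_set_association_mode() hunk applies the same discipline in the other direction: instead of carrying an RCU-protected ht_cap pointer across a lock boundary, it looks the station up and reads it entirely inside its own rcu_read_lock() section.
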
index 7d6ecc3..a2ce990 100644 (file)
@@ -954,7 +954,7 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width)
        ret = tb_port_read(port, &phy, TB_CFG_PORT,
                           port->cap_phy + LANE_ADP_CS_0, 1);
        if (ret)
-               return ret;
+               return false;
 
        widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
                LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
index 6f343ca..76fe72b 100644 (file)
@@ -569,7 +569,7 @@ static void omap8250_uart_qos_work(struct work_struct *work)
        struct omap8250_priv *priv;
 
        priv = container_of(work, struct omap8250_priv, qos_work);
-       pm_qos_update_request(&priv->pm_qos_request, priv->latency);
+       cpu_latency_qos_update_request(&priv->pm_qos_request, priv->latency);
 }
 
 #ifdef CONFIG_SERIAL_8250_DMA
@@ -1222,10 +1222,9 @@ static int omap8250_probe(struct platform_device *pdev)
                         DEFAULT_CLK_SPEED);
        }
 
-       priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
-       priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
-       pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
-                          priv->latency);
+       priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+       priv->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+       cpu_latency_qos_add_request(&priv->pm_qos_request, priv->latency);
        INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);
 
        spin_lock_init(&priv->rx_dma_lock);
@@ -1295,7 +1294,7 @@ static int omap8250_remove(struct platform_device *pdev)
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        serial8250_unregister_port(priv->line);
-       pm_qos_remove_request(&priv->pm_qos_request);
+       cpu_latency_qos_remove_request(&priv->pm_qos_request);
        device_init_wakeup(&pdev->dev, false);
        return 0;
 }
@@ -1445,7 +1444,7 @@ static int omap8250_runtime_suspend(struct device *dev)
        if (up->dma && up->dma->rxchan)
                omap_8250_rx_dma_flush(up);
 
-       priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+       priv->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
        schedule_work(&priv->qos_work);
 
        return 0;
index 48017ce..e0b720a 100644 (file)
@@ -831,7 +831,7 @@ static void serial_omap_uart_qos_work(struct work_struct *work)
        struct uart_omap_port *up = container_of(work, struct uart_omap_port,
                                                qos_work);
 
-       pm_qos_update_request(&up->pm_qos_request, up->latency);
+       cpu_latency_qos_update_request(&up->pm_qos_request, up->latency);
 }
 
 static void
@@ -1722,10 +1722,9 @@ static int serial_omap_probe(struct platform_device *pdev)
                         DEFAULT_CLK_SPEED);
        }
 
-       up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
-       up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
-       pm_qos_add_request(&up->pm_qos_request,
-               PM_QOS_CPU_DMA_LATENCY, up->latency);
+       up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+       up->calc_latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
+       cpu_latency_qos_add_request(&up->pm_qos_request, up->latency);
        INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
 
        platform_set_drvdata(pdev, up);
@@ -1759,7 +1758,7 @@ err_add_port:
        pm_runtime_dont_use_autosuspend(&pdev->dev);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       pm_qos_remove_request(&up->pm_qos_request);
+       cpu_latency_qos_remove_request(&up->pm_qos_request);
        device_init_wakeup(up->dev, false);
 err_rs485:
 err_port_line:
@@ -1777,7 +1776,7 @@ static int serial_omap_remove(struct platform_device *dev)
        pm_runtime_dont_use_autosuspend(up->dev);
        pm_runtime_put_sync(up->dev);
        pm_runtime_disable(up->dev);
-       pm_qos_remove_request(&up->pm_qos_request);
+       cpu_latency_qos_remove_request(&up->pm_qos_request);
        device_init_wakeup(&dev->dev, false);
 
        return 0;
@@ -1869,7 +1868,7 @@ static int serial_omap_runtime_suspend(struct device *dev)
 
        serial_omap_enable_wakeup(up, true);
 
-       up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+       up->latency = PM_QOS_CPU_LATENCY_DEFAULT_VALUE;
        schedule_work(&up->qos_work);
 
        return 0;
index a1453fe..5a6f36b 100644 (file)
@@ -1589,9 +1589,7 @@ void tty_kclose(struct tty_struct *tty)
        tty_debug_hangup(tty, "freeing structure\n");
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        mutex_lock(&tty_mutex);
        tty_port_set_kopened(tty->port, 0);
@@ -1621,9 +1619,7 @@ void tty_release_struct(struct tty_struct *tty, int idx)
        tty_debug_hangup(tty, "freeing structure\n");
        /*
         * The release_tty function takes care of the details of clearing
-        * the slots and preserving the termios structure. The tty_unlock_pair
-        * should be safe as we keep a kref while the tty is locked (so the
-        * unlock never unlocks a freed tty).
+        * the slots and preserving the termios structure.
         */
        mutex_lock(&tty_mutex);
        release_tty(tty, idx);
@@ -2734,9 +2730,11 @@ static int compat_tty_tiocgserial(struct tty_struct *tty,
        struct serial_struct32 v32;
        struct serial_struct v;
        int err;
-       memset(&v, 0, sizeof(struct serial_struct));
 
-       if (!tty->ops->set_serial)
+       memset(&v, 0, sizeof(v));
+       memset(&v32, 0, sizeof(v32));
+
+       if (!tty->ops->get_serial)
                return -ENOTTY;
        err = tty->ops->get_serial(tty, &v);
        if (!err) {
index d8e7eb2..a479af3 100644 (file)
@@ -393,8 +393,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
        }
 
        if (pdata.flags & CI_HDRC_PMQOS)
-               pm_qos_add_request(&data->pm_qos_req,
-                       PM_QOS_CPU_DMA_LATENCY, 0);
+               cpu_latency_qos_add_request(&data->pm_qos_req, 0);
 
        ret = imx_get_clks(dev);
        if (ret)
@@ -478,7 +477,7 @@ disable_hsic_regulator:
                /* don't overwrite original ret (cf. EPROBE_DEFER) */
                regulator_disable(data->hsic_pad_regulator);
        if (pdata.flags & CI_HDRC_PMQOS)
-               pm_qos_remove_request(&data->pm_qos_req);
+               cpu_latency_qos_remove_request(&data->pm_qos_req);
        data->ci_pdev = NULL;
        return ret;
 }
@@ -499,7 +498,7 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
        if (data->ci_pdev) {
                imx_disable_unprepare_clks(&pdev->dev);
                if (data->plat_data->flags & CI_HDRC_PMQOS)
-                       pm_qos_remove_request(&data->pm_qos_req);
+                       cpu_latency_qos_remove_request(&data->pm_qos_req);
                if (data->hsic_pad_regulator)
                        regulator_disable(data->hsic_pad_regulator);
        }
@@ -527,7 +526,7 @@ static int __maybe_unused imx_controller_suspend(struct device *dev)
 
        imx_disable_unprepare_clks(dev);
        if (data->plat_data->flags & CI_HDRC_PMQOS)
-               pm_qos_remove_request(&data->pm_qos_req);
+               cpu_latency_qos_remove_request(&data->pm_qos_req);
 
        data->in_lpm = true;
 
@@ -547,8 +546,7 @@ static int __maybe_unused imx_controller_resume(struct device *dev)
        }
 
        if (data->plat_data->flags & CI_HDRC_PMQOS)
-               pm_qos_add_request(&data->pm_qos_req,
-                       PM_QOS_CPU_DMA_LATENCY, 0);
+               cpu_latency_qos_add_request(&data->pm_qos_req, 0);
 
        ret = imx_prepare_enable_clks(dev);
        if (ret)
index ffaf46f..4c4ac30 100644 (file)
@@ -1530,18 +1530,19 @@ static const struct usb_ep_ops usb_ep_ops = {
 static void ci_hdrc_gadget_connect(struct usb_gadget *_gadget, int is_active)
 {
        struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
-       unsigned long flags;
 
        if (is_active) {
                pm_runtime_get_sync(&_gadget->dev);
                hw_device_reset(ci);
-               spin_lock_irqsave(&ci->lock, flags);
+               spin_lock_irq(&ci->lock);
                if (ci->driver) {
                        hw_device_state(ci, ci->ep0out->qh.dma);
                        usb_gadget_set_state(_gadget, USB_STATE_POWERED);
+                       spin_unlock_irq(&ci->lock);
                        usb_udc_vbus_handler(_gadget, true);
+               } else {
+                       spin_unlock_irq(&ci->lock);
                }
-               spin_unlock_irqrestore(&ci->lock, flags);
        } else {
                usb_udc_vbus_handler(_gadget, false);
                if (ci->driver)
index 62f4fb9..47f09a6 100644 (file)
@@ -896,10 +896,10 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
 
        ss->xmit_fifo_size = acm->writesize;
        ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
-       ss->close_delay = acm->port.close_delay / 10;
+       ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
        ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
                                ASYNC_CLOSING_WAIT_NONE :
-                               acm->port.closing_wait / 10;
+                               jiffies_to_msecs(acm->port.closing_wait) / 10;
        return 0;
 }
 
@@ -907,24 +907,32 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
 {
        struct acm *acm = tty->driver_data;
        unsigned int closing_wait, close_delay;
+       unsigned int old_closing_wait, old_close_delay;
        int retval = 0;
 
-       close_delay = ss->close_delay * 10;
+       close_delay = msecs_to_jiffies(ss->close_delay * 10);
        closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
-                       ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
+                       ASYNC_CLOSING_WAIT_NONE :
+                       msecs_to_jiffies(ss->closing_wait * 10);
+
+       /* we must redo the rounding here, so that the values match */
+       old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
+       old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
+                               ASYNC_CLOSING_WAIT_NONE :
+                               jiffies_to_msecs(acm->port.closing_wait) / 10;
 
        mutex_lock(&acm->port.mutex);
 
-       if (!capable(CAP_SYS_ADMIN)) {
-               if ((close_delay != acm->port.close_delay) ||
-                   (closing_wait != acm->port.closing_wait))
+       if ((ss->close_delay != old_close_delay) ||
+            (ss->closing_wait != old_closing_wait)) {
+               if (!capable(CAP_SYS_ADMIN))
                        retval = -EPERM;
-               else
-                       retval = -EOPNOTSUPP;
-       } else {
-               acm->port.close_delay  = close_delay;
-               acm->port.closing_wait = closing_wait;
-       }
+               else {
+                       acm->port.close_delay  = close_delay;
+                       acm->port.closing_wait = closing_wait;
+               }
+       } else
+               retval = -EOPNOTSUPP;
 
        mutex_unlock(&acm->port.mutex);
        return retval;
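
The cdc-acm change compares the user's request against the stored value only after converting both to the same user-visible units, because centiseconds -> jiffies -> centiseconds is lossy. A compilable sketch of why the round trip matters, with an assumed tick rate and simplified conversion helpers (the real msecs_to_jiffies() rounding differs across configurations):

#include <stdio.h>

#define HZ 250	/* assumed tick rate */

static unsigned long msecs_to_jiffies_sketch(unsigned int ms)
{
	return ((unsigned long)ms * HZ + 999) / 1000;	/* round up */
}

static unsigned int jiffies_to_msecs_sketch(unsigned long j)
{
	return j * 1000 / HZ;
}

int main(void)
{
	unsigned int user_cs = 3;	/* user asks for 30 ms */
	unsigned long stored = msecs_to_jiffies_sketch(user_cs * 10);
	unsigned int old_cs = jiffies_to_msecs_sketch(stored) / 10;

	/* comparing user_cs with old_cs (same units, same rounding) is
	 * stable; comparing raw jiffies with user_cs * 10 is not, so an
	 * unprivileged no-op TIOCSSERIAL would spuriously hit -EPERM */
	printf("user=%u stored_jiffies=%lu roundtrip=%u\n",
	       user_cs, stored, old_cs);
	return 0;
}
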
index 2dac3e7..da30b56 100644 (file)
@@ -378,6 +378,12 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
                        USB_QUIRK_IGNORE_REMOTE_WAKEUP },
 
+       /* Realtek hub in Dell WD19 (Type-C) */
+       { USB_DEVICE(0x0bda, 0x0487), .driver_info = USB_QUIRK_NO_LPM },
+
+       /* Generic RTL8153 based ethernet adapters */
+       { USB_DEVICE(0x0bda, 0x8153), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
index 5e9b537..1fddc41 100644 (file)
@@ -136,7 +136,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_AMD_PLL_FIX;
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
-               (pdev->device == 0x15e0 ||
+               (pdev->device == 0x145c ||
+                pdev->device == 0x15e0 ||
                 pdev->device == 0x15e1 ||
                 pdev->device == 0x43bb))
                xhci->quirks |= XHCI_SUSPEND_DELAY;
index d90cd5e..315b455 100644 (file)
@@ -445,6 +445,7 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
 static struct platform_driver usb_xhci_driver = {
        .probe  = xhci_plat_probe,
        .remove = xhci_plat_remove,
+       .shutdown = usb_hcd_platform_shutdown,
        .driver = {
                .name = "xhci-hcd",
                .pm = &xhci_plat_pm_ops,
index 56eb867..b19582b 100644 (file)
@@ -289,23 +289,12 @@ DECLARE_EVENT_CLASS(xhci_log_urb,
        ),
        TP_printk("ep%d%s-%s: urb %p pipe %u slot %d length %d/%d sgs %d/%d stream %d flags %08x",
                        __entry->epnum, __entry->dir_in ? "in" : "out",
-                       ({ char *s;
-                       switch (__entry->type) {
-                       case USB_ENDPOINT_XFER_INT:
-                               s = "intr";
-                               break;
-                       case USB_ENDPOINT_XFER_CONTROL:
-                               s = "control";
-                               break;
-                       case USB_ENDPOINT_XFER_BULK:
-                               s = "bulk";
-                               break;
-                       case USB_ENDPOINT_XFER_ISOC:
-                               s = "isoc";
-                               break;
-                       default:
-                               s = "UNKNOWN";
-                       } s; }), __entry->urb, __entry->pipe, __entry->slot_id,
+                       __print_symbolic(__entry->type,
+                                  { USB_ENDPOINT_XFER_INT,     "intr" },
+                                  { USB_ENDPOINT_XFER_CONTROL, "control" },
+                                  { USB_ENDPOINT_XFER_BULK,    "bulk" },
+                                  { USB_ENDPOINT_XFER_ISOC,    "isoc" }),
+                       __entry->urb, __entry->pipe, __entry->slot_id,
                        __entry->actual, __entry->length, __entry->num_mapped_sgs,
                        __entry->num_sgs, __entry->stream, __entry->flags
                )
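
The tracepoint cleanup replaces an open-coded switch with __print_symbolic(), which is essentially a value-to-name lookup table. A plain C analogue of that table-driven formatting (the enum values here are illustrative, not the real USB descriptor constants):

#include <stdio.h>

enum xfer_type { XFER_INT, XFER_CONTROL, XFER_BULK, XFER_ISOC };

static const struct {
	enum xfer_type val;
	const char *name;
} xfer_names[] = {
	{ XFER_INT,	"intr" },
	{ XFER_CONTROL,	"control" },
	{ XFER_BULK,	"bulk" },
	{ XFER_ISOC,	"isoc" },
};

static const char *xfer_name(enum xfer_type t)
{
	for (unsigned int i = 0; i < sizeof(xfer_names) / sizeof(xfer_names[0]); i++)
		if (xfer_names[i].val == t)
			return xfer_names[i].name;
	return "UNKNOWN";	/* same fallback as the old switch default */
}

int main(void)
{
	printf("%s\n", xfer_name(XFER_BULK));
	return 0;
}
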
index 084cc2f..0b5dcf9 100644 (file)
@@ -1183,6 +1183,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110a, 0xff),    /* Telit ME910G1 */
          .driver_info = NCTRL(0) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x110b, 0xff),    /* Telit ME910G1 (ECM) */
+         .driver_info = NCTRL(0) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
index aab737e..c5a2995 100644 (file)
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
+       { USB_DEVICE(HP_VENDOR_ID, HP_LD381_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
        { USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
index a019ea7..52db551 100644 (file)
 #define HP_LM920_PRODUCT_ID    0x026b
 #define HP_TD620_PRODUCT_ID    0x0956
 #define HP_LD960_PRODUCT_ID    0x0b39
+#define HP_LD381_PRODUCT_ID    0x0f7f
 #define HP_LCM220_PRODUCT_ID   0x3139
 #define HP_LCM960_PRODUCT_ID   0x3239
 #define HP_LD220_PRODUCT_ID    0x3524
index 0f1273a..048381c 100644 (file)
@@ -271,6 +271,9 @@ void ucsi_displayport_remove_partner(struct typec_altmode *alt)
                return;
 
        dp = typec_altmode_get_drvdata(alt);
+       if (!dp)
+               return;
+
        dp->data.conf = 0;
        dp->data.status = 0;
        dp->initialized = false;
@@ -285,6 +288,8 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        struct typec_altmode *alt;
        struct ucsi_dp *dp;
 
+       mutex_lock(&con->lock);
+
        /* We can't rely on the firmware with the capabilities. */
        desc->vdo |= DP_CAP_DP_SIGNALING | DP_CAP_RECEPTACLE;
 
@@ -293,12 +298,15 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        desc->vdo |= all_assignments << 16;
 
        alt = typec_port_register_altmode(con->port, desc);
-       if (IS_ERR(alt))
+       if (IS_ERR(alt)) {
+               mutex_unlock(&con->lock);
                return alt;
+       }
 
        dp = devm_kzalloc(&alt->dev, sizeof(*dp), GFP_KERNEL);
        if (!dp) {
                typec_unregister_altmode(alt);
+               mutex_unlock(&con->lock);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -311,5 +319,7 @@ struct typec_altmode *ucsi_register_displayport(struct ucsi_connector *con,
        alt->ops = &ucsi_displayport_ops;
        typec_altmode_set_drvdata(alt, dp);
 
+       mutex_unlock(&con->lock);
+
        return alt;
 }
index 7bfe365..341458f 100644 (file)
@@ -959,8 +959,8 @@ out_iput:
        iput(vb->vb_dev_info.inode);
 out_kern_unmount:
        kern_unmount(balloon_mnt);
-#endif
 out_del_vqs:
+#endif
        vdev->config->del_vqs(vdev);
 out_free_vb:
        kfree(vb);
index 867c7eb..58b96ba 100644 (file)
@@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
                                         vq->split.queue_size_in_bytes,
                                         vq->split.vring.desc,
                                         vq->split.queue_dma_addr);
-
-                       kfree(vq->split.desc_state);
                }
        }
+       if (!vq->packed_ring)
+               kfree(vq->split.desc_state);
        list_del(&_vq->list);
        kfree(vq);
 }
index 0f7373b..69e92e6 100644 (file)
@@ -1,10 +1,12 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* iTCO Vendor Specific Support hooks */
 #ifdef CONFIG_ITCO_VENDOR_SUPPORT
+extern int iTCO_vendorsupport;
 extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
 extern void iTCO_vendor_pre_stop(struct resource *);
 extern int iTCO_vendor_check_noreboot_on(void);
 #else
+#define iTCO_vendorsupport                             0
 #define iTCO_vendor_pre_start(acpibase, heartbeat)     {}
 #define iTCO_vendor_pre_stop(acpibase)                 {}
 #define iTCO_vendor_check_noreboot_on()                        1
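
This header uses the usual compiled-out stub pattern: when CONFIG_ITCO_VENDOR_SUPPORT is off, the new iTCO_vendorsupport variable collapses to a constant 0 macro, so callers need no #ifdefs of their own and dead branches fold away at compile time. A self-contained sketch of the pattern with an illustrative config knob:

#include <stdio.h>

#ifdef CONFIG_FEATURE
extern int feature_support;	/* provided by the feature module */
#else
#define feature_support 0	/* compiled out: folds to a constant */
#endif

int main(void)
{
	if (feature_support)	/* eliminated entirely when disabled */
		puts("feature-specific setup");
	else
		puts("feature disabled");
	return 0;
}
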
index 4f1b96f..cf0eaa0 100644 (file)
 /* Broken BIOS */
 #define BROKEN_BIOS            911
 
-static int vendorsupport;
-module_param(vendorsupport, int, 0);
+int iTCO_vendorsupport;
+EXPORT_SYMBOL(iTCO_vendorsupport);
+
+module_param_named(vendorsupport, iTCO_vendorsupport, int, 0);
 MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
                        "0 (none), 1=SuperMicro Pent3, 911=Broken SMI BIOS");
 
@@ -152,7 +154,7 @@ static void broken_bios_stop(struct resource *smires)
 void iTCO_vendor_pre_start(struct resource *smires,
                           unsigned int heartbeat)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_start(smires);
                break;
@@ -165,7 +167,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_start);
 
 void iTCO_vendor_pre_stop(struct resource *smires)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                supermicro_old_pre_stop(smires);
                break;
@@ -178,7 +180,7 @@ EXPORT_SYMBOL(iTCO_vendor_pre_stop);
 
 int iTCO_vendor_check_noreboot_on(void)
 {
-       switch (vendorsupport) {
+       switch (iTCO_vendorsupport) {
        case SUPERMICRO_OLD_BOARD:
                return 0;
        default:
@@ -189,13 +191,13 @@ EXPORT_SYMBOL(iTCO_vendor_check_noreboot_on);
 
 static int __init iTCO_vendor_init_module(void)
 {
-       if (vendorsupport == SUPERMICRO_NEW_BOARD) {
+       if (iTCO_vendorsupport == SUPERMICRO_NEW_BOARD) {
                pr_warn("Option vendorsupport=%d is no longer supported, "
                        "please use the w83627hf_wdt driver instead\n",
                        SUPERMICRO_NEW_BOARD);
                return -EINVAL;
        }
-       pr_info("vendor-support=%d\n", vendorsupport);
+       pr_info("vendor-support=%d\n", iTCO_vendorsupport);
        return 0;
 }
 
index 156360e..e707c47 100644 (file)
@@ -459,13 +459,25 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        if (!p->tco_res)
                return -ENODEV;
 
-       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
-       if (!p->smi_res)
-               return -ENODEV;
-
        p->iTCO_version = pdata->version;
        p->pci_dev = to_pci_dev(dev->parent);
 
+       p->smi_res = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_IO_SMI);
+       if (p->smi_res) {
+               /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
+               if (!devm_request_region(dev, p->smi_res->start,
+                                        resource_size(p->smi_res),
+                                        pdev->name)) {
+                       pr_err("I/O address 0x%04llx already in use, device disabled\n",
+                              (u64)SMI_EN(p));
+                       return -EBUSY;
+               }
+       } else if (iTCO_vendorsupport ||
+                  turn_SMI_watchdog_clear_off >= p->iTCO_version) {
+               pr_err("SMI I/O resource is missing\n");
+               return -ENODEV;
+       }
+
        iTCO_wdt_no_reboot_bit_setup(p, pdata);
 
        /*
@@ -492,14 +504,6 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
        /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
        p->update_no_reboot_bit(p->no_reboot_priv, true);
 
-       /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
-       if (!devm_request_region(dev, p->smi_res->start,
-                                resource_size(p->smi_res),
-                                pdev->name)) {
-               pr_err("I/O address 0x%04llx already in use, device disabled\n",
-                      (u64)SMI_EN(p));
-               return -EBUSY;
-       }
        if (turn_SMI_watchdog_clear_off >= p->iTCO_version) {
                /*
                 * Bit 13: TCO_EN -> 0
index df415c0..de1ae0b 100644 (file)
@@ -19,7 +19,7 @@
 void afs_put_addrlist(struct afs_addr_list *alist)
 {
        if (alist && refcount_dec_and_test(&alist->usage))
-               call_rcu(&alist->rcu, (rcu_callback_t)kfree);
+               kfree_rcu(alist, rcu);
 }
 
 /*
index 1d81fc4..35f951a 100644 (file)
@@ -81,7 +81,7 @@ enum afs_call_state {
  * List of server addresses.
  */
 struct afs_addr_list {
-       struct rcu_head         rcu;            /* Must be first */
+       struct rcu_head         rcu;
        refcount_t              usage;
        u32                     version;        /* Version */
        unsigned char           max_addrs;
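
Switching afs_put_addrlist() from call_rcu(&alist->rcu, (rcu_callback_t)kfree) to kfree_rcu(alist, rcu) is what lets the "Must be first" comment go: kfree_rcu() records the offset of the rcu_head member and recovers the enclosing object from it, while casting kfree into an RCU callback only works when the head sits at offset 0. A userspace sketch of that offset arithmetic (types are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head_sketch { void *next; };	/* placeholder for struct rcu_head */

struct addr_list {
	unsigned int version;		/* head no longer has to be first */
	struct rcu_head_sketch rcu;
};

static void free_from_head(struct rcu_head_sketch *head, size_t offset)
{
	/* what a kfree_rcu-style helper does once the grace period ends */
	free((char *)head - offset);
}

int main(void)
{
	struct addr_list *a = calloc(1, sizeof(*a));

	if (!a)
		return 1;
	free_from_head(&a->rcu, offsetof(struct addr_list, rcu));
	puts("freed via recorded member offset");
	return 0;
}
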
index 404e050..7f09147 100644 (file)
@@ -856,9 +856,9 @@ static void clear_incompat_bg_bits(struct btrfs_fs_info *fs_info, u64 flags)
                                found_raid1c34 = true;
                        up_read(&sinfo->groups_sem);
                }
-               if (found_raid56)
+               if (!found_raid56)
                        btrfs_clear_fs_incompat(fs_info, RAID56);
-               if (found_raid1c34)
+               if (!found_raid1c34)
                        btrfs_clear_fs_incompat(fs_info, RAID1C34);
        }
 }
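
The btrfs fix inverts two tests: an incompat bit may be cleared only when the rescan finds no remaining block group using the feature. The corrected logic in miniature (flag values are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FLAG_RAID56 0x1

static unsigned int incompat_flags = FLAG_RAID56;

static void rescan(bool still_in_use)
{
	if (!still_in_use)		/* was: if (still_in_use) */
		incompat_flags &= ~FLAG_RAID56;
}

int main(void)
{
	rescan(true);
	printf("in use: flags=%#x\n", incompat_flags);
	rescan(false);
	printf("unused: flags=%#x\n", incompat_flags);
	return 0;
}
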
index 27076eb..d267eb5 100644 (file)
@@ -9496,6 +9496,10 @@ out_fail:
                ret = btrfs_sync_log(trans, BTRFS_I(old_inode)->root, &ctx);
                if (ret)
                        commit_transaction = true;
+       } else if (sync_log) {
+               mutex_lock(&root->log_mutex);
+               list_del(&ctx.list);
+               mutex_unlock(&root->log_mutex);
        }
        if (commit_transaction) {
                ret = btrfs_commit_transaction(trans);
index 0ef0994..36e7b2f 100644 (file)
@@ -555,7 +555,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
                if (server->ops->close)
                        server->ops->close(xid, tcon, &fid);
                cifs_del_pending_open(&open);
-               fput(file);
                rc = -ENOMEM;
        }
 
index 3b942ec..8f9d849 100644 (file)
@@ -1169,7 +1169,8 @@ try_again:
        rc = posix_lock_file(file, flock, NULL);
        up_write(&cinode->lock_sem);
        if (rc == FILE_LOCK_DEFERRED) {
-               rc = wait_event_interruptible(flock->fl_wait, !flock->fl_blocker);
+               rc = wait_event_interruptible(flock->fl_wait,
+                                       list_empty(&flock->fl_blocked_member));
                if (!rc)
                        goto try_again;
                locks_delete_block(flock);
index 1e8a4b1..b16f8d2 100644 (file)
@@ -2191,7 +2191,7 @@ int cifs_getattr(const struct path *path, struct kstat *stat,
                if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID))
                        stat->gid = current_fsgid();
        }
-       return rc;
+       return 0;
 }
 
 int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
index c31e84e..cfe9b80 100644 (file)
@@ -2222,6 +2222,8 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
                goto qdf_free;
        }
 
+       atomic_inc(&tcon->num_remote_opens);
+
        qd_rsp = (struct smb2_query_directory_rsp *)rsp_iov[1].iov_base;
        if (qd_rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) {
                trace_smb3_query_dir_done(xid, fid->persistent_fid,
@@ -3417,7 +3419,7 @@ static int smb3_fiemap(struct cifs_tcon *tcon,
        if (rc)
                goto out;
 
-       if (out_data_len < sizeof(struct file_allocated_range_buffer)) {
+       if (out_data_len && out_data_len < sizeof(struct file_allocated_range_buffer)) {
                rc = -EINVAL;
                goto out;
        }
index 65cb09f..08c9f21 100644 (file)
@@ -539,6 +539,15 @@ int fscrypt_drop_inode(struct inode *inode)
        mk = ci->ci_master_key->payload.data[0];
 
        /*
+        * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
+        * protected by the key were cleaned by sync_filesystem().  But if
+        * userspace is still using the files, inodes can be dirtied between
+        * then and now.  We mustn't lose any writes, so skip dirty inodes here.
+        */
+       if (inode->i_state & I_DIRTY_ALL)
+               return 0;
+
+       /*
         * Note: since we aren't holding ->mk_secret_sem, the result here can
         * immediately become outdated.  But there's no correctness problem with
         * unnecessarily evicting.  Nor is there a correctness problem with not
index b041b66..eee3c92 100644 (file)
@@ -1854,9 +1854,9 @@ fetch_events:
                waiter = true;
                init_waitqueue_entry(&wait, current);
 
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __add_wait_queue_exclusive(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        for (;;) {
@@ -1904,9 +1904,9 @@ send_events:
                goto fetch_events;
 
        if (waiter) {
-               spin_lock_irq(&ep->wq.lock);
+               write_lock_irq(&ep->lock);
                __remove_wait_queue(&ep->wq, &wait);
-               spin_unlock_irq(&ep->wq.lock);
+               write_unlock_irq(&ep->lock);
        }
 
        return res;
index a364e1a..c8a4e4c 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -540,9 +540,14 @@ static int alloc_fd(unsigned start, unsigned flags)
        return __alloc_fd(current->files, start, rlimit(RLIMIT_NOFILE), flags);
 }
 
+int __get_unused_fd_flags(unsigned flags, unsigned long nofile)
+{
+       return __alloc_fd(current->files, 0, nofile, flags);
+}
+
 int get_unused_fd_flags(unsigned flags)
 {
-       return __alloc_fd(current->files, 0, rlimit(RLIMIT_NOFILE), flags);
+       return __get_unused_fd_flags(flags, rlimit(RLIMIT_NOFILE));
 }
 EXPORT_SYMBOL(get_unused_fd_flags);
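
The refactor keeps get_unused_fd_flags() as a thin wrapper around a new helper that takes the limit explicitly, so io_uring (below) can capture the submitter's RLIMIT_NOFILE at prepare time and apply it later from a worker context. A sketch of the wrapper-plus-parameter shape in plain C (the toy allocator is a stand-in, not the real fd table):

#include <stdio.h>

static int alloc_fd_upto(unsigned long limit)
{
	static unsigned long next_fd;	/* toy allocator state */

	if (next_fd >= limit)
		return -1;		/* stands in for -EMFILE */
	return (int)next_fd++;
}

static int get_fd(void)
{
	return alloc_fd_upto(1024);	/* old entry point, fixed limit */
}

int main(void)
{
	int a = get_fd();		/* default limit */
	int b = alloc_fd_upto(4);	/* caller-captured limit */

	printf("a=%d b=%d\n", a, b);
	return 0;
}
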
 
index 8e02d76..97eec75 100644 (file)
@@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
 
-       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
@@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
                wake_up(&req->waitq);
        }
 
-       if (async)
+       if (test_bit(FR_ASYNC, &req->flags))
                req->args->end(fc, req->args, req->out.h.error);
 put_request:
        fuse_put_request(fc, req);
@@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
        req->in.h.opcode = args->opcode;
        req->in.h.nodeid = args->nodeid;
        req->args = args;
+       if (args->end)
+               __set_bit(FR_ASYNC, &req->flags);
 }
 
 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
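
The FR_ASYNC change appears to close a use-after-free window: once FR_FINISHED is set, a synchronous submitter (woken early, e.g. by a signal) may return and free the fuse_args, so req->args must not be dereferenced afterwards merely to learn whether an ->end callback exists. Latching that fact into a request flag at fuse_args_to_req() time keeps the needed bit inside the request itself. Timeline sketch of the race being closed (hypothetical interleaving):

    /*
     *   CPU0 (fuse_request_end)          CPU1 (sync submitter)
     *   test_and_set_bit(FR_FINISHED)
     *                                    woken, sees FR_FINISHED,
     *                                    frees args (on its stack)
     *   async = req->args->end;   <-- use-after-free in the old code
     */
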
index aa75e23..ca344bf 100644 (file)
@@ -301,6 +301,7 @@ struct fuse_io_priv {
  * FR_SENT:            request is in userspace, waiting for an answer
  * FR_FINISHED:                request is finished
  * FR_PRIVATE:         request is on private list
+ * FR_ASYNC:           request is asynchronous
  */
 enum fuse_req_flag {
        FR_ISREPLY,
@@ -314,6 +315,7 @@ enum fuse_req_flag {
        FR_SENT,
        FR_FINISHED,
        FR_PRIVATE,
+       FR_ASYNC,
 };
 
 /**
index 2716d56..8294851 100644 (file)
@@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
                if (!(file->f_mode & FMODE_OPENED))
                        return finish_no_open(file, d);
                dput(d);
-               return 0;
+               return excl && (flags & O_CREAT) ? -EEXIST : 0;
        }
 
        BUG_ON(d != NULL);
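
This one-liner restores POSIX O_EXCL semantics on gfs2: when the lookup finds an existing inode and the open carries O_CREAT|O_EXCL, the open must fail rather than silently succeed. The userspace-visible effect (illustrative, hypothetical path):

    #include <fcntl.h>
    #include <errno.h>

    int fd = open("/mnt/gfs2/existing-file", O_CREAT | O_EXCL | O_RDWR, 0600);
    /* before the fix gfs2 could hand back a valid fd here; with it the
     * call fails and errno == EEXIST, matching other filesystems */
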
index 7d57068..93d9252 100644 (file)
@@ -138,6 +138,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
+       atomic64_set(&inode->i_sequence, 0);
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
index c06082b..3affd96 100644 (file)
@@ -191,7 +191,6 @@ struct fixed_file_data {
        struct llist_head               put_llist;
        struct work_struct              ref_work;
        struct completion               done;
-       struct rcu_head                 rcu;
 };
 
 struct io_ring_ctx {
@@ -344,6 +343,7 @@ struct io_accept {
        struct sockaddr __user          *addr;
        int __user                      *addr_len;
        int                             flags;
+       unsigned long                   nofile;
 };
 
 struct io_sync {
@@ -398,6 +398,7 @@ struct io_open {
        struct filename                 *filename;
        struct statx __user             *buffer;
        struct open_how                 how;
+       unsigned long                   nofile;
 };
 
 struct io_files_update {
@@ -2578,6 +2579,7 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2619,6 +2621,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                return ret;
        }
 
+       req->open.nofile = rlimit(RLIMIT_NOFILE);
        req->flags |= REQ_F_NEED_CLEANUP;
        return 0;
 }
@@ -2637,7 +2640,7 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
        if (ret)
                goto err;
 
-       ret = get_unused_fd_flags(req->open.how.flags);
+       ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
        if (ret < 0)
                goto err;
 
@@ -3322,6 +3325,7 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
        accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
        accept->flags = READ_ONCE(sqe->accept_flags);
+       accept->nofile = rlimit(RLIMIT_NOFILE);
        return 0;
 #else
        return -EOPNOTSUPP;
@@ -3338,7 +3342,8 @@ static int __io_accept(struct io_kiocb *req, struct io_kiocb **nxt,
 
        file_flags = force_nonblock ? O_NONBLOCK : 0;
        ret = __sys_accept4_file(req->file, file_flags, accept->addr,
-                                       accept->addr_len, accept->flags);
+                                       accept->addr_len, accept->flags,
+                                       accept->nofile);
        if (ret == -EAGAIN && force_nonblock)
                return -EAGAIN;
        if (ret == -ERESTARTSYS)
@@ -4132,6 +4137,9 @@ static int io_req_defer_prep(struct io_kiocb *req,
 {
        ssize_t ret = 0;
 
+       if (!sqe)
+               return 0;
+
        if (io_op_defs[req->opcode].file_table) {
                ret = io_grab_files(req);
                if (unlikely(ret))
@@ -4908,6 +4916,11 @@ err_req:
                if (sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK)) {
                        req->flags |= REQ_F_LINK;
                        INIT_LIST_HEAD(&req->link_list);
+
+                       if (io_alloc_async_ctx(req)) {
+                               ret = -EAGAIN;
+                               goto err_req;
+                       }
                        ret = io_req_defer_prep(req, sqe);
                        if (ret)
                                req->flags |= REQ_F_FAIL_LINK;
@@ -5331,24 +5344,21 @@ static void io_file_ref_kill(struct percpu_ref *ref)
        complete(&data->done);
 }
 
-static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
+static void io_file_ref_exit_and_free(struct work_struct *work)
 {
-       struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
-                                                       rcu);
-       percpu_ref_exit(&data->refs);
-       kfree(data);
-}
+       struct fixed_file_data *data;
+
+       data = container_of(work, struct fixed_file_data, ref_work);
 
-static void io_file_ref_exit_and_free(struct rcu_head *rcu)
-{
        /*
-        * We need to order our exit+free call against the potentially
-        * existing call_rcu() for switching to atomic. One way to do that
-        * is to have this rcu callback queue the final put and free, as we
-        * could otherwise have a pre-existing atomic switch complete _after_
-        * the free callback we queued.
+        * Ensure any percpu-ref atomic switch callback has run, it could have
+        * been in progress when the files were being unregistered. Once
+        * that's done, we can safely exit and free the ref and containing
+        * data structure.
         */
-       call_rcu(rcu, __io_file_ref_exit_and_free);
+       rcu_barrier();
+       percpu_ref_exit(&data->refs);
+       kfree(data);
 }
 
 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -5369,7 +5379,8 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
        for (i = 0; i < nr_tables; i++)
                kfree(data->table[i].files);
        kfree(data->table);
-       call_rcu(&data->rcu, io_file_ref_exit_and_free);
+       INIT_WORK(&data->ref_work, io_file_ref_exit_and_free);
+       queue_work(system_wq, &data->ref_work);
        ctx->file_data = NULL;
        ctx->nr_user_files = 0;
        return 0;
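
call_rcu() callbacks are ordered only per CPU, so the old exit-and-free callback could not reliably run after the percpu-ref's own internal call_rcu() used for its atomic-mode switch. Moving the teardown to a workqueue makes rcu_barrier() usable (it may sleep), and the barrier guarantees every previously queued RCU callback has completed before the ref is exited and freed. The pattern, sketched with a hypothetical object:

    static void obj_teardown(struct work_struct *work)
    {
            struct obj *o = container_of(work, struct obj, work);

            rcu_barrier();              /* flush all pending RCU callbacks */
            percpu_ref_exit(&o->refs);  /* nothing can still touch it now */
            kfree(o);
    }
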
index 426b55d..b8a31c1 100644 (file)
@@ -725,7 +725,6 @@ static void __locks_delete_block(struct file_lock *waiter)
 {
        locks_delete_global_blocked(waiter);
        list_del_init(&waiter->fl_blocked_member);
-       waiter->fl_blocker = NULL;
 }
 
 static void __locks_wake_up_blocks(struct file_lock *blocker)
@@ -740,6 +739,13 @@ static void __locks_wake_up_blocks(struct file_lock *blocker)
                        waiter->fl_lmops->lm_notify(waiter);
                else
                        wake_up(&waiter->fl_wait);
+
+               /*
+                * The setting of fl_blocker to NULL marks the "done"
+                * point in deleting a block. Paired with acquire at the top
+                * of locks_delete_block().
+                */
+               smp_store_release(&waiter->fl_blocker, NULL);
        }
 }
 
@@ -753,11 +759,42 @@ int locks_delete_block(struct file_lock *waiter)
 {
        int status = -ENOENT;
 
+       /*
+        * If fl_blocker is NULL, it won't be set again as this thread "owns"
+        * the lock and is the only one that might try to claim the lock.
+        *
+        * We use acquire/release to manage fl_blocker so that we can
+        * optimize away taking the blocked_lock_lock in many cases.
+        *
+        * The smp_load_acquire guarantees two things:
+        *
+        * 1/ that fl_blocked_requests can be tested locklessly. If something
+        * was recently added to that list it must have been in a locked region
+        * *before* the locked region when fl_blocker was set to NULL.
+        *
+        * 2/ that no other thread is accessing 'waiter', so it is safe to free
+        * it.  __locks_wake_up_blocks is careful not to touch waiter after
+        * fl_blocker is released.
+        *
+        * If a lockless check of fl_blocker shows it to be NULL, we know that
+        * no new locks can be inserted into its fl_blocked_requests list, and
+        * can avoid doing anything further if the list is empty.
+        */
+       if (!smp_load_acquire(&waiter->fl_blocker) &&
+           list_empty(&waiter->fl_blocked_requests))
+               return status;
+
        spin_lock(&blocked_lock_lock);
        if (waiter->fl_blocker)
                status = 0;
        __locks_wake_up_blocks(waiter);
        __locks_delete_block(waiter);
+
+       /*
+        * The setting of fl_blocker to NULL marks the "done" point in deleting
+        * a block. Paired with acquire at the top of this function.
+        */
+       smp_store_release(&waiter->fl_blocker, NULL);
        spin_unlock(&blocked_lock_lock);
        return status;
 }
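
The fast path relies on a classic acquire/release handoff; condensed, the pairing is:

    /* Waker (release side): clearing fl_blocker is the "done" marker and
     * is ordered after every other access to the waiter. */
    smp_store_release(&waiter->fl_blocker, NULL);

    /* Waiter (acquire side): NULL fl_blocker plus an empty request list
     * proves no one can still be touching us, so blocked_lock_lock can
     * be skipped entirely. */
    if (!smp_load_acquire(&waiter->fl_blocker) &&
        list_empty(&waiter->fl_blocked_requests))
            return -ENOENT;

The wait_event_interruptible() changes below follow from the same protocol: waiters now key on list_empty(&fl->fl_blocked_member), which becomes true only once the block has been fully deleted.
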
@@ -1350,7 +1387,8 @@ static int posix_lock_inode_wait(struct inode *inode, struct file_lock *fl)
                error = posix_lock_inode(inode, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                                       list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
@@ -1435,7 +1473,8 @@ int locks_mandatory_area(struct inode *inode, struct file *filp, loff_t start,
                error = posix_lock_inode(inode, &fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl.fl_wait, !fl.fl_blocker);
+               error = wait_event_interruptible(fl.fl_wait,
+                                       list_empty(&fl.fl_blocked_member));
                if (!error) {
                        /*
                         * If we've been sleeping someone might have
@@ -1638,7 +1677,8 @@ restart:
 
        locks_dispose_list(&dispose);
        error = wait_event_interruptible_timeout(new_fl->fl_wait,
-                                               !new_fl->fl_blocker, break_time);
+                                       list_empty(&new_fl->fl_blocked_member),
+                                       break_time);
 
        percpu_down_read(&file_rwsem);
        spin_lock(&ctx->flc_lock);
@@ -2122,7 +2162,8 @@ static int flock_lock_inode_wait(struct inode *inode, struct file_lock *fl)
                error = flock_lock_inode(inode, fl);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                               list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
@@ -2399,7 +2440,8 @@ static int do_lock_file_wait(struct file *filp, unsigned int cmd,
                error = vfs_lock_file(filp, cmd, fl, NULL);
                if (error != FILE_LOCK_DEFERRED)
                        break;
-               error = wait_event_interruptible(fl->fl_wait, !fl->fl_blocker);
+               error = wait_event_interruptible(fl->fl_wait,
+                                       list_empty(&fl->fl_blocked_member));
                if (error)
                        break;
        }
index 989c30c..f1ff307 100644 (file)
@@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
        if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
                goto error_0;
 
+       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_nfs_mod = cl_init->nfs_mod;
        if (!try_module_get(clp->cl_nfs_mod->owner))
                goto error_dealloc;
index e1b9384..e113fcb 100644 (file)
@@ -832,6 +832,8 @@ static int nfs_parse_source(struct fs_context *fc,
        if (len > maxnamlen)
                goto out_hostname;
 
+       kfree(ctx->nfs_server.hostname);
+
        /* N.B. caller will free nfs_server.hostname in all cases */
        ctx->nfs_server.hostname = kmemdup_nul(dev_name, len, GFP_KERNEL);
        if (!ctx->nfs_server.hostname)
@@ -1240,6 +1242,13 @@ static int nfs_fs_context_validate(struct fs_context *fc)
                }
                ctx->nfs_mod = nfs_mod;
        }
+
+       /* Ensure the filesystem context has the correct fs_type */
+       if (fc->fs_type != ctx->nfs_mod->nfs_fs) {
+               module_put(fc->fs_type->owner);
+               __module_get(ctx->nfs_mod->nfs_fs->owner);
+               fc->fs_type = ctx->nfs_mod->nfs_fs;
+       }
        return 0;
 
 out_no_device_name:
index 52270bf..1abf126 100644 (file)
@@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
 struct nfs_server_key {
        struct {
                uint16_t        nfsversion;             /* NFS protocol version */
+               uint32_t        minorversion;           /* NFSv4 minor version */
                uint16_t        family;                 /* address family */
                __be16          port;                   /* IP port */
        } hdr;
@@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
 
        memset(&key, 0, sizeof(key));
        key.hdr.nfsversion = clp->rpc_ops->version;
+       key.hdr.minorversion = clp->cl_minorversion;
        key.hdr.family = clp->cl_addr.ss_family;
 
        switch (clp->cl_addr.ss_family) {
index ad60774..f3ece8e 100644 (file)
@@ -153,7 +153,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
        /* Open a new filesystem context, transferring parameters from the
         * parent superblock, including the network namespace.
         */
-       fc = fs_context_for_submount(&nfs_fs_type, path->dentry);
+       fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, path->dentry);
        if (IS_ERR(fc))
                return ERR_CAST(fc);
 
index 0cd767e..0bd77cc 100644 (file)
@@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
        INIT_LIST_HEAD(&clp->cl_ds_clients);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
        clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
-       clp->cl_minorversion = cl_init->minorversion;
        clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
        clp->cl_mig_gen = 1;
 #if IS_ENABLED(CONFIG_NFS_V4_1)
index 0788b37..b69d6ee 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -860,9 +860,6 @@ cleanup_file:
  * the return value of d_splice_alias(), then the caller needs to perform dput()
  * on it after finish_open().
  *
- * On successful return @file is a fully instantiated open file.  After this, if
- * an error occurs in ->atomic_open(), it needs to clean up with fput().
- *
  * Returns zero on success or -errno if the open failed.
  */
 int finish_open(struct file *file, struct dentry *dentry,
index 444e2da..714c14c 100644 (file)
@@ -93,6 +93,7 @@ config OVERLAY_FS_XINO_AUTO
        bool "Overlayfs: auto enable inode number mapping"
        default n
        depends on OVERLAY_FS
+       depends on 64BIT
        help
          If this config option is enabled then overlay filesystems will use
          unused high bits in underlying filesystem inode numbers to map all
index a531721..87c362f 100644 (file)
@@ -244,6 +244,9 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
        if (iocb->ki_flags & IOCB_WRITE) {
                struct inode *inode = file_inode(orig_iocb->ki_filp);
 
+               /* Actually acquired in ovl_write_iter() */
+               __sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+                                     SB_FREEZE_WRITE);
                file_end_write(iocb->ki_filp);
                ovl_copyattr(ovl_inode_real(inode), inode);
        }
@@ -346,6 +349,9 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                        goto out;
 
                file_start_write(real.file);
+               /* Pacify lockdep, same trick as done in aio_write() */
+               __sb_writers_release(file_inode(real.file)->i_sb,
+                                    SB_FREEZE_WRITE);
                aio_req->fd = real;
                real.flags = 0;
                aio_req->orig_iocb = iocb;
index 3623d28..3d3f2b8 100644 (file)
@@ -318,7 +318,12 @@ static inline unsigned int ovl_xino_bits(struct super_block *sb)
        return ovl_same_dev(sb) ? OVL_FS(sb)->xino_mode : 0;
 }
 
-static inline int ovl_inode_lock(struct inode *inode)
+static inline void ovl_inode_lock(struct inode *inode)
+{
+       mutex_lock(&OVL_I(inode)->lock);
+}
+
+static inline int ovl_inode_lock_interruptible(struct inode *inode)
 {
        return mutex_lock_interruptible(&OVL_I(inode)->lock);
 }
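
The helper is split so call sites that can bail out keep the interruptible variant (the two hunks in util.c below), while paths that must not fail mid-operation can take the lock unconditionally; if memory serves, this series adds such a caller in ovl_llseek(). Shape of the two call-site kinds, sketched:

    /* Must-succeed path: no error to propagate. */
    ovl_inode_lock(inode);
    /* ... critical section ... */
    ovl_inode_unlock(inode);

    /* User-initiated path: a signal may abort the wait. */
    err = ovl_inode_lock_interruptible(inode);
    if (err)
            return err;     /* typically -EINTR */
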
index 319fe0d..ac967f1 100644 (file)
@@ -1411,6 +1411,8 @@ static int ovl_get_layers(struct super_block *sb, struct ovl_fs *ofs,
                if (ofs->config.xino == OVL_XINO_ON)
                        pr_info("\"xino=on\" is useless with all layers on same fs, ignore.\n");
                ofs->xino_mode = 0;
+       } else if (ofs->config.xino == OVL_XINO_OFF) {
+               ofs->xino_mode = -1;
        } else if (ofs->config.xino == OVL_XINO_ON && ofs->xino_mode < 0) {
                /*
                 * This is a roundup of number of bits needed for encoding
@@ -1623,8 +1625,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        sb->s_stack_depth = 0;
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        /* Assume underlying fs uses 32bit inodes unless proven otherwise */
-       if (ofs->config.xino != OVL_XINO_OFF)
+       if (ofs->config.xino != OVL_XINO_OFF) {
                ofs->xino_mode = BITS_PER_LONG - 32;
+               if (!ofs->xino_mode) {
+                       pr_warn("xino not supported on 32bit kernel, falling back to xino=off.\n");
+                       ofs->config.xino = OVL_XINO_OFF;
+               }
+       }
 
        /* alloc/destroy_inode needed for setting up traps in inode cache */
        sb->s_op = &ovl_super_operations;
index ea00508..042f7eb 100644 (file)
@@ -509,7 +509,7 @@ int ovl_copy_up_start(struct dentry *dentry, int flags)
        struct inode *inode = d_inode(dentry);
        int err;
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (!err && ovl_already_copied_up_locked(dentry, flags)) {
                err = 1; /* Already copied up */
                ovl_inode_unlock(inode);
@@ -764,7 +764,7 @@ int ovl_nlink_start(struct dentry *dentry)
                        return err;
        }
 
-       err = ovl_inode_lock(inode);
+       err = ovl_inode_lock_interruptible(inode);
        if (err)
                return err;
 
index 4e6dc84..9ecb3c1 100644 (file)
@@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
                             const u8 secret[CURVE25519_KEY_SIZE],
                             const u8 basepoint[CURVE25519_KEY_SIZE])
 {
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_arch(mypublic, secret, basepoint);
        else
                curve25519_generic(mypublic, secret, basepoint);
@@ -49,7 +50,8 @@ __must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
                                    CURVE25519_KEY_SIZE)))
                return false;
 
-       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519))
+       if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+           (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
                curve25519_base_arch(pub, secret);
        else
                curve25519_generic(pub, secret, curve25519_base_point);
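
IS_ENABLED() resolves at compile time, so when the x86 implementation is configured but the assembler lacks ADX support (CONFIG_AS_ADX unset), the whole arch branch is dead code and no call to curve25519_arch() is ever emitted, even though its accelerated body could not have been assembled. The general pattern, with hypothetical config symbols:

    /* Compile-time dispatch sketch: the untaken branch is eliminated,
     * so the arch symbol need not even link when its config is off. */
    if (IS_ENABLED(CONFIG_HAVE_ARCH_IMPL) &&
        (!IS_ENABLED(CONFIG_IMPL_NEEDS_ADX) || IS_ENABLED(CONFIG_AS_ADX)))
            do_arch_impl();
    else
            do_generic_impl();
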
index bcb39da..41725d8 100644 (file)
@@ -81,7 +81,7 @@ struct drm_dp_vcpi {
  * &drm_dp_mst_topology_mgr.base.lock.
  * @num_sdp_stream_sinks: Number of stream sinks. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
- * @available_pbn: Available bandwidth for this port. Protected by
+ * @full_pbn: Max possible bandwidth for this port. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
  * @next: link to next port on this branch device
  * @aux: i2c aux transport to talk to device connected to this port, protected
@@ -126,7 +126,7 @@ struct drm_dp_mst_port {
        u8 dpcd_rev;
        u8 num_sdp_streams;
        u8 num_sdp_stream_sinks;
-       uint16_t available_pbn;
+       uint16_t full_pbn;
        struct list_head next;
        /**
         * @mstb: the branch device connected to this port, if there is one.
index 0f2b842..65ac6eb 100644 (file)
 #define IMX8MN_CLK_I2C1                                105
 #define IMX8MN_CLK_I2C2                                106
 #define IMX8MN_CLK_I2C3                                107
-#define IMX8MN_CLK_I2C4                                118
-#define IMX8MN_CLK_UART1                       119
+#define IMX8MN_CLK_I2C4                                108
+#define IMX8MN_CLK_UART1                       109
 #define IMX8MN_CLK_UART2                       110
 #define IMX8MN_CLK_UART3                       111
 #define IMX8MN_CLK_UART4                       112
index d7ddebd..e75d219 100644 (file)
@@ -62,6 +62,7 @@ struct css_task_iter {
        struct list_head                *mg_tasks_head;
        struct list_head                *dying_tasks_head;
 
+       struct list_head                *cur_tasks_head;
        struct css_set                  *cur_cset;
        struct css_set                  *cur_dcset;
        struct task_struct              *cur_task;
index f64ca27..d7bf029 100644 (file)
@@ -69,19 +69,23 @@ struct dmar_pci_notify_info {
 extern struct rw_semaphore dmar_global_lock;
 extern struct list_head dmar_drhd_units;
 
-#define for_each_drhd_unit(drhd) \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd)                                       \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())
 
 #define for_each_active_drhd_unit(drhd)                                        \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (drhd->ignored) {} else
 
 #define for_each_active_iommu(i, drhd)                                 \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, drhd->ignored) {} else
 
 #define for_each_iommu(i, drhd)                                                \
-       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)           \
+       list_for_each_entry_rcu(drhd, &dmar_drhd_units, list,           \
+                               dmar_rcu_check())                       \
                if (i=drhd->iommu, 0) {} else 
 
 static inline bool dmar_rcu_check(void)
index c6c7b24..142d102 100644 (file)
@@ -85,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
 extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
 extern void set_close_on_exec(unsigned int fd, int flag);
 extern bool get_close_on_exec(unsigned int fd);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
 extern int get_unused_fd_flags(unsigned flags);
 extern void put_unused_fd(unsigned int fd);
 
index 3cd4fe6..abedbff 100644 (file)
@@ -698,6 +698,7 @@ struct inode {
                struct rcu_head         i_rcu;
        };
        atomic64_t              i_version;
+       atomic64_t              i_sequence; /* see futex */
        atomic_t                i_count;
        atomic_t                i_dio_count;
        atomic_t                i_writecount;
index 5cc3fed..b70df27 100644 (file)
@@ -31,23 +31,26 @@ struct task_struct;
 
 union futex_key {
        struct {
+               u64 i_seq;
                unsigned long pgoff;
-               struct inode *inode;
-               int offset;
+               unsigned int offset;
        } shared;
        struct {
+               union {
+                       struct mm_struct *mm;
+                       u64 __tmp;
+               };
                unsigned long address;
-               struct mm_struct *mm;
-               int offset;
+               unsigned int offset;
        } private;
        struct {
+               u64 ptr;
                unsigned long word;
-               void *ptr;
-               int offset;
+               unsigned int offset;
        } both;
 };
 
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
 
 #ifdef CONFIG_FUTEX
 enum {
index 6fbe585..07dc918 100644 (file)
@@ -245,18 +245,6 @@ static inline bool disk_part_scan_enabled(struct gendisk *disk)
                !(disk->flags & GENHD_FL_NO_PART_SCAN);
 }
 
-static inline bool disk_has_partitions(struct gendisk *disk)
-{
-       bool ret = false;
-
-       rcu_read_lock();
-       if (rcu_dereference(disk->part_tbl)->len > 1)
-               ret = true;
-       rcu_read_unlock();
-
-       return ret;
-}
-
 static inline dev_t disk_devt(struct gendisk *disk)
 {
        return MKDEV(disk->major, disk->first_minor);
@@ -298,6 +286,7 @@ extern void disk_part_iter_exit(struct disk_part_iter *piter);
 
 extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
                                             sector_t sector);
+bool disk_has_partitions(struct gendisk *disk);
 
 /*
  * Macros to operate on percpu disk statistics:
index 39faaaf..c91cf2d 100644 (file)
@@ -2,15 +2,10 @@
 #ifndef _INET_DIAG_H_
 #define _INET_DIAG_H_ 1
 
+#include <net/netlink.h>
 #include <uapi/linux/inet_diag.h>
 
-struct net;
-struct sock;
 struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
 
 struct inet_diag_handler {
        void            (*dump)(struct sk_buff *skb,
@@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
 
 void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
 
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+       return    nla_total_size(1)  /* INET_DIAG_SHUTDOWN */
+               + nla_total_size(1)  /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+               + nla_total_size(1)  /* INET_DIAG_TCLASS */
+               + nla_total_size(1)  /* INET_DIAG_SKV6ONLY */
+#endif
+               + nla_total_size(4)  /* INET_DIAG_MARK */
+               + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+}
 int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
                             struct inet_diag_msg *r, int ext,
                             struct user_namespace *user_ns, bool net_admin);
index 4a16b39..980234a 100644 (file)
 
 #define dmar_readq(a) readq(a)
 #define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
 
 #define DMAR_VER_MAJOR(v)              (((v) & 0xf0) >> 4)
 #define DMAR_VER_MINOR(v)              ((v) & 0x0f)
index ba70338..4c5eb3a 100644 (file)
@@ -333,6 +333,7 @@ struct mmc_host {
                                 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
                                 MMC_CAP_UHS_DDR50)
 #define MMC_CAP_SYNC_RUNTIME_PM        (1 << 21)       /* Synced runtime PM suspends. */
+#define MMC_CAP_NEED_RSP_BUSY  (1 << 22)       /* Commands with R1B can't use R1. */
 #define MMC_CAP_DRIVER_TYPE_A  (1 << 23)       /* Host supports Driver Type A */
 #define MMC_CAP_DRIVER_TYPE_C  (1 << 24)       /* Host supports Driver Type C */
 #define MMC_CAP_DRIVER_TYPE_D  (1 << 25)       /* Host supports Driver Type D */
index c86fcad..31b73a0 100644 (file)
@@ -11,17 +11,17 @@ struct of_device_id;
 
 #if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
 
-unsigned int of_clk_get_parent_count(struct device_node *np);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
+unsigned int of_clk_get_parent_count(const struct device_node *np);
+const char *of_clk_get_parent_name(const struct device_node *np, int index);
 void of_clk_init(const struct of_device_id *matches);
 
 #else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
 
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
 {
        return 0;
 }
-static inline const char *of_clk_get_parent_name(struct device_node *np,
+static inline const char *of_clk_get_parent_name(const struct device_node *np,
                                                 int index)
 {
        return NULL;
index 1bf83c8..77de28b 100644 (file)
@@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
 
 __PAGEFLAG(Locked, locked, PF_NO_TAIL)
 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
 PAGEFLAG(Referenced, referenced, PF_HEAD)
        TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
        __SETPAGEFLAG(Referenced, referenced, PF_HEAD)
index c570e16..452e8ba 100644 (file)
@@ -357,6 +357,7 @@ struct macsec_ops;
  * is_gigabit_capable: Set to true if PHY supports 1000Mbps
  * has_fixups: Set to true if this phy has fixups/quirks.
  * suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
  * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
  * loopback_enabled: Set true if this phy has been loopbacked successfully.
  * state: state of the PHY for management purposes
@@ -396,6 +397,7 @@ struct phy_device {
        unsigned is_gigabit_capable:1;
        unsigned has_fixups:1;
        unsigned suspended:1;
+       unsigned suspended_by_mdio_bus:1;
        unsigned sysfs_links:1;
        unsigned loopback_enabled:1;
 
@@ -557,6 +559,7 @@ struct phy_driver {
        /*
         * Checks if the PHY generated an interrupt.
         * For multi-PHY devices with shared PHY interrupt pin
+        * the set interrupt bits have to be cleared.
         */
        int (*did_interrupt)(struct phy_device *phydev);
 
index 276a03c..041bfa4 100644 (file)
@@ -24,7 +24,7 @@ struct platform_device {
        int             id;
        bool            id_auto;
        struct device   dev;
-       u64             dma_mask;
+       u64             platform_dma_mask;
        u32             num_resources;
        struct resource *resource;
 
index 19eafca..4a69d4a 100644 (file)
@@ -1,22 +1,20 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PM_QOS_H
-#define _LINUX_PM_QOS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
+/*
+ * Definitions related to Power Management Quality of Service (PM QoS).
  *
- * Mark Gross <mgross@linux.intel.com>
+ * Copyright (C) 2020 Intel Corporation
+ *
+ * Authors:
+ *     Mark Gross <mgross@linux.intel.com>
+ *     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
+
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+
 #include <linux/plist.h>
 #include <linux/notifier.h>
 #include <linux/device.h>
-#include <linux/workqueue.h>
-
-enum {
-       PM_QOS_RESERVED = 0,
-       PM_QOS_CPU_DMA_LATENCY,
-
-       /* insert new class ID */
-       PM_QOS_NUM_CLASSES,
-};
 
 enum pm_qos_flags_status {
        PM_QOS_FLAGS_UNDEFINED = -1,
@@ -29,7 +27,7 @@ enum pm_qos_flags_status {
 #define PM_QOS_LATENCY_ANY     S32_MAX
 #define PM_QOS_LATENCY_ANY_NS  ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)
 
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
+#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE       (2000 * USEC_PER_SEC)
 #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE    PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT    PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
@@ -40,22 +38,10 @@ enum pm_qos_flags_status {
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
 
-struct pm_qos_request {
-       struct plist_node node;
-       int pm_qos_class;
-       struct delayed_work work; /* for pm_qos_update_request_timeout */
-};
-
-struct pm_qos_flags_request {
-       struct list_head node;
-       s32 flags;      /* Do not change to 64 bit */
-};
-
 enum pm_qos_type {
        PM_QOS_UNITIALIZED,
        PM_QOS_MAX,             /* return the largest value */
        PM_QOS_MIN,             /* return the smallest value */
-       PM_QOS_SUM              /* return the sum */
 };
 
 /*
@@ -72,6 +58,16 @@ struct pm_qos_constraints {
        struct blocking_notifier_head *notifiers;
 };
 
+struct pm_qos_request {
+       struct plist_node node;
+       struct pm_qos_constraints *qos;
+};
+
+struct pm_qos_flags_request {
+       struct list_head node;
+       s32 flags;      /* Do not change to 64 bit */
+};
+
 struct pm_qos_flags {
        struct list_head list;
        s32 effective_flags;    /* Do not change to 64 bit */
@@ -140,24 +136,31 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
        return req->dev != NULL;
 }
 
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                         enum pm_qos_req_action action, int value);
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
                         struct pm_qos_flags_request *req,
                         enum pm_qos_req_action action, s32 val);
-void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
-                       s32 value);
-void pm_qos_update_request(struct pm_qos_request *req,
-                          s32 new_value);
-void pm_qos_update_request_timeout(struct pm_qos_request *req,
-                                  s32 new_value, unsigned long timeout_us);
-void pm_qos_remove_request(struct pm_qos_request *req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request *req);
-s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+#ifdef CONFIG_CPU_IDLE
+s32 cpu_latency_qos_limit(void);
+bool cpu_latency_qos_request_active(struct pm_qos_request *req);
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
+void cpu_latency_qos_remove_request(struct pm_qos_request *req);
+#else
+static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; }
+static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req)
+{
+       return false;
+}
+static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
+                                              s32 value) {}
+static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
+                                                 s32 new_value) {}
+static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
+#endif
 
 #ifdef CONFIG_PM
 enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
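
The header reshuffle retires the class-based request API in favour of dedicated CPU-latency helpers, stubbed out when CONFIG_CPU_IDLE is unset. For a driver, the migration implied by this hunk looks roughly like (20 us is an arbitrary example value):

    struct pm_qos_request req;

    /* old, class-based API being removed:
     *   pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 20);
     *   pm_qos_remove_request(&req);
     */

    /* new, dedicated API: */
    cpu_latency_qos_add_request(&req, 20);      /* cap CPU latency at 20 us */
    /* ... */
    cpu_latency_qos_remove_request(&req);
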
index beb9a9d..70ebef8 100644 (file)
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
 /**
  * rhashtable_lookup_get_insert_key - lookup and insert object into hash table
  * @ht:                hash table
+ * @key:       key
  * @obj:       pointer to hash head inside object
  * @params:    hash table parameters
- * @data:      pointer to element data already in hashes
  *
  * Just like rhashtable_lookup_insert_key(), but this function returns the
  * object if it exists, NULL if it does not and the insertion was successful,
index 2d23134..15f3412 100644 (file)
@@ -401,7 +401,8 @@ extern int __sys_sendto(int fd, void __user *buff, size_t len,
                        int addr_len);
 extern int __sys_accept4_file(struct file *file, unsigned file_flags,
                        struct sockaddr __user *upeer_sockaddr,
-                        int __user *upeer_addrlen, int flags);
+                        int __user *upeer_addrlen, int flags,
+                        unsigned long nofile);
 extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
                         int __user *upeer_addrlen, int flags);
 extern int __sys_socket(int family, int type, int protocol);
index ec38132..0507a16 100644 (file)
@@ -141,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
 
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
                                                        unsigned long pgoff);
-void vmalloc_sync_all(void);
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
+
 /*
  *     Lowlevel-APIs (not for driver use!)
  */
index 4261d1c..e48554e 100644 (file)
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
  *
  * We queue the work to the CPU on which it was submitted, but if the CPU dies
  * it can be processed by another CPU.
+ *
+ * Memory-ordering properties:  If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ *   CPU0                              CPU1
+ *
+ *   WRITE_ONCE(x, 1);                 [ @work is being executed ]
+ *   r0 = queue_work(wq, work);                  r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
  */
 static inline bool queue_work(struct workqueue_struct *wq,
                              struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
  * This puts a job in the kernel-global workqueue if it was not already
  * queued and leaves it in the same position on the kernel-global
  * workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties of queue_work(), cf. the
+ * DocBook header of queue_work().
  */
 static inline bool schedule_work(struct work_struct *work)
 {
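
A hedged sketch of code leaning on the documented guarantee (hypothetical names; my_work is assumed initialized with INIT_WORK(&my_work, handler)):

    static int payload;
    static struct work_struct my_work;

    static void handler(struct work_struct *work)
    {
            /* Guaranteed to observe payload == 1 if the producer's
             * queue_work() returned true, per the litmus test above. */
            pr_info("payload=%d\n", READ_ONCE(payload));
    }

    static void producer(void)
    {
            WRITE_ONCE(payload, 1);
            if (!queue_work(system_wq, &my_work))
                    pr_debug("already pending: this call orders nothing\n");
    }
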
index 54e227e..a259050 100644 (file)
@@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
        [FRA_OIFNAME]   = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
        [FRA_PRIORITY]  = { .type = NLA_U32 }, \
        [FRA_FWMARK]    = { .type = NLA_U32 }, \
+       [FRA_TUN_ID]    = { .type = NLA_U64 }, \
        [FRA_FWMASK]    = { .type = NLA_U32 }, \
        [FRA_TABLE]     = { .type = NLA_U32 }, \
        [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
index 0a50d53..7c08437 100644 (file)
@@ -74,7 +74,7 @@
 #define DEV_MAC_TAGS_CFG_TAG_ID_M                         GENMASK(31, 16)
 #define DEV_MAC_TAGS_CFG_TAG_ID_X(x)                      (((x) & GENMASK(31, 16)) >> 16)
 #define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA                 BIT(2)
-#define DEV_MAC_TAGS_CFG_PB_ENA                           BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA                 BIT(1)
 #define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA                     BIT(0)
 
 #define DEV_MAC_ADV_CHK_CFG                               0x2c
index 7457e23..af5018a 100644 (file)
@@ -359,75 +359,50 @@ DEFINE_EVENT(power_domain, power_domain_target,
 );
 
 /*
- * The pm qos events are used for pm qos update
+ * CPU latency QoS events used for global CPU latency QoS list updates
  */
-DECLARE_EVENT_CLASS(pm_qos_request,
+DECLARE_EVENT_CLASS(cpu_latency_qos_request,
 
-       TP_PROTO(int pm_qos_class, s32 value),
+       TP_PROTO(s32 value),
 
-       TP_ARGS(pm_qos_class, value),
+       TP_ARGS(value),
 
        TP_STRUCT__entry(
-               __field( int,                    pm_qos_class   )
                __field( s32,                    value          )
        ),
 
        TP_fast_assign(
-               __entry->pm_qos_class = pm_qos_class;
                __entry->value = value;
        ),
 
-       TP_printk("pm_qos_class=%s value=%d",
-                 __print_symbolic(__entry->pm_qos_class,
-                       { PM_QOS_CPU_DMA_LATENCY,       "CPU_DMA_LATENCY" }),
+       TP_printk("CPU_DMA_LATENCY value=%d",
                  __entry->value)
 );
 
-DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request,
 
-       TP_PROTO(int pm_qos_class, s32 value),
+       TP_PROTO(s32 value),
 
-       TP_ARGS(pm_qos_class, value)
+       TP_ARGS(value)
 );
 
-DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request,
 
-       TP_PROTO(int pm_qos_class, s32 value),
+       TP_PROTO(s32 value),
 
-       TP_ARGS(pm_qos_class, value)
+       TP_ARGS(value)
 );
 
-DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request,
 
-       TP_PROTO(int pm_qos_class, s32 value),
+       TP_PROTO(s32 value),
 
-       TP_ARGS(pm_qos_class, value)
-);
-
-TRACE_EVENT(pm_qos_update_request_timeout,
-
-       TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
-
-       TP_ARGS(pm_qos_class, value, timeout_us),
-
-       TP_STRUCT__entry(
-               __field( int,                    pm_qos_class   )
-               __field( s32,                    value          )
-               __field( unsigned long,          timeout_us     )
-       ),
-
-       TP_fast_assign(
-               __entry->pm_qos_class = pm_qos_class;
-               __entry->value = value;
-               __entry->timeout_us = timeout_us;
-       ),
-
-       TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
-                 __print_symbolic(__entry->pm_qos_class,
-                       { PM_QOS_CPU_DMA_LATENCY,       "CPU_DMA_LATENCY" }),
-                 __entry->value, __entry->timeout_us)
+       TP_ARGS(value)
 );
 
+/*
+ * General PM QoS events used for updates of PM QoS request lists
+ */
 DECLARE_EVENT_CLASS(pm_qos_update,
 
        TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
index 1521073..8533bf0 100644 (file)
@@ -74,6 +74,8 @@ enum {
 #define IPPROTO_UDPLITE                IPPROTO_UDPLITE
   IPPROTO_MPLS = 137,          /* MPLS in IP (RFC 4023)                */
 #define IPPROTO_MPLS           IPPROTO_MPLS
+  IPPROTO_ETHERNET = 143,      /* Ethernet-within-IPv6 Encapsulation   */
+#define IPPROTO_ETHERNET       IPPROTO_ETHERNET
   IPPROTO_RAW = 255,           /* Raw IP packets                       */
 #define IPPROTO_RAW            IPPROTO_RAW
   IPPROTO_MPTCP = 262,         /* Multipath TCP connection             */
index 20a6ac3..4f717bf 100644 (file)
@@ -767,8 +767,7 @@ config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
        bool
 
 config CC_HAS_INT128
-       def_bool y
-       depends on !$(cc-option,-D__SIZEOF_INT128__=0)
+       def_bool !$(cc-option,$(m64-flag) -D__SIZEOF_INT128__=0) && 64BIT
 
 #
 # For architectures that know their GCC __int128 support is sound
index be1a1c8..f2d7cea 100644 (file)
@@ -471,6 +471,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
         */
        p++;
        if (p >= end) {
+               (*pos)++;
                return NULL;
        } else {
                *pos = *p;
@@ -782,7 +783,7 @@ void cgroup1_release_agent(struct work_struct *work)
 
        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
-       if (!pathbuf || !agentbuf)
+       if (!pathbuf || !agentbuf || !strlen(agentbuf))
                goto out;
 
        spin_lock_irq(&css_set_lock);
index 75f6873..3dead04 100644 (file)
@@ -3542,21 +3542,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
 static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_IO);
 }
 static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_MEM);
 }
 static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
-       struct psi_group *psi = cgroup_id(cgrp) == 1 ? &psi_system : &cgrp->psi;
+       struct psi_group *psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
 
        return psi_show(seq, psi, PSI_CPU);
 }
@@ -4400,12 +4400,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
                }
        } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
 
-       if (!list_empty(&cset->tasks))
+       if (!list_empty(&cset->tasks)) {
                it->task_pos = cset->tasks.next;
-       else if (!list_empty(&cset->mg_tasks))
+               it->cur_tasks_head = &cset->tasks;
+       } else if (!list_empty(&cset->mg_tasks)) {
                it->task_pos = cset->mg_tasks.next;
-       else
+               it->cur_tasks_head = &cset->mg_tasks;
+       } else {
                it->task_pos = cset->dying_tasks.next;
+               it->cur_tasks_head = &cset->dying_tasks;
+       }
 
        it->tasks_head = &cset->tasks;
        it->mg_tasks_head = &cset->mg_tasks;
@@ -4463,10 +4467,14 @@ repeat:
                else
                        it->task_pos = it->task_pos->next;
 
-               if (it->task_pos == it->tasks_head)
+               if (it->task_pos == it->tasks_head) {
                        it->task_pos = it->mg_tasks_head->next;
-               if (it->task_pos == it->mg_tasks_head)
+                       it->cur_tasks_head = it->mg_tasks_head;
+               }
+               if (it->task_pos == it->mg_tasks_head) {
                        it->task_pos = it->dying_tasks_head->next;
+                       it->cur_tasks_head = it->dying_tasks_head;
+               }
                if (it->task_pos == it->dying_tasks_head)
                        css_task_iter_advance_css_set(it);
        } else {
@@ -4485,11 +4493,12 @@ repeat:
                        goto repeat;
 
                /* and dying leaders w/o live member threads */
-               if (!atomic_read(&task->signal->live))
+               if (it->cur_tasks_head == it->dying_tasks_head &&
+                   !atomic_read(&task->signal->live))
                        goto repeat;
        } else {
                /* skip all dying ones */
-               if (task->flags & PF_EXITING)
+               if (it->cur_tasks_head == it->dying_tasks_head)
                        goto repeat;
        }
 }
@@ -4595,6 +4604,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
        struct kernfs_open_file *of = s->private;
        struct css_task_iter *it = of->priv;
 
+       if (pos)
+               (*pos)++;
+
        return css_task_iter_next(it);
 }
 
@@ -4610,7 +4622,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
         * from position 0, so we can simply keep iterating on !0 *pos.
         */
        if (!it) {
-               if (WARN_ON_ONCE((*pos)++))
+               if (WARN_ON_ONCE((*pos)))
                        return ERR_PTR(-EINVAL);
 
                it = kzalloc(sizeof(*it), GFP_KERNEL);
@@ -4618,10 +4630,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
                        return ERR_PTR(-ENOMEM);
                of->priv = it;
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       } else if (!(*pos)++) {
+       } else if (!(*pos)) {
                css_task_iter_end(it);
                css_task_iter_start(&cgrp->self, iter_flags, it);
-       }
+       } else
+               return it->cur_task;
 
        return cgroup_procs_next(s, NULL, NULL);
 }
@@ -6258,6 +6271,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
                return;
        }
 
+       /* Don't associate the sock with unrelated interrupted task's cgroup. */
+       if (in_interrupt())
+               return;
+
        rcu_read_lock();
 
        while (true) {
index 0cf84c8..82dfacb 100644 (file)
@@ -385,9 +385,9 @@ static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
  */
 static struct futex_hash_bucket *hash_futex(union futex_key *key)
 {
-       u32 hash = jhash2((u32*)&key->both.word,
-                         (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
+       u32 hash = jhash2((u32 *)key, offsetof(typeof(*key), both.offset) / 4,
                          key->both.offset);
+
        return &futex_queues[hash & (futex_hashsize - 1)];
 }
 
@@ -429,7 +429,7 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               ihold(key->shared.inode); /* implies smp_mb(); (B) */
+               smp_mb();               /* explicit smp_mb(); (B) */
                break;
        case FUT_OFF_MMSHARED:
                futex_get_mm(key); /* implies smp_mb(); (B) */
@@ -463,7 +463,6 @@ static void drop_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               iput(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                mmdrop(key->private.mm);
@@ -505,6 +504,46 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
        return timeout;
 }
 
+/*
+ * Generate a machine wide unique identifier for this inode.
+ *
+ * This relies on u64 not wrapping in the life-time of the machine, which with
+ * 1ns resolution means almost 585 years.
+ *
+ * This further relies on the fact that a well formed program will not unmap
+ * the file while it has a (shared) futex waiting on it. This mapping will have
+ * a file reference which pins the mount and inode.
+ *
+ * If for some reason an inode gets evicted and read back in again, it will get
+ * a new sequence number and will _NOT_ match, even though it is the exact same
+ * file.
+ *
+ * It is important that match_futex() will never have a false-positive, esp.
+ * for PI futexes that can mess up the state. The above argues that false-negatives
+ * are only possible for malformed programs.
+ */
+static u64 get_inode_sequence_number(struct inode *inode)
+{
+       static atomic64_t i_seq;
+       u64 old;
+
+       /* Does the inode already have a sequence number? */
+       old = atomic64_read(&inode->i_sequence);
+       if (likely(old))
+               return old;
+
+       for (;;) {
+               u64 new = atomic64_add_return(1, &i_seq);
+               if (WARN_ON_ONCE(!new))
+                       continue;
+
+               old = atomic64_cmpxchg_relaxed(&inode->i_sequence, 0, new);
+               if (old)
+                       return old;
+               return new;
+       }
+}
+
 /**
  * get_futex_key() - Get parameters which are the keys for a futex
  * @uaddr:     virtual address of the futex
@@ -517,9 +556,15 @@ futex_setup_timer(ktime_t *time, struct hrtimer_sleeper *timeout,
  *
  * The key words are stored in @key on success.
  *
- * For shared mappings, it's (page->index, file_inode(vma->vm_file),
- * offset_within_page).  For private mappings, it's (uaddr, current->mm).
- * We can usually work out the index without swapping in the page.
+ * For shared mappings (when @fshared), the key is:
+ *   ( inode->i_sequence, page->index, offset_within_page )
+ * [ also see get_inode_sequence_number() ]
+ *
+ * For private mappings (or when !@fshared), the key is:
+ *   ( current->mm, address, 0 )
+ *
+ * This allows (cross process, where applicable) identification of the futex
+ * without keeping the page pinned for the duration of the FUTEX_WAIT.
  *
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
@@ -659,8 +704,6 @@ again:
                key->private.mm = mm;
                key->private.address = address;
 
-               get_futex_key_refs(key); /* implies smp_mb(); (B) */
-
        } else {
                struct inode *inode;
 
@@ -692,40 +735,14 @@ again:
                        goto again;
                }
 
-               /*
-                * Take a reference unless it is about to be freed. Previously
-                * this reference was taken by ihold under the page lock
-                * pinning the inode in place so i_lock was unnecessary. The
-                * only way for this check to fail is if the inode was
-                * truncated in parallel which is almost certainly an
-                * application bug. In such a case, just retry.
-                *
-                * We are not calling into get_futex_key_refs() in file-backed
-                * cases, therefore a successful atomic_inc return below will
-                * guarantee that get_futex_key() will still imply smp_mb(); (B).
-                */
-               if (!atomic_inc_not_zero(&inode->i_count)) {
-                       rcu_read_unlock();
-                       put_page(page);
-
-                       goto again;
-               }
-
-               /* Should be impossible but lets be paranoid for now */
-               if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
-                       err = -EFAULT;
-                       rcu_read_unlock();
-                       iput(inode);
-
-                       goto out;
-               }
-
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-               key->shared.inode = inode;
+               key->shared.i_seq = get_inode_sequence_number(inode);
                key->shared.pgoff = basepage_index(tail);
                rcu_read_unlock();
        }
 
+       get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
 out:
        put_page(page);
        return err;
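
With the inode pointer (and its refcount churn) gone, a shared key is the triple (i_seq, pgoff, offset) and key equality needs no object-lifetime guarantees at all. match_futex(), unchanged by this patch and quoted here from memory, simply compares the three words:

    static inline int match_futex(union futex_key *key1, union futex_key *key2)
    {
            return key1 && key2
                    && key1->both.word == key2->both.word
                    && key1->both.ptr == key2->both.ptr
                    && key1->both.offset == key2->both.offset;
    }
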
index 63d7501..5989bbb 100644 (file)
@@ -519,7 +519,7 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-       vmalloc_sync_all();
+       vmalloc_sync_mappings();
        return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
index 0f4ecb5..647b4bb 100644 (file)
@@ -247,6 +247,16 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                tmp = tmp->parent;
        }
 
+       /*
+        * ENOMEM is not the most obvious choice especially for the case
+        * where the child subreaper has already exited and the pid
+        * namespace denies the creation of any new processes. But ENOMEM
+        * is what we have exposed to userspace for a long time and it is
+        * documented behavior for pid namespaces. So we can't easily
+        * change it even if there were an error code better suited.
+        */
+       retval = -ENOMEM;
+
        if (unlikely(is_child_reaper(pid))) {
                if (pid_ns_prepare_proc(ns))
                        goto out_free;
index 83edf86..db0bed2 100644 (file)
@@ -1,31 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * This module exposes the interface to kernel space for specifying
- * QoS dependencies.  It provides infrastructure for registration of:
+ * Power Management Quality of Service (PM QoS) support base.
  *
- * Dependents on a QoS value : register requests
- * Watchers of QoS value : get notified when target QoS value changes
+ * Copyright (C) 2020 Intel Corporation
  *
- * This QoS design is best effort based.  Dependents register their QoS needs.
- * Watchers register to keep track of the current QoS needs of the system.
+ * Authors:
+ *     Mark Gross <mgross@linux.intel.com>
+ *     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  *
- * There are 3 basic classes of QoS parameter: latency, timeout, throughput
- * each have defined units:
- * latency: usec
- * timeout: usec <-- currently not used.
- * throughput: kbs (kilo byte / sec)
+ * Provided here is an interface for specifying PM QoS dependencies.  It allows
+ * entities depending on QoS constraints to register their requests which are
+ * aggregated as appropriate to produce effective constraints (target values)
+ * that can be monitored by entities needing to respect them, either by polling
+ * or through a built-in notification mechanism.
  *
- * There are lists of pm_qos_objects each one wrapping requests, notifiers
- *
- * User mode requests on a QOS parameter register themselves to the
- * subsystem by opening the device node /dev/... and writing there request to
- * the node.  As long as the process holds a file handle open to the node the
- * client continues to be accounted for.  Upon file release the usermode
- * request is removed and a new qos target is computed.  This way when the
- * request that the application has is cleaned up when closes the file
- * pointer or exits the pm_qos_object will get an opportunity to clean up.
- *
- * Mark Gross <mgross@linux.intel.com>
+ * In addition to the basic functionality, more specific interfaces for managing
+ * global CPU latency QoS requests and frequency QoS requests are provided.
  */
 
 /*#define DEBUG*/
  * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
  * held, taken with _irqsave.  One lock to rule them all
  */
-struct pm_qos_object {
-       struct pm_qos_constraints *constraints;
-       struct miscdevice pm_qos_power_miscdev;
-       char *name;
-};
-
 static DEFINE_SPINLOCK(pm_qos_lock);
 
-static struct pm_qos_object null_pm_qos;
-
-static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
-static struct pm_qos_constraints cpu_dma_constraints = {
-       .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
-       .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-       .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-       .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
-       .type = PM_QOS_MIN,
-       .notifiers = &cpu_dma_lat_notifier,
-};
-static struct pm_qos_object cpu_dma_pm_qos = {
-       .constraints = &cpu_dma_constraints,
-       .name = "cpu_dma_latency",
-};
-
-static struct pm_qos_object *pm_qos_array[] = {
-       &null_pm_qos,
-       &cpu_dma_pm_qos,
-};
-
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
-               size_t count, loff_t *f_pos);
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
-               size_t count, loff_t *f_pos);
-static int pm_qos_power_open(struct inode *inode, struct file *filp);
-static int pm_qos_power_release(struct inode *inode, struct file *filp);
-
-static const struct file_operations pm_qos_power_fops = {
-       .write = pm_qos_power_write,
-       .read = pm_qos_power_read,
-       .open = pm_qos_power_open,
-       .release = pm_qos_power_release,
-       .llseek = noop_llseek,
-};
-
-/* unlocked internal variant */
-static inline int pm_qos_get_value(struct pm_qos_constraints *c)
+/**
+ * pm_qos_read_value - Return the current effective constraint value.
+ * @c: List of PM QoS constraint requests.
+ */
+s32 pm_qos_read_value(struct pm_qos_constraints *c)
 {
-       struct plist_node *node;
-       int total_value = 0;
+       return READ_ONCE(c->target_value);
+}
 
+static int pm_qos_get_value(struct pm_qos_constraints *c)
+{
        if (plist_head_empty(&c->list))
                return c->no_constraint_value;
 
@@ -114,111 +67,42 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
        case PM_QOS_MAX:
                return plist_last(&c->list)->prio;
 
-       case PM_QOS_SUM:
-               plist_for_each(node, &c->list)
-                       total_value += node->prio;
-
-               return total_value;
-
        default:
-               /* runtime check for not using enum */
-               BUG();
+               WARN(1, "Unknown PM QoS type in %s\n", __func__);
                return PM_QOS_DEFAULT_VALUE;
        }
 }
 
-s32 pm_qos_read_value(struct pm_qos_constraints *c)
-{
-       return c->target_value;
-}
-
-static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
+static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
 {
-       c->target_value = value;
+       WRITE_ONCE(c->target_value, value);
 }
 
-static int pm_qos_debug_show(struct seq_file *s, void *unused)
-{
-       struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
-       struct pm_qos_constraints *c;
-       struct pm_qos_request *req;
-       char *type;
-       unsigned long flags;
-       int tot_reqs = 0;
-       int active_reqs = 0;
-
-       if (IS_ERR_OR_NULL(qos)) {
-               pr_err("%s: bad qos param!\n", __func__);
-               return -EINVAL;
-       }
-       c = qos->constraints;
-       if (IS_ERR_OR_NULL(c)) {
-               pr_err("%s: Bad constraints on qos?\n", __func__);
-               return -EINVAL;
-       }
-
-       /* Lock to ensure we have a snapshot */
-       spin_lock_irqsave(&pm_qos_lock, flags);
-       if (plist_head_empty(&c->list)) {
-               seq_puts(s, "Empty!\n");
-               goto out;
-       }
-
-       switch (c->type) {
-       case PM_QOS_MIN:
-               type = "Minimum";
-               break;
-       case PM_QOS_MAX:
-               type = "Maximum";
-               break;
-       case PM_QOS_SUM:
-               type = "Sum";
-               break;
-       default:
-               type = "Unknown";
-       }
-
-       plist_for_each_entry(req, &c->list, node) {
-               char *state = "Default";
-
-               if ((req->node).prio != c->default_value) {
-                       active_reqs++;
-                       state = "Active";
-               }
-               tot_reqs++;
-               seq_printf(s, "%d: %d: %s\n", tot_reqs,
-                          (req->node).prio, state);
-       }
-
-       seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
-                  type, pm_qos_get_value(c), active_reqs, tot_reqs);
-
-out:
-       spin_unlock_irqrestore(&pm_qos_lock, flags);
-       return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(pm_qos_debug);
-
 /**
- * pm_qos_update_target - manages the constraints list and calls the notifiers
- *  if needed
- * @c: constraints data struct
- * @node: request to add to the list, to update or to remove
- * @action: action to take on the constraints list
- * @value: value of the request to add or update
+ * pm_qos_update_target - Update a list of PM QoS constraint requests.
+ * @c: List of PM QoS requests.
+ * @node: Target list entry.
+ * @action: Action to carry out (add, update or remove).
+ * @value: New request value for the target list entry.
  *
- * This function returns 1 if the aggregated constraint value has changed, 0
- *  otherwise.
+ * Update the given list of PM QoS constraint requests, @c, by carrying out
+ * the given @action involving the @node list entry and @value.
+ *
+ * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
+ * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
+ * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
+ * @node from the list, ignore @value).
+ *
+ * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
  */
 int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                         enum pm_qos_req_action action, int value)
 {
-       unsigned long flags;
        int prev_value, curr_value, new_value;
-       int ret;
+       unsigned long flags;
 
        spin_lock_irqsave(&pm_qos_lock, flags);
+
        prev_value = pm_qos_get_value(c);
        if (value == PM_QOS_DEFAULT_VALUE)
                new_value = c->default_value;
@@ -231,9 +115,8 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
                break;
        case PM_QOS_UPDATE_REQ:
                /*
-                * to change the list, we atomically remove, reinit
-                * with new value and add, then see if the extremal
-                * changed
+                * To change the list, atomically remove, reinit with new value
+                * and add, then see if the aggregate has changed.
                 */
                plist_del(node, &c->list);
                /* fall through */
@@ -252,16 +135,14 @@ int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
        spin_unlock_irqrestore(&pm_qos_lock, flags);
 
        trace_pm_qos_update_target(action, prev_value, curr_value);
-       if (prev_value != curr_value) {
-               ret = 1;
-               if (c->notifiers)
-                       blocking_notifier_call_chain(c->notifiers,
-                                                    (unsigned long)curr_value,
-                                                    NULL);
-       } else {
-               ret = 0;
-       }
-       return ret;
+
+       if (prev_value == curr_value)
+               return 0;
+
+       if (c->notifiers)
+               blocking_notifier_call_chain(c->notifiers, curr_value, NULL);
+
+       return 1;
 }
 
 /**
@@ -283,14 +164,12 @@ static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
 
 /**
  * pm_qos_update_flags - Update a set of PM QoS flags.
- * @pqf: Set of flags to update.
+ * @pqf: Set of PM QoS flags to update.
  * @req: Request to add to the set, to modify, or to remove from the set.
  * @action: Action to take on the set.
  * @val: Value of the request to add or modify.
  *
- * Update the given set of PM QoS flags and call notifiers if the aggregate
- * value has changed.  Returns 1 if the aggregate constraint value has changed,
- * 0 otherwise.
+ * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
  */
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
                         struct pm_qos_flags_request *req,
@@ -326,288 +205,180 @@ bool pm_qos_update_flags(struct pm_qos_flags *pqf,
        spin_unlock_irqrestore(&pm_qos_lock, irqflags);
 
        trace_pm_qos_update_flags(action, prev_value, curr_value);
-       return prev_value != curr_value;
-}
 
-/**
- * pm_qos_request - returns current system wide qos expectation
- * @pm_qos_class: identification of which qos value is requested
- *
- * This function returns the current target value.
- */
-int pm_qos_request(int pm_qos_class)
-{
-       return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
-}
-EXPORT_SYMBOL_GPL(pm_qos_request);
-
-int pm_qos_request_active(struct pm_qos_request *req)
-{
-       return req->pm_qos_class != 0;
+       return prev_value != curr_value;
 }
-EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
-static void __pm_qos_update_request(struct pm_qos_request *req,
-                          s32 new_value)
-{
-       trace_pm_qos_update_request(req->pm_qos_class, new_value);
+#ifdef CONFIG_CPU_IDLE
+/* Definitions related to the CPU latency QoS. */
 
-       if (new_value != req->node.prio)
-               pm_qos_update_target(
-                       pm_qos_array[req->pm_qos_class]->constraints,
-                       &req->node, PM_QOS_UPDATE_REQ, new_value);
-}
+static struct pm_qos_constraints cpu_latency_constraints = {
+       .list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
+       .target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+       .default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+       .no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
+       .type = PM_QOS_MIN,
+};
 
 /**
- * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
- * @work: work struct for the delayed work (timeout)
- *
- * This cancels the timeout request by falling back to the default at timeout.
+ * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
  */
-static void pm_qos_work_fn(struct work_struct *work)
+s32 cpu_latency_qos_limit(void)
 {
-       struct pm_qos_request *req = container_of(to_delayed_work(work),
-                                                 struct pm_qos_request,
-                                                 work);
-
-       __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+       return pm_qos_read_value(&cpu_latency_constraints);
 }
 
 /**
- * pm_qos_add_request - inserts new qos request into the list
- * @req: pointer to a preallocated handle
- * @pm_qos_class: identifies which list of qos request to use
- * @value: defines the qos request
+ * cpu_latency_qos_request_active - Check the given PM QoS request.
+ * @req: PM QoS request to check.
  *
- * This function inserts a new entry in the pm_qos_class list of requested qos
- * performance characteristics.  It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters and initializes the pm_qos_request
- * handle.  Caller needs to save this handle for later use in updates and
- * removal.
+ * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
+ * otherwise.
  */
-
-void pm_qos_add_request(struct pm_qos_request *req,
-                       int pm_qos_class, s32 value)
+bool cpu_latency_qos_request_active(struct pm_qos_request *req)
 {
-       if (!req) /*guard against callers passing in null */
-               return;
+       return req->qos == &cpu_latency_constraints;
+}
+EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
 
-       if (pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
-               return;
-       }
-       req->pm_qos_class = pm_qos_class;
-       INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
-       trace_pm_qos_add_request(pm_qos_class, value);
-       pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-                            &req->node, PM_QOS_ADD_REQ, value);
+static void cpu_latency_qos_apply(struct pm_qos_request *req,
+                                 enum pm_qos_req_action action, s32 value)
+{
+       int ret = pm_qos_update_target(req->qos, &req->node, action, value);
+       if (ret > 0)
+               wake_up_all_idle_cpus();
 }
-EXPORT_SYMBOL_GPL(pm_qos_add_request);
 
 /**
- * pm_qos_update_request - modifies an existing qos request
- * @req : handle to list element holding a pm_qos request to use
- * @value: defines the qos request
+ * cpu_latency_qos_add_request - Add new CPU latency QoS request.
+ * @req: Pointer to a preallocated handle.
+ * @value: Requested constraint value.
  *
- * Updates an existing qos request for the pm_qos_class of parameters along
- * with updating the target pm_qos_class value.
+ * Use @value to initialize the request handle pointed to by @req, insert it as
+ * a new entry to the CPU latency QoS list and recompute the effective QoS
+ * constraint for that list.
  *
- * Attempts are made to make this code callable on hot code paths.
+ * Callers need to save the handle for later use in updates and removal of the
+ * QoS request represented by it.
  */
-void pm_qos_update_request(struct pm_qos_request *req,
-                          s32 new_value)
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
 {
-       if (!req) /*guard against callers passing in null */
+       if (!req)
                return;
 
-       if (!pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
+       if (cpu_latency_qos_request_active(req)) {
+               WARN(1, KERN_ERR "%s called for already added request\n", __func__);
                return;
        }
 
-       cancel_delayed_work_sync(&req->work);
-       __pm_qos_update_request(req, new_value);
+       trace_pm_qos_add_request(value);
+
+       req->qos = &cpu_latency_constraints;
+       cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
 }
-EXPORT_SYMBOL_GPL(pm_qos_update_request);
+EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
 
 /**
- * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
- * @req : handle to list element holding a pm_qos request to use
- * @new_value: defines the temporal qos request
- * @timeout_us: the effective duration of this qos request in usecs.
+ * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
+ * @req : QoS request to update.
+ * @new_value: New requested constraint value.
  *
- * After timeout_us, this qos request is cancelled automatically.
+ * Use @new_value to update the QoS request represented by @req in the CPU
+ * latency QoS list along with updating the effective constraint value for that
+ * list.
  */
-void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
-                                  unsigned long timeout_us)
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
 {
        if (!req)
                return;
-       if (WARN(!pm_qos_request_active(req),
-                "%s called for unknown object.", __func__))
+
+       if (!cpu_latency_qos_request_active(req)) {
+               WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
                return;
+       }
 
-       cancel_delayed_work_sync(&req->work);
+       trace_pm_qos_update_request(new_value);
 
-       trace_pm_qos_update_request_timeout(req->pm_qos_class,
-                                           new_value, timeout_us);
-       if (new_value != req->node.prio)
-               pm_qos_update_target(
-                       pm_qos_array[req->pm_qos_class]->constraints,
-                       &req->node, PM_QOS_UPDATE_REQ, new_value);
+       if (new_value == req->node.prio)
+               return;
 
-       schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+       cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
 }
+EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
 
 /**
- * pm_qos_remove_request - modifies an existing qos request
- * @req: handle to request list element
+ * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
+ * @req: QoS request to remove.
  *
- * Will remove pm qos request from the list of constraints and
- * recompute the current target value for the pm_qos_class.  Call this
- * on slow code paths.
+ * Remove the CPU latency QoS request represented by @req from the CPU latency
+ * QoS list along with updating the effective constraint value for that list.
  */
-void pm_qos_remove_request(struct pm_qos_request *req)
+void cpu_latency_qos_remove_request(struct pm_qos_request *req)
 {
-       if (!req) /*guard against callers passing in null */
+       if (!req)
                return;
-               /* silent return to keep pcm code cleaner */
 
-       if (!pm_qos_request_active(req)) {
-               WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
+       if (!cpu_latency_qos_request_active(req)) {
+               WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
                return;
        }
 
-       cancel_delayed_work_sync(&req->work);
+       trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);
 
-       trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
-       pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
-                            &req->node, PM_QOS_REMOVE_REQ,
-                            PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
        memset(req, 0, sizeof(*req));
 }
-EXPORT_SYMBOL_GPL(pm_qos_remove_request);
-
-/**
- * pm_qos_add_notifier - sets notification entry for changes to target value
- * @pm_qos_class: identifies which qos target changes should be notified.
- * @notifier: notifier block managed by caller.
- *
- * will register the notifier into a notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-       int retval;
-
-       retval = blocking_notifier_chain_register(
-                       pm_qos_array[pm_qos_class]->constraints->notifiers,
-                       notifier);
-
-       return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
-
-/**
- * pm_qos_remove_notifier - deletes notification entry from chain.
- * @pm_qos_class: identifies which qos target changes are notified.
- * @notifier: notifier block to be removed.
- *
- * will remove the notifier from the notification chain that gets called
- * upon changes to the pm_qos_class target value.
- */
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
-{
-       int retval;
+EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
 
-       retval = blocking_notifier_chain_unregister(
-                       pm_qos_array[pm_qos_class]->constraints->notifiers,
-                       notifier);
+/* User space interface to the CPU latency QoS via misc device. */
 
-       return retval;
-}
-EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
-
-/* User space interface to PM QoS classes via misc devices */
-static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
+static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
 {
-       qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
-       qos->pm_qos_power_miscdev.name = qos->name;
-       qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
-
-       debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos,
-                           &pm_qos_debug_fops);
+       struct pm_qos_request *req;
 
-       return misc_register(&qos->pm_qos_power_miscdev);
-}
+       req = kzalloc(sizeof(*req), GFP_KERNEL);
+       if (!req)
+               return -ENOMEM;
 
-static int find_pm_qos_object_by_minor(int minor)
-{
-       int pm_qos_class;
+       cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
+       filp->private_data = req;
 
-       for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
-               pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
-               if (minor ==
-                       pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
-                       return pm_qos_class;
-       }
-       return -1;
+       return 0;
 }
 
-static int pm_qos_power_open(struct inode *inode, struct file *filp)
+static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
 {
-       long pm_qos_class;
-
-       pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
-       if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
-               struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
-               if (!req)
-                       return -ENOMEM;
-
-               pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
-               filp->private_data = req;
-
-               return 0;
-       }
-       return -EPERM;
-}
+       struct pm_qos_request *req = filp->private_data;
 
-static int pm_qos_power_release(struct inode *inode, struct file *filp)
-{
-       struct pm_qos_request *req;
+       filp->private_data = NULL;
 
-       req = filp->private_data;
-       pm_qos_remove_request(req);
+       cpu_latency_qos_remove_request(req);
        kfree(req);
 
        return 0;
 }
 
-
-static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
-               size_t count, loff_t *f_pos)
+static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
+                                   size_t count, loff_t *f_pos)
 {
-       s32 value;
-       unsigned long flags;
        struct pm_qos_request *req = filp->private_data;
+       unsigned long flags;
+       s32 value;
 
-       if (!req)
-               return -EINVAL;
-       if (!pm_qos_request_active(req))
+       if (!req || !cpu_latency_qos_request_active(req))
                return -EINVAL;
 
        spin_lock_irqsave(&pm_qos_lock, flags);
-       value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
+       value = pm_qos_get_value(&cpu_latency_constraints);
        spin_unlock_irqrestore(&pm_qos_lock, flags);
 
        return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
 }
 
-static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
-               size_t count, loff_t *f_pos)
+static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
+                                    size_t count, loff_t *f_pos)
 {
        s32 value;
-       struct pm_qos_request *req;
 
        if (count == sizeof(s32)) {
                if (copy_from_user(&value, buf, sizeof(s32)))
@@ -620,36 +391,38 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
                        return ret;
        }
 
-       req = filp->private_data;
-       pm_qos_update_request(req, value);
+       cpu_latency_qos_update_request(filp->private_data, value);
 
        return count;
 }
 
+static const struct file_operations cpu_latency_qos_fops = {
+       .write = cpu_latency_qos_write,
+       .read = cpu_latency_qos_read,
+       .open = cpu_latency_qos_open,
+       .release = cpu_latency_qos_release,
+       .llseek = noop_llseek,
+};
 
-static int __init pm_qos_power_init(void)
-{
-       int ret = 0;
-       int i;
-       struct dentry *d;
-
-       BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
+static struct miscdevice cpu_latency_qos_miscdev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "cpu_dma_latency",
+       .fops = &cpu_latency_qos_fops,
+};
 
-       d = debugfs_create_dir("pm_qos", NULL);
+static int __init cpu_latency_qos_init(void)
+{
+       int ret;
 
-       for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
-               ret = register_pm_qos_misc(pm_qos_array[i], d);
-               if (ret < 0) {
-                       pr_err("%s: %s setup failed\n",
-                              __func__, pm_qos_array[i]->name);
-                       return ret;
-               }
-       }
+       ret = misc_register(&cpu_latency_qos_miscdev);
+       if (ret < 0)
+               pr_err("%s: %s setup failed\n", __func__,
+                      cpu_latency_qos_miscdev.name);
 
        return ret;
 }
-
-late_initcall(pm_qos_power_init);
+late_initcall(cpu_latency_qos_init);
+#endif /* CONFIG_CPU_IDLE */
 
 /* Definitions related to the frequency QoS below. */
 
index f9bc5c3..d325f3a 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/time_namespace.h>
 #include <linux/binfmts.h>
 
 #include <linux/sched.h>
@@ -2546,6 +2547,7 @@ static int do_sysinfo(struct sysinfo *info)
        memset(info, 0, sizeof(struct sysinfo));
 
        ktime_get_boottime_ts64(&tp);
+       timens_add_boottime(&tp);
        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
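
timens_add_boottime() shifts the boot time by the calling task's time-namespace offset, so sysinfo() uptime agrees with CLOCK_BOOTTIME as seen inside the namespace. The helper from <linux/time_namespace.h> is roughly the following when CONFIG_TIME_NS is enabled (otherwise it is a no-op stub):

	static inline void timens_add_boottime(struct timespec64 *ts)
	{
		struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;

		*ts = timespec64_add(*ts, ns_offsets->boottime);
	}
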
index 3f7ee10..fd81c7d 100644 (file)
@@ -1547,6 +1547,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
                rec = bsearch(&key, pg->records, pg->index,
                              sizeof(struct dyn_ftrace),
                              ftrace_cmp_recs);
+               if (rec)
+                       break;
        }
        return rec;
 }
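
The missing break is the whole bug: lookup_rec() walks every ftrace page, and a later page whose bsearch() misses would overwrite an earlier hit with NULL. Schematically:

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		/* ... skip pages whose ip range cannot contain the key ... */
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace), ftrace_cmp_recs);
		if (rec)
			break;		/* the statement this fix adds */
	}
	return rec;	/* previously: the result of the *last* page only */
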
index 301db44..4e01c44 100644 (file)
@@ -1411,14 +1411,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                return;
        rcu_read_lock();
 retry:
-       if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-
        /* pwq which will be used unless @work is executing elsewhere */
-       if (!(wq->flags & WQ_UNBOUND))
-               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-       else
+       if (wq->flags & WQ_UNBOUND) {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = wq_select_unbound_cpu(raw_smp_processor_id());
                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
+       } else {
+               if (req_cpu == WORK_CPU_UNBOUND)
+                       cpu = raw_smp_processor_id();
+               pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+       }
 
        /*
         * If @work was previously on a different pool, it might still be
index 43b47d3..4bb30ed 100644 (file)
@@ -335,12 +335,14 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
                }
 
                page = pmd_page(orig_pmd);
+
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       goto huge_unlock;
+
                if (next - addr != HPAGE_PMD_SIZE) {
                        int err;
 
-                       if (page_mapcount(page) != 1)
-                               goto huge_unlock;
-
                        get_page(page);
                        spin_unlock(ptl);
                        lock_page(page);
@@ -426,6 +428,10 @@ regular_page:
                        continue;
                }
 
+               /* Do not interfere with other mappings of this page */
+               if (page_mapcount(page) != 1)
+                       continue;
+
                VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                if (pte_young(ptent)) {
index d09776c..7a4bd8b 100644 (file)
@@ -2297,28 +2297,41 @@ static void high_work_func(struct work_struct *work)
  #define MEMCG_DELAY_SCALING_SHIFT 14
 
 /*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Get the number of jiffies that we should penalise a mischievous cgroup which
+ * is exceeding its memory.high by checking both it and its ancestors.
  */
-void mem_cgroup_handle_over_high(void)
+static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
+                                         unsigned int nr_pages)
 {
-       unsigned long usage, high, clamped_high;
-       unsigned long pflags;
-       unsigned long penalty_jiffies, overage;
-       unsigned int nr_pages = current->memcg_nr_pages_over_high;
-       struct mem_cgroup *memcg;
+       unsigned long penalty_jiffies;
+       u64 max_overage = 0;
 
-       if (likely(!nr_pages))
-               return;
+       do {
+               unsigned long usage, high;
+               u64 overage;
 
-       memcg = get_mem_cgroup_from_mm(current->mm);
-       reclaim_high(memcg, nr_pages, GFP_KERNEL);
-       current->memcg_nr_pages_over_high = 0;
+               usage = page_counter_read(&memcg->memory);
+               high = READ_ONCE(memcg->high);
+
+               /*
+                * Prevent division by 0 in overage calculation by acting as if
+                * it was a threshold of 1 page
+                */
+               high = max(high, 1UL);
+
+               overage = usage - high;
+               overage <<= MEMCG_DELAY_PRECISION_SHIFT;
+               overage = div64_u64(overage, high);
+
+               if (overage > max_overage)
+                       max_overage = overage;
+       } while ((memcg = parent_mem_cgroup(memcg)) &&
+                !mem_cgroup_is_root(memcg));
+
+       if (!max_overage)
+               return 0;
 
        /*
-        * memory.high is breached and reclaim is unable to keep up. Throttle
-        * allocators proactively to slow down excessive growth.
-        *
         * We use overage compared to memory.high to calculate the number of
         * jiffies to sleep (penalty_jiffies). Ideally this value should be
         * fairly lenient on small overages, and increasingly harsh when the
@@ -2326,24 +2339,9 @@ void mem_cgroup_handle_over_high(void)
         * its crazy behaviour, so we exponentially increase the delay based on
         * overage amount.
         */
-
-       usage = page_counter_read(&memcg->memory);
-       high = READ_ONCE(memcg->high);
-
-       if (usage <= high)
-               goto out;
-
-       /*
-        * Prevent division by 0 in overage calculation by acting as if it was a
-        * threshold of 1 page
-        */
-       clamped_high = max(high, 1UL);
-
-       overage = div_u64((u64)(usage - high) << MEMCG_DELAY_PRECISION_SHIFT,
-                         clamped_high);
-
-       penalty_jiffies = ((u64)overage * overage * HZ)
-               >> (MEMCG_DELAY_PRECISION_SHIFT + MEMCG_DELAY_SCALING_SHIFT);
+       penalty_jiffies = max_overage * max_overage * HZ;
+       penalty_jiffies >>= MEMCG_DELAY_PRECISION_SHIFT;
+       penalty_jiffies >>= MEMCG_DELAY_SCALING_SHIFT;
 
        /*
         * Factor in the task's own contribution to the overage, such that four
@@ -2360,7 +2358,32 @@ void mem_cgroup_handle_over_high(void)
         * application moving forwards and also permit diagnostics, albeit
         * extremely slowly.
         */
-       penalty_jiffies = min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+       return min(penalty_jiffies, MEMCG_MAX_HIGH_DELAY_JIFFIES);
+}
+
+/*
+ * Scheduled by try_charge() to be executed from the userland return path
+ * and reclaims memory over the high limit.
+ */
+void mem_cgroup_handle_over_high(void)
+{
+       unsigned long penalty_jiffies;
+       unsigned long pflags;
+       unsigned int nr_pages = current->memcg_nr_pages_over_high;
+       struct mem_cgroup *memcg;
+
+       if (likely(!nr_pages))
+               return;
+
+       memcg = get_mem_cgroup_from_mm(current->mm);
+       reclaim_high(memcg, nr_pages, GFP_KERNEL);
+       current->memcg_nr_pages_over_high = 0;
+
+       /*
+        * memory.high is breached and reclaim is unable to keep up. Throttle
+        * allocators proactively to slow down excessive growth.
+        */
+       penalty_jiffies = calculate_high_delay(memcg, nr_pages);
 
        /*
         * Don't sleep if the amount of jiffies this memcg owes us is so low
@@ -4027,7 +4050,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        struct mem_cgroup_thresholds *thresholds;
        struct mem_cgroup_threshold_ary *new;
        unsigned long usage;
-       int i, j, size;
+       int i, j, size, entries;
 
        mutex_lock(&memcg->thresholds_lock);
 
@@ -4047,14 +4070,20 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg,
        __mem_cgroup_threshold(memcg, type == _MEMSWAP);
 
        /* Calculate new number of threshold */
-       size = 0;
+       size = entries = 0;
        for (i = 0; i < thresholds->primary->size; i++) {
                if (thresholds->primary->entries[i].eventfd != eventfd)
                        size++;
+               else
+                       entries++;
        }
 
        new = thresholds->spare;
 
+       /* If no items related to eventfd have been cleared, nothing to do */
+       if (!entries)
+               goto unlock;
+
        /* Set thresholds array to NULL if we don't have thresholds */
        if (!size) {
                kfree(new);
@@ -6682,19 +6711,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
        if (!mem_cgroup_sockets_enabled)
                return;
 
-       /*
-        * Socket cloning can throw us here with sk_memcg already
-        * filled. It won't however, necessarily happen from
-        * process context. So the test for root memcg given
-        * the current task's memcg won't help us in this case.
-        *
-        * Respecting the original socket's memcg is a better
-        * decision in this case.
-        */
-       if (sk->sk_memcg) {
-               css_get(&sk->sk_memcg->css);
+       /* Do not associate the sock with an unrelated interrupted task's memcg. */
+       if (in_interrupt())
                return;
-       }
 
        rcu_read_lock();
        memcg = mem_cgroup_from_task(current);
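
To get a feel for the scaling in calculate_high_delay() above: with MEMCG_DELAY_SCALING_SHIFT = 14 (shown in the context) and assuming the usual MEMCG_DELAY_PRECISION_SHIFT of 20, the penalty reduces to roughly f^2 * 2^(20-14) * HZ = f^2 * 64 * HZ jiffies, where f is the worst overage across the ancestry as a fraction of memory.high:

	f = 1/8  (12.5% over)  ->  (1/64)  * 64 * HZ = HZ    (~1 s per batch)
	f = 1/16 (6.25% over)  ->  (1/256) * 64 * HZ = HZ/4  (~250 ms)

with the result clamped to MEMCG_MAX_HIGH_DELAY_JIFFIES, so small overages are barely throttled while runaway growth quickly hits the cap.
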
index ef3973a..06852b8 100644 (file)
@@ -307,7 +307,8 @@ static void mn_hlist_release(struct mmu_notifier_subscriptions *subscriptions,
         * ->release returns.
         */
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist)
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu))
                /*
                 * If ->release runs before mmu_notifier_unregister it must be
                 * handled, as it's the only way for the driver to flush all
@@ -370,7 +371,8 @@ int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_flush_young)
                        young |= subscription->ops->clear_flush_young(
                                subscription, mm, start, end);
@@ -389,7 +391,8 @@ int __mmu_notifier_clear_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->clear_young)
                        young |= subscription->ops->clear_young(subscription,
                                                                mm, start, end);
@@ -407,7 +410,8 @@ int __mmu_notifier_test_young(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->test_young) {
                        young = subscription->ops->test_young(subscription, mm,
                                                              address);
@@ -428,7 +432,8 @@ void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->change_pte)
                        subscription->ops->change_pte(subscription, mm, address,
                                                      pte);
@@ -476,7 +481,8 @@ static int mn_hlist_invalidate_range_start(
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                const struct mmu_notifier_ops *ops = subscription->ops;
 
                if (ops->invalidate_range_start) {
@@ -528,7 +534,8 @@ mn_hlist_invalidate_end(struct mmu_notifier_subscriptions *subscriptions,
        int id;
 
        id = srcu_read_lock(&srcu);
-       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist) {
+       hlist_for_each_entry_rcu(subscription, &subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                /*
                 * Call invalidate_range here too to avoid the need for the
                 * subsystem of having to register an invalidate_range_end
@@ -582,7 +589,8 @@ void __mmu_notifier_invalidate_range(struct mm_struct *mm,
 
        id = srcu_read_lock(&srcu);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                srcu_read_lock_held(&srcu)) {
                if (subscription->ops->invalidate_range)
                        subscription->ops->invalidate_range(subscription, mm,
                                                            start, end);
@@ -714,7 +722,8 @@ find_get_mmu_notifier(struct mm_struct *mm, const struct mmu_notifier_ops *ops)
 
        spin_lock(&mm->notifier_subscriptions->lock);
        hlist_for_each_entry_rcu(subscription,
-                                &mm->notifier_subscriptions->list, hlist) {
+                                &mm->notifier_subscriptions->list, hlist,
+                                lockdep_is_held(&mm->notifier_subscriptions->lock)) {
                if (subscription->ops != ops)
                        continue;
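
All of these hunks thread the optional lockdep expression through hlist_for_each_entry_rcu(): it tells the RCU list-debugging code which protection, other than rcu_read_lock(), makes the traversal legal, suppressing false-positive splats for SRCU readers and for updaters holding the subscription spinlock. In sketch form:

	/* Reader side, protected by SRCU rather than rcu_read_lock(): */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(sub, &head, hlist, srcu_read_lock_held(&srcu))
		visit(sub);
	srcu_read_unlock(&srcu, id);

	/* Updater side, protected by a spinlock: */
	spin_lock(&lock);
	hlist_for_each_entry_rcu(sub, &head, hlist, lockdep_is_held(&lock))
		visit(sub);
	spin_unlock(&lock);
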
 
index bd2b4e5..318df4e 100644 (file)
@@ -370,10 +370,14 @@ void vm_unmap_aliases(void)
 EXPORT_SYMBOL_GPL(vm_unmap_aliases);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture
+ * chose not to have them.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
+{
+}
+
+void __weak vmalloc_sync_unmappings(void)
 {
 }
 
index 17dc00e..6589b41 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,8 +1973,6 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
 
        if (node == NUMA_NO_NODE)
                searchnode = numa_mem_id();
-       else if (!node_present_pages(node))
-               searchnode = node_to_mem_node(node);
 
        object = get_partial_node(s, get_node(s, searchnode), c, flags);
        if (object || node != NUMA_NO_NODE)
@@ -2563,17 +2561,27 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        struct page *page;
 
        page = c->page;
-       if (!page)
+       if (!page) {
+               /*
+                * if the node is not online or has no normal memory, just
+                * ignore the node constraint
+                */
+               if (unlikely(node != NUMA_NO_NODE &&
+                            !node_state(node, N_NORMAL_MEMORY)))
+                       node = NUMA_NO_NODE;
                goto new_slab;
+       }
 redo:
 
        if (unlikely(!node_match(page, node))) {
-               int searchnode = node;
-
-               if (node != NUMA_NO_NODE && !node_present_pages(node))
-                       searchnode = node_to_mem_node(node);
-
-               if (unlikely(!node_match(page, searchnode))) {
+               /*
+                * same as above but node_match() being false already
+                * implies node != NUMA_NO_NODE
+                */
+               if (!node_state(node, N_NORMAL_MEMORY)) {
+                       node = NUMA_NO_NODE;
+                       goto redo;
+               } else {
                        stat(s, ALLOC_NODE_MISMATCH);
                        deactivate_slab(s, page, c->freelist, c);
                        goto new_slab;
@@ -2997,11 +3005,13 @@ redo:
        barrier();
 
        if (likely(page == c->page)) {
-               set_freepointer(s, tail_obj, c->freelist);
+               void **freelist = READ_ONCE(c->freelist);
+
+               set_freepointer(s, tail_obj, freelist);
 
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
-                               c->freelist, tid,
+                               freelist, tid,
                                head, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_free", s, tid);
@@ -3175,6 +3185,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
                if (unlikely(!object)) {
                        /*
+                        * We may have removed an object from c->freelist using
+                        * the fastpath in the previous iteration; in that case,
+                        * c->tid has not been bumped yet.
+                        * Since ___slab_alloc() may reenable interrupts while
+                        * allocating memory, we should bump c->tid now.
+                        */
+                       c->tid = next_tid(c->tid);
+
+                       /*
                         * Invoking slow path likely have side-effect
                         * of re-populating per CPU c->freelist
                         */
index 596b2a4..aadb729 100644 (file)
@@ -734,6 +734,7 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
        struct mem_section *ms = __pfn_to_section(pfn);
        bool section_is_early = early_section(ms);
        struct page *memmap = NULL;
+       bool empty;
        unsigned long *subsection_map = ms->usage
                ? &ms->usage->subsection_map[0] : NULL;
 
@@ -764,7 +765,8 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
         * For 2/ and 3/ the SPARSEMEM_VMEMMAP={y,n} cases are unified
         */
        bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
-       if (bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION)) {
+       empty = bitmap_empty(subsection_map, SUBSECTIONS_PER_SECTION);
+       if (empty) {
                unsigned long section_nr = pfn_to_section_nr(pfn);
 
                /*
@@ -779,13 +781,15 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                        ms->usage = NULL;
                }
                memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
-               ms->section_mem_map = (unsigned long)NULL;
        }
 
        if (section_is_early && memmap)
                free_map_bootmem(memmap);
        else
                depopulate_section_memmap(pfn, nr_pages, altmap);
+
+       if (empty)
+               ms->section_mem_map = (unsigned long)NULL;
 }
 
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
index 1f46c3b..6b8eeb0 100644 (file)
@@ -1295,7 +1295,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
         * First make sure the mappings are removed from all page-tables
         * before they are freed.
         */
-       vmalloc_sync_all();
+       vmalloc_sync_unmappings();
 
        /*
         * TODO: to calculate a flush range without looping.
@@ -3128,16 +3128,19 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*
- * Implement a stub for vmalloc_sync_all() if the architecture chose not to
- * have one.
+ * Implement stubs for vmalloc_sync_[un]mappings() if the architecture chose
+ * not to have them.
  *
  * The purpose of this function is to make sure the vmalloc area
  * mappings are identical in all page-tables in the system.
  */
-void __weak vmalloc_sync_all(void)
+void __weak vmalloc_sync_mappings(void)
 {
 }
 
+void __weak vmalloc_sync_unmappings(void)
+{
+}
 
 static int f(pte_t *pte, unsigned long addr, void *data)
 {
index f020950..a7c8dd7 100644 (file)
@@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 
        lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
+       /* interface already disabled by batadv_iv_ogm_iface_disable */
+       if (!*ogm_buff)
+               return;
+
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
         * hardif_activate_interface() where the originator mac is set and
index 03c7cdd..195d2d6 100644 (file)
@@ -112,7 +112,8 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
 
-       list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
+       list_for_each_entry_rcu(caifd, &caifdevs->list, list,
+                               lockdep_rtnl_is_held()) {
                if (caifd->netdev == dev)
                        return caifd;
        }
index 5e22080..b831c55 100644 (file)
@@ -3352,34 +3352,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
                                  struct genl_info *info,
                                  union devlink_param_value *value)
 {
+       struct nlattr *param_data;
        int len;
 
-       if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
-           !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
+       param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
+
+       if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
                return -EINVAL;
 
        switch (param->type) {
        case DEVLINK_PARAM_TYPE_U8:
-               value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u8))
+                       return -EINVAL;
+               value->vu8 = nla_get_u8(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U16:
-               value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u16))
+                       return -EINVAL;
+               value->vu16 = nla_get_u16(param_data);
                break;
        case DEVLINK_PARAM_TYPE_U32:
-               value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
+               if (nla_len(param_data) != sizeof(u32))
+                       return -EINVAL;
+               value->vu32 = nla_get_u32(param_data);
                break;
        case DEVLINK_PARAM_TYPE_STRING:
-               len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
-                             nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
-               if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
+               len = strnlen(nla_data(param_data), nla_len(param_data));
+               if (len == nla_len(param_data) ||
                    len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
                        return -EINVAL;
-               strcpy(value->vstr,
-                      nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
+               strcpy(value->vstr, nla_data(param_data));
                break;
        case DEVLINK_PARAM_TYPE_BOOL:
-               value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
-                              true : false;
+               if (param_data && nla_len(param_data))
+                       return -EINVAL;
+               value->vbool = nla_get_flag(param_data);
                break;
        }
        return 0;
@@ -5951,6 +5958,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
        [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
        [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
+       [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
+       [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
        [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
        [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
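
The added checks follow the standard pattern for netlink attributes whose lengths are not pinned down by the policy: nla_get_u8/16/32() read the payload blindly, so the handler must verify nla_len() itself, and a boolean modelled as a flag must carry no payload at all, since nla_get_flag() merely tests the attribute's presence. The invariant, in sketch form:

	/* Fixed-width attribute: the payload must be exactly that wide. */
	if (nla_len(attr) != sizeof(u32))
		return -EINVAL;
	val = nla_get_u32(attr);

	/* Flag attribute: presence is the value; any payload is malformed. */
	if (attr && nla_len(attr))
		return -EINVAL;
	set = nla_get_flag(attr);	/* NULL-safe: returns !!attr */
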
index 0642f91..b4c87fe 100644 (file)
@@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
        kfree(css_cls_state(css));
 }
 
+/*
+ * To avoid stalling socket creation for tasks with a large number of threads
+ * and open sockets, release file_lock every 1000 iterated descriptors.
+ * New sockets will already have been created with the new classid.
+ */
+
+struct update_classid_context {
+       u32 classid;
+       unsigned int batch;
+};
+
+#define UPDATE_CLASSID_BATCH 1000
+
 static int update_classid_sock(const void *v, struct file *file, unsigned n)
 {
        int err;
+       struct update_classid_context *ctx = (void *)v;
        struct socket *sock = sock_from_file(file, &err);
 
        if (sock) {
                spin_lock(&cgroup_sk_update_lock);
-               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
-                                       (unsigned long)v);
+               sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
                spin_unlock(&cgroup_sk_update_lock);
        }
+       if (--ctx->batch == 0) {
+               ctx->batch = UPDATE_CLASSID_BATCH;
+               return n + 1;
+       }
        return 0;
 }
 
+static void update_classid_task(struct task_struct *p, u32 classid)
+{
+       struct update_classid_context ctx = {
+               .classid = classid,
+               .batch = UPDATE_CLASSID_BATCH
+       };
+       unsigned int fd = 0;
+
+       do {
+               task_lock(p);
+               fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
+               task_unlock(p);
+               cond_resched();
+       } while (fd);
+}
+
 static void cgrp_attach(struct cgroup_taskset *tset)
 {
        struct cgroup_subsys_state *css;
        struct task_struct *p;
 
        cgroup_taskset_for_each(p, css, tset) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)css_cls_state(css)->classid);
-               task_unlock(p);
+               update_classid_task(p, css_cls_state(css)->classid);
        }
 }
 
@@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
 
        css_task_iter_start(css, 0, &it);
        while ((p = css_task_iter_next(&it))) {
-               task_lock(p);
-               iterate_fd(p->files, 0, update_classid_sock,
-                          (void *)(unsigned long)cs->classid);
-               task_unlock(p);
+               update_classid_task(p, cs->classid);
                cond_resched();
        }
        css_task_iter_end(&it);
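
The batching relies on iterate_fd()'s contract: the walk runs under files->file_lock starting at descriptor n, stops as soon as the callback returns non-zero, and returns that value (0 means the table was fully traversed). Returning n + 1 from update_classid_sock() therefore both drops file_lock after a batch and tells update_classid_task() where to resume:

	/* fs/file.c (sketch of the caller-visible semantics) */
	int iterate_fd(struct files_struct *files, unsigned n,
		       int (*f)(const void *, struct file *, unsigned),
		       const void *p);
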
index a4c8fac..8f71684 100644 (file)
@@ -1830,7 +1830,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
                atomic_set(&newsk->sk_zckey, 0);
 
                sock_reset_flag(newsk, SOCK_DONE);
-               mem_cgroup_sk_alloc(newsk);
+
+               /* sk->sk_memcg will be populated at accept() time */
+               newsk->sk_memcg = NULL;
+
                cgroup_sk_alloc(&newsk->sk_cgrp_data);
 
                rcu_read_lock();
index a7662e7..760e6ea 100644 (file)
@@ -117,7 +117,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 /* port.c */
 int dsa_port_set_state(struct dsa_port *dp, u8 state,
                       struct switchdev_trans *trans);
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
+void dsa_port_disable_rt(struct dsa_port *dp);
 void dsa_port_disable(struct dsa_port *dp);
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
index 774facb..ec13dc6 100644 (file)
@@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
                pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
 }
 
-int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
@@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
 
+       if (dp->pl)
+               phylink_start(dp->pl);
+
        return 0;
 }
 
-void dsa_port_disable(struct dsa_port *dp)
+int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+{
+       int err;
+
+       rtnl_lock();
+       err = dsa_port_enable_rt(dp, phy);
+       rtnl_unlock();
+
+       return err;
+}
+
+void dsa_port_disable_rt(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
        int port = dp->index;
 
+       if (dp->pl)
+               phylink_stop(dp->pl);
+
        if (!dp->bridge_dev)
                dsa_port_set_state_now(dp, BR_STATE_DISABLED);
 
@@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
                ds->ops->port_disable(ds, port);
 }
 
+void dsa_port_disable(struct dsa_port *dp)
+{
+       rtnl_lock();
+       dsa_port_disable_rt(dp);
+       rtnl_unlock();
+}
+
 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
 {
        struct dsa_notifier_bridge_info info = {
@@ -614,10 +638,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
                goto err_phy_connect;
        }
 
-       rtnl_lock();
-       phylink_start(dp->pl);
-       rtnl_unlock();
-
        return 0;
 
 err_phy_connect:
@@ -628,9 +648,14 @@ err_phy_connect:
 int dsa_port_link_register_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
+       struct device_node *phy_np;
 
-       if (!ds->ops->adjust_link)
-               return dsa_port_phylink_register(dp);
+       if (!ds->ops->adjust_link) {
+               phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
+               if (of_phy_is_fixed_link(dp->dn) || phy_np)
+                       return dsa_port_phylink_register(dp);
+               return 0;
+       }
 
        dev_warn(ds->dev,
                 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
@@ -645,11 +670,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
 {
        struct dsa_switch *ds = dp->ds;
 
-       if (!ds->ops->adjust_link) {
+       if (!ds->ops->adjust_link && dp->pl) {
                rtnl_lock();
                phylink_disconnect_phy(dp->pl);
                rtnl_unlock();
                phylink_destroy(dp->pl);
+               dp->pl = NULL;
                return;
        }
 
index 088c886..ddc0f92 100644
@@ -88,12 +88,10 @@ static int dsa_slave_open(struct net_device *dev)
                        goto clear_allmulti;
        }
 
-       err = dsa_port_enable(dp, dev->phydev);
+       err = dsa_port_enable_rt(dp, dev->phydev);
        if (err)
                goto clear_promisc;
 
-       phylink_start(dp->pl);
-
        return 0;
 
 clear_promisc:
@@ -114,9 +112,7 @@ static int dsa_slave_close(struct net_device *dev)
        struct net_device *master = dsa_slave_to_master(dev);
        struct dsa_port *dp = dsa_slave_to_port(dev);
 
-       phylink_stop(dp->pl);
-
-       dsa_port_disable(dp);
+       dsa_port_disable_rt(dp);
 
        dev_mc_unsync(master, dev);
        dev_uc_unsync(master, dev);
index 2c7a38d..0672b2f 100644
@@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
        [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
        [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
        [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
+       [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
        [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
        [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
        [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
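
These additions fill previously missing nla_policy slots, so the attributes are length-validated during parsing rather than dereferenced unchecked. A hedged sketch of how such a policy is consumed (the hdrlen, extack and variable names here are illustrative):

    struct nlattr *tb[IEEE802154_ATTR_MAX + 1];
    int err;

    err = nlmsg_parse(nlh, 0, tb, IEEE802154_ATTR_MAX,
                      ieee802154_policy, extack);
    if (err)
            return err;     /* e.g. an NLA_U8 attribute with a bad length */

    if (tb[IEEE802154_ATTR_DEV_TYPE])
            dev_type = nla_get_u8(tb[IEEE802154_ATTR_DEV_TYPE]);
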
index 5fd6e8e..66fdbfe 100644
@@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
 }
 EXPORT_SYMBOL_GPL(gre_del_protocol);
 
-/* Fills in tpi and returns header length to be pulled. */
+/* Fills in tpi and returns header length to be pulled.
+ * Note that the caller must use pskb_may_pull() before pulling the GRE header.
+ */
 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                     bool *csum_err, __be16 proto, int nhs)
 {
@@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
         * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
         */
        if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
+               u8 _val, *val;
+
+               val = skb_header_pointer(skb, nhs + hdr_len,
+                                        sizeof(_val), &_val);
+               if (!val)
+                       return -EINVAL;
                tpi->proto = proto;
-               if ((*(u8 *)options & 0xF0) != 0x40)
+               if ((*val & 0xF0) != 0x40)
                        hdr_len += 4;
        }
        tpi->hdr_len = hdr_len;
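
The fix replaces a direct dereference of options (which can point past the linear skb data) with skb_header_pointer(), which bounds-checks the offset and copies the bytes into a local buffer when they are not directly mapped. The general pattern:

    u8 _byte, *byte;

    byte = skb_header_pointer(skb, offset, sizeof(_byte), &_byte);
    if (!byte)
            return -EINVAL; /* packet too short to contain the byte */
    /* *byte is now safe to read, linear skb or not */
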
index a4db79b..d545fb9 100644
@@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
+
 out:
        release_sock(sk);
+       if (newsk && mem_cgroup_sockets_enabled) {
+               int amt;
+
+               /* atomically get the memory usage, set and charge the
+                * newsk->sk_memcg.
+                */
+               lock_sock(newsk);
+
+               /* The socket has not been accepted yet, no need to look at
+                * newsk->sk_wmem_queued.
+                */
+               amt = sk_mem_pages(newsk->sk_forward_alloc +
+                                  atomic_read(&newsk->sk_rmem_alloc));
+               mem_cgroup_sk_alloc(newsk);
+               if (newsk->sk_memcg && amt)
+                       mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
+
+               release_sock(newsk);
+       }
        if (req)
                reqsk_put(req);
        return newsk;
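
Together with the sk_clone_lock() hunk earlier (which now leaves newsk->sk_memcg NULL), this defers the memcg association of passively opened sockets to accept() time and retroactively charges whatever receive memory accumulated while the socket waited on the accept queue. A rough worked example, assuming the usual 4096-byte memory quantum:

    /* 10000 bytes queued before accept() charge three quanta:
     *   sk_mem_pages(10000) == (10000 + 4096 - 1) >> 12 == 3
     */
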
index f11e997..8c83775 100644
@@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
                aux = handler->idiag_get_aux_size(sk, net_admin);
 
        return    nla_total_size(sizeof(struct tcp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
                + nla_total_size(TCP_CA_NAME_MAX)
                + nla_total_size(sizeof(struct tcpvegas_info))
@@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
        if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
                goto errout;
 
+       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+           ext & (1 << (INET_DIAG_TCLASS - 1))) {
+               u32 classid = 0;
+
+#ifdef CONFIG_SOCK_CGROUP_DATA
+               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
+#endif
+               /* Fall back to socket priority if class id isn't set.
+                * Classful qdiscs use it as direct reference to class.
+                * For cgroup2 classid is always zero.
+                */
+               if (!classid)
+                       classid = sk->sk_priority;
+
+               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
+                       goto errout;
+       }
+
        r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
        r->idiag_inode = sock_i_ino(sk);
 
@@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
                        goto errout;
        }
 
-       if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
-           ext & (1 << (INET_DIAG_TCLASS - 1))) {
-               u32 classid = 0;
-
-#ifdef CONFIG_SOCK_CGROUP_DATA
-               classid = sock_cgroup_classid(&sk->sk_cgrp_data);
-#endif
-               /* Fallback to socket priority if class id isn't set.
-                * Classful qdiscs use it as direct reference to class.
-                * For cgroup2 classid is always zero.
-                */
-               if (!classid)
-                       classid = sk->sk_priority;
-
-               if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
-                       goto errout;
-       }
-
 out:
        nlmsg_end(skb, nlh);
        return 0;
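
The per-attribute sizes removed above move behind a shared helper. A sketch of inet_diag_msg_attrs_size() inferred from those removed lines (the real definition lives in the inet_diag header and may carry extra config-dependent terms):

    static inline size_t inet_diag_msg_attrs_size(void)
    {
            return    nla_total_size(1)  /* INET_DIAG_SHUTDOWN */
                    + nla_total_size(1)  /* INET_DIAG_TOS */
                    + nla_total_size(1)  /* INET_DIAG_TCLASS */
                    + nla_total_size(4)  /* INET_DIAG_MARK */
                    + nla_total_size(4); /* INET_DIAG_CLASS_ID */
    }
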
index e35736b..a93e7d1 100644
@@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
        if (IS_ERR(sk))
                return PTR_ERR(sk);
 
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep) {
                sock_put(sk);
index 910555a..dccd228 100644
@@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
                goto out;
 
        err = -ENOMEM;
-       rep = nlmsg_new(sizeof(struct inet_diag_msg) +
-                       sizeof(struct inet_diag_meminfo) + 64,
+       rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
+                       inet_diag_msg_attrs_size() +
+                       nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
                        GFP_KERNEL);
        if (!rep)
                goto out;
index cb493e1..46d614b 100644
@@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
 }
 
 static void
-cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
+cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
+                    bool del_rt, bool del_peer)
 {
        struct fib6_info *f6i;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (f6i) {
                if (del_rt)
@@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
 
        if (action != CLEANUP_PREFIX_RT_NOP) {
                cleanup_prefix_route(ifp, expires,
-                       action == CLEANUP_PREFIX_RT_DEL);
+                       action == CLEANUP_PREFIX_RT_DEL, false);
        }
 
        /* clean up prefsrc entries */
@@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
            (dev->type != ARPHRD_NONE) &&
            (dev->type != ARPHRD_RAWIP)) {
                /* Alas, we support only Ethernet autoconfiguration. */
+               idev = __in6_dev_get(dev);
+               if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
+                   dev->flags & IFF_MULTICAST)
+                       ipv6_mc_up(idev);
                return;
        }
 
@@ -4586,12 +4592,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int modify_prefix_route(struct inet6_ifaddr *ifp,
-                              unsigned long expires, u32 flags)
+                              unsigned long expires, u32 flags,
+                              bool modify_peer)
 {
        struct fib6_info *f6i;
        u32 prio;
 
-       f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
+       f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                       ifp->prefix_len,
                                        ifp->idev->dev, 0, RTF_DEFAULT, true);
        if (!f6i)
                return -ENOENT;
@@ -4602,7 +4610,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                ip6_del_rt(dev_net(ifp->idev->dev), f6i);
 
                /* add new one */
-               addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
+               addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
+                                     ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
        } else {
@@ -4624,6 +4633,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        unsigned long timeout;
        bool was_managetempaddr;
        bool had_prefixroute;
+       bool new_peer = false;
 
        ASSERT_RTNL();
 
@@ -4655,6 +4665,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                cfg->preferred_lft = timeout;
        }
 
+       if (cfg->peer_pfx &&
+           memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
+               if (!ipv6_addr_any(&ifp->peer_addr))
+                       cleanup_prefix_route(ifp, expires, true, true);
+               new_peer = true;
+       }
+
        spin_lock_bh(&ifp->lock);
        was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
        had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
@@ -4670,6 +4687,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
        if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
                ifp->rt_priority = cfg->rt_priority;
 
+       if (new_peer)
+               ifp->peer_addr = *cfg->peer_pfx;
+
        spin_unlock_bh(&ifp->lock);
        if (!(ifp->flags&IFA_F_TENTATIVE))
                ipv6_ifa_notify(0, ifp);
@@ -4678,7 +4698,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                int rc = -ENOENT;
 
                if (had_prefixroute)
-                       rc = modify_prefix_route(ifp, expires, flags);
+                       rc = modify_prefix_route(ifp, expires, flags, false);
 
                /* prefix route could have been deleted; if so restore it */
                if (rc == -ENOENT) {
@@ -4686,6 +4706,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
                                              ifp->rt_priority, ifp->idev->dev,
                                              expires, flags, GFP_KERNEL);
                }
+
+               if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
+                       rc = modify_prefix_route(ifp, expires, flags, true);
+
+               if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
+                       addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             expires, flags, GFP_KERNEL);
+               }
        } else if (had_prefixroute) {
                enum cleanup_prefix_rt_t action;
                unsigned long rt_expires;
@@ -4696,7 +4725,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
 
                if (action != CLEANUP_PREFIX_RT_NOP) {
                        cleanup_prefix_route(ifp, rt_expires,
-                               action == CLEANUP_PREFIX_RT_DEL);
+                               action == CLEANUP_PREFIX_RT_DEL, false);
                }
        }
 
@@ -5983,9 +6012,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
-                       addrconf_prefix_route(&ifp->peer_addr, 128, 0,
-                                             ifp->idev->dev, 0, 0,
-                                             GFP_ATOMIC);
+                       addrconf_prefix_route(&ifp->peer_addr, 128,
+                                             ifp->rt_priority, ifp->idev->dev,
+                                             0, 0, GFP_ATOMIC);
                break;
        case RTM_DELADDR:
                if (ifp->idev->cnf.forwarding)
index ab7f124..8c52efe 100644
@@ -268,7 +268,7 @@ static int seg6_do_srh(struct sk_buff *skb)
                skb_mac_header_rebuild(skb);
                skb_push(skb, skb->mac_len);
 
-               err = seg6_do_srh_encap(skb, tinfo->srh, NEXTHDR_NONE);
+               err = seg6_do_srh_encap(skb, tinfo->srh, IPPROTO_ETHERNET);
                if (err)
                        return err;
 
index 7cbc197..8165802 100644
@@ -282,7 +282,7 @@ static int input_action_end_dx2(struct sk_buff *skb,
        struct net_device *odev;
        struct ethhdr *eth;
 
-       if (!decap_and_validate(skb, NEXTHDR_NONE))
+       if (!decap_and_validate(skb, IPPROTO_ETHERNET))
                goto drop;
 
        if (!pskb_may_pull(skb, ETH_HLEN))
index d699833..38a0383 100644
@@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
                }
        }
 
-       if (!(mpath->flags & MESH_PATH_RESOLVING))
+       if (!(mpath->flags & MESH_PATH_RESOLVING) &&
+           mesh_path_sel_is_hwmp(sdata))
                mesh_queue_preq(mpath, PREQ_Q_F_START);
 
        if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
index 45acd87..fd2c315 100644
@@ -334,6 +334,8 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
        struct mptcp_sock *msk;
        unsigned int ack_size;
        bool ret = false;
+       bool can_ack;
+       u64 ack_seq;
        u8 tcp_fin;
 
        if (skb) {
@@ -360,9 +362,22 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
                ret = true;
        }
 
+       /* passive sockets' msk will set 'can_ack' only after accept(), even
+        * though the first subflow may already have the remote key handy
+        */
+       can_ack = true;
        opts->ext_copy.use_ack = 0;
        msk = mptcp_sk(subflow->conn);
-       if (!msk || !READ_ONCE(msk->can_ack)) {
+       if (likely(msk && READ_ONCE(msk->can_ack))) {
+               ack_seq = msk->ack_seq;
+       } else if (subflow->can_ack) {
+               mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
+               ack_seq++;
+       } else {
+               can_ack = false;
+       }
+
+       if (unlikely(!can_ack)) {
                *size = ALIGN(dss_size, 4);
                return ret;
        }
@@ -375,7 +390,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 
        dss_size += ack_size;
 
-       opts->ext_copy.data_ack = msk->ack_seq;
+       opts->ext_copy.data_ack = ack_seq;
        opts->ext_copy.ack64 = 1;
        opts->ext_copy.use_ack = 1;
 
index 410809c..4912069 100644
@@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(net->ct.stat, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
 
index b0930d4..b9cbe1e 100644
@@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                *pos = cpu + 1;
                return per_cpu_ptr(snet->stats, cpu);
        }
-
+       (*pos)++;
        return NULL;
 }
 
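Both seq_file fixes above, like the x_tables ones further down, enforce the same contract: a ->next() handler must advance *pos unconditionally, even on the call that returns NULL, otherwise the seq_file core can re-invoke it and emit duplicate records. Minimal shape, with a hypothetical lookup helper:

    static void *foo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
    {
            ++*pos;                         /* always advance first */
            return foo_lookup(seq, *pos);   /* NULL once exhausted */
    }
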
index d1318bd..38c680f 100644
@@ -1405,6 +1405,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
                                              lockdep_commit_lock_is_held(net));
                if (nft_dump_stats(skb, stats))
                        goto nla_put_failure;
+
+               if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
+                   nla_put_be32(skb, NFTA_CHAIN_FLAGS,
+                                htonl(NFT_CHAIN_HW_OFFLOAD)))
+                       goto nla_put_failure;
        }
 
        if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
@@ -6300,8 +6305,13 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
                goto err4;
 
        err = nft_register_flowtable_net_hooks(ctx.net, table, flowtable);
-       if (err < 0)
+       if (err < 0) {
+               list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) {
+                       list_del_rcu(&hook->list);
+                       kfree_rcu(hook, rcu);
+               }
                goto err4;
+       }
 
        err = nft_trans_flowtable_add(&ctx, NFT_MSG_NEWFLOWTABLE, flowtable);
        if (err < 0)
@@ -7378,13 +7388,8 @@ static void nf_tables_module_autoload(struct net *net)
        list_splice_init(&net->nft.module_list, &module_list);
        mutex_unlock(&net->nft.commit_mutex);
        list_for_each_entry_safe(req, next, &module_list, list) {
-               if (req->done) {
-                       list_del(&req->list);
-                       kfree(req);
-               } else {
-                       request_module("%s", req->module);
-                       req->done = true;
-               }
+               request_module("%s", req->module);
+               req->done = true;
        }
        mutex_lock(&net->nft.commit_mutex);
        list_splice(&module_list, &net->nft.module_list);
@@ -8167,6 +8172,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
        __nft_release_tables(net);
        mutex_unlock(&net->nft.commit_mutex);
        WARN_ON_ONCE(!list_empty(&net->nft.tables));
+       WARN_ON_ONCE(!list_empty(&net->nft.module_list));
 }
 
 static struct pernet_operations nf_tables_net_ops = {
index de3a959..a5f294a 100644
@@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
        [NFCTH_NAME] = { .type = NLA_NUL_STRING,
                         .len = NF_CT_HELPER_NAME_LEN-1 },
        [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
+       [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
+       [NFCTH_STATUS] = { .type = NLA_U32, },
 };
 
 static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
index ff9ac8a..eac4a90 100644
@@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
        .name           = "nat",
        .type           = NFT_CHAIN_T_NAT,
        .family         = NFPROTO_INET,
+       .owner          = THIS_MODULE,
        .hook_mask      = (1 << NF_INET_PRE_ROUTING) |
                          (1 << NF_INET_LOCAL_IN) |
                          (1 << NF_INET_LOCAL_OUT) |
index 1993af3..a7de3a5 100644
@@ -129,6 +129,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
        [NFTA_PAYLOAD_LEN]              = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_TYPE]        = { .type = NLA_U32 },
        [NFTA_PAYLOAD_CSUM_OFFSET]      = { .type = NLA_U32 },
+       [NFTA_PAYLOAD_CSUM_FLAGS]       = { .type = NLA_U32 },
 };
 
 static int nft_payload_init(const struct nft_ctx *ctx,
index 4c3f2e2..764e886 100644
@@ -339,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
        [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
        [NFTA_TUNNEL_KEY_TOS]   = { .type = NLA_U8, },
        [NFTA_TUNNEL_KEY_TTL]   = { .type = NLA_U8, },
+       [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
+       [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
        [NFTA_TUNNEL_KEY_OPTS]  = { .type = NLA_NESTED, },
 };
 
index e27c6c5..cd2b034 100644
@@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
        struct nf_mttg_trav *trav = seq->private;
 
+       if (ppos != NULL)
+               ++(*ppos);
+
        switch (trav->class) {
        case MTTG_TRAV_INIT:
                trav->class = MTTG_TRAV_NFP_UNSPEC;
@@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
        default:
                return NULL;
        }
-
-       if (ppos != NULL)
-               ++*ppos;
        return trav;
 }
 
index 0a97080..225a7ab 100644
@@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        const struct recent_entry *e = v;
        const struct list_head *head = e->list.next;
 
+       (*pos)++;
        while (head == &t->iphash[st->bucket]) {
                if (++st->bucket >= ip_list_hash_size)
                        return NULL;
                head = t->iphash[st->bucket].next;
        }
-       (*pos)++;
        return list_entry(head, struct recent_entry, list);
 }
 
index edf3e28..5313f1c 100644
@@ -2434,7 +2434,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
                                                               in_skb->len))
                                WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
                                                    (u8 *)extack->bad_attr -
-                                                   in_skb->data));
+                                                   (u8 *)nlh));
                } else {
                        if (extack->cookie_len)
                                WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
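
This small change is really a base change: the bad-attribute offset reported to userspace should be relative to the offending message's own netlink header, and when several messages share one skb, in_skb->data is the wrong base. Numerically, with hypothetical offsets:

    /* bad attribute at skb offset 36, message header at skb offset 16:
     *   (u8 *)extack->bad_attr - (u8 *)nlh == 36 - 16 == 20
     * while subtracting in_skb->data would wrongly report 36
     */
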
index 6f1b096..43811b5 100644
@@ -181,13 +181,20 @@ exit:
 void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
                          struct sk_buff *skb)
 {
-       u8 gate = hdev->pipes[pipe].gate;
        u8 status = NFC_HCI_ANY_OK;
        struct hci_create_pipe_resp *create_info;
        struct hci_delete_pipe_noti *delete_info;
        struct hci_all_pipe_cleared_noti *cleared_info;
+       u8 gate;
 
-       pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
+       pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               status = NFC_HCI_ANY_E_NOK;
+               goto exit;
+       }
+
+       gate = hdev->pipes[pipe].gate;
 
        switch (cmd) {
        case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
@@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
                            struct sk_buff *skb)
 {
        int r = 0;
-       u8 gate = hdev->pipes[pipe].gate;
+       u8 gate;
+
+       if (pipe >= NFC_HCI_MAX_PIPES) {
+               pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+               goto exit;
+       }
 
+       gate = hdev->pipes[pipe].gate;
        if (gate == NFC_HCI_INVALID_GATE) {
                pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
                goto exit;
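
Both HCI receive paths now validate the wire-supplied pipe index before using it to subscript hdev->pipes[]; the debug print was also reworked so it no longer reads the array ahead of the check. The recurring shape:

    if (pipe >= NFC_HCI_MAX_PIPES)  /* untrusted index from the wire */
            goto exit;              /* reject before any pipes[] access */
    gate = hdev->pipes[pipe].gate;
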
index eee0ddd..e894254 100644
@@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
                                .len = NFC_DEVICE_NAME_MAXSIZE },
        [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
+       [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
        [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
@@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
        [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
        [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
                                     .len = NFC_FIRMWARE_NAME_MAXSIZE },
+       [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
        [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
+       [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
+       [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
        [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
 
 };
index c047afd..07a7dd1 100644
@@ -645,6 +645,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
        [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
        [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
        [OVS_PACKET_ATTR_MRU] = { .type = NLA_U16 },
+       [OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
 };
 
 static const struct genl_ops dp_packet_genl_ops[] = {
index 30c6879..e5b0986 100644
@@ -2274,6 +2274,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
                goto drop_n_account;
+
+       if (do_vnet &&
+           virtio_net_hdr_from_skb(skb, h.raw + macoff -
+                                   sizeof(struct virtio_net_hdr),
+                                   vio_le(), true, 0))
+               goto drop_n_account;
+
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
@@ -2286,12 +2293,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                        status |= TP_STATUS_LOSING;
        }
 
-       if (do_vnet &&
-           virtio_net_hdr_from_skb(skb, h.raw + macoff -
-                                   sizeof(struct virtio_net_hdr),
-                                   vio_le(), true, 0))
-               goto drop_n_account;
-
        po->stats.stats1.tp_packets++;
        if (copy_skb) {
                status |= TP_STATUS_COPY;
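
Moving the virtio_net_hdr write ahead of packet_increment_rx_head() applies a general ordering rule: do everything that can fail before mutating shared ring state, so the drop path needs no rollback. In miniature, with hypothetical helpers:

    slot = ring_current(ring);      /* reserved, not yet published */
    if (fill_slot(slot, skb) < 0)
            return -EINVAL;         /* nothing was published yet */
    ring_advance(ring);             /* publish only on success */
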
index a5a2954..371ad84 100644
@@ -744,6 +744,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
+       [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
 };
index 660fc45..b1eb12d 100644
@@ -564,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                prio = skb->priority;
                tc = netdev_get_prio_tc_map(dev, prio);
 
-               if (!(gate_mask & BIT(tc)))
+               if (!(gate_mask & BIT(tc))) {
+                       skb = NULL;
                        continue;
+               }
 
                len = qdisc_pkt_len(skb);
                guard = ktime_add_ns(taprio_get_time(q),
@@ -575,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
                 * guard band ...
                 */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   ktime_after(guard, entry->close_time))
+                   ktime_after(guard, entry->close_time)) {
+                       skb = NULL;
                        continue;
+               }
 
                /* ... and no budget. */
                if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
-                   atomic_sub_return(len, &entry->budget) < 0)
+                   atomic_sub_return(len, &entry->budget) < 0) {
+                       skb = NULL;
                        continue;
+               }
 
                skb = child->ops->dequeue(child);
                if (unlikely(!skb))
@@ -768,6 +774,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]           = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
        [TCA_TAPRIO_ATTR_FLAGS]                      = { .type = NLA_U32 },
+       [TCA_TAPRIO_ATTR_TXTIME_DELAY]               = { .type = NLA_U32 },
 };
 
 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
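
The three taprio hunks above fix a classic stale-variable bug: skb kept the previous iteration's peeked packet across each continue, so a later exit path could hand back a packet whose slot had actually been skipped. Reduced to its essence, with hypothetical helpers:

    struct sk_buff *skb = NULL;

    list_for_each_entry(entry, &entries, list) {
            skb = peek_entry(entry);        /* may be non-NULL... */
            if (!entry_usable(entry)) {
                    skb = NULL;             /* ...so clear before skipping */
                    continue;
            }
            break;
    }
    return skb;                             /* NULL unless something usable */
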
index 8a15146..1069d7a 100644
@@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
                addrcnt++;
 
        return    nla_total_size(sizeof(struct sctp_info))
-               + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
-               + nla_total_size(1) /* INET_DIAG_TOS */
-               + nla_total_size(1) /* INET_DIAG_TCLASS */
-               + nla_total_size(4) /* INET_DIAG_MARK */
-               + nla_total_size(4) /* INET_DIAG_CLASS_ID */
                + nla_total_size(addrlen * asoc->peer.transport_count)
                + nla_total_size(addrlen * addrcnt)
-               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + nla_total_size(sizeof(struct inet_diag_msg))
+               + inet_diag_msg_attrs_size()
+               + nla_total_size(sizeof(struct inet_diag_meminfo))
                + 64;
 }
 
index d6ba186..05b825b 100644
@@ -582,6 +582,7 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
        smc_smcr_terminate_all(smcibdev);
        smc_ib_cleanup_per_ibdev(smcibdev);
        ib_unregister_event_handler(&smcibdev->event_handler);
+       cancel_work_sync(&smcibdev->port_event_work);
        kfree(smcibdev);
 }
 
index b79a05d..2eecf15 100644
@@ -1707,7 +1707,8 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
 
 int __sys_accept4_file(struct file *file, unsigned file_flags,
                       struct sockaddr __user *upeer_sockaddr,
-                      int __user *upeer_addrlen, int flags)
+                      int __user *upeer_addrlen, int flags,
+                      unsigned long nofile)
 {
        struct socket *sock, *newsock;
        struct file *newfile;
@@ -1738,7 +1739,7 @@ int __sys_accept4_file(struct file *file, unsigned file_flags,
         */
        __module_get(newsock->ops->owner);
 
-       newfd = get_unused_fd_flags(flags);
+       newfd = __get_unused_fd_flags(flags, nofile);
        if (unlikely(newfd < 0)) {
                err = newfd;
                sock_release(newsock);
@@ -1807,7 +1808,8 @@ int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
        f = fdget(fd);
        if (f.file) {
                ret = __sys_accept4_file(f.file, 0, upeer_sockaddr,
-                                               upeer_addrlen, flags);
+                                               upeer_addrlen, flags,
+                                               rlimit(RLIMIT_NOFILE));
                if (f.flags)
                        fput(f.file);
        }
index 7c35094..bb98624 100644
@@ -116,6 +116,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_PRIO]            = { .type = NLA_U32 },
        [TIPC_NLA_PROP_TOL]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 },
+       [TIPC_NLA_PROP_MTU]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST]       = { .type = NLA_U32 },
        [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
 };
index 5b19e9f..ec5d677 100644
@@ -470,6 +470,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
        [NL80211_ATTR_STA_PLINK_STATE] =
                NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
+       [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
+       [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
        [NL80211_ATTR_MESH_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
@@ -531,6 +533,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MDID] = { .type = NLA_U16 },
        [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
                                  .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
+       [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
        [NL80211_ATTR_PEER_AID] =
                NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
        [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
@@ -561,6 +565,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
                NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
        [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
        [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
+       [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
        [NL80211_ATTR_MAC_MASK] = {
                .type = NLA_EXACT_LEN_WARN,
                .len = ETH_ALEN
index 85334dc..496d11c 100644
@@ -44,3 +44,10 @@ $(error-if,$(success, $(LD) -v | grep -q gold), gold linker '$(LD)' not supporte
 
 # gcc version including patch level
 gcc-version := $(shell,$(srctree)/scripts/gcc-version.sh $(CC))
+
+# machine bit flags
+#  $(m32-flag): -m32 if the compiler supports it, or an empty string otherwise.
+#  $(m64-flag): -m64 if the compiler supports it, or an empty string otherwise.
+cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1))
+m32-flag := $(cc-option-bit,-m32)
+m64-flag := $(cc-option-bit,-m64)
index ecddf83..ca08f2f 100644
@@ -48,6 +48,7 @@ KBUILD_CFLAGS += -Wno-initializer-overrides
 KBUILD_CFLAGS += -Wno-format
 KBUILD_CFLAGS += -Wno-sign-compare
 KBUILD_CFLAGS += -Wno-format-zero-length
+KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
 endif
 
 endif
index 548330e..feb3d55 100755
@@ -94,7 +94,7 @@ if (defined $opt{'o'}) {
 #
 while ( <$module_symvers> ) {
        chomp;
-       my (undef, $symbol, $namespace, $module, $gpl) = split('\t');
+       my (undef, $symbol, $module, $gpl, $namespace) = split('\t');
        $SYMBOL { $symbol } =  [ $module , "0" , $symbol, $gpl];
 }
 close($module_symvers);
index 0133dfa..3e8dea6 100644
@@ -195,13 +195,13 @@ static struct sym_entry *read_symbol(FILE *in)
                return NULL;
        }
 
-       if (is_ignored_symbol(name, type))
-               return NULL;
-
-       /* Ignore most absolute/undefined (?) symbols. */
        if (strcmp(name, "_text") == 0)
                _text = addr;
 
+       /* Ignore most absolute/undefined (?) symbols. */
+       if (is_ignored_symbol(name, type))
+               return NULL;
+
        check_symbol_range(name, addr, text_ranges, ARRAY_SIZE(text_ranges));
        check_symbol_range(name, addr, &percpu_range, 1);
 
index 7edfdb2..55a0a2e 100644
@@ -308,7 +308,8 @@ static const char *sec_name(struct elf_info *elf, int secindex)
 
 static void *sym_get_data(const struct elf_info *info, const Elf_Sym *sym)
 {
-       Elf_Shdr *sechdr = &info->sechdrs[sym->st_shndx];
+       unsigned int secindex = get_secindex(info, sym);
+       Elf_Shdr *sechdr = &info->sechdrs[secindex];
        unsigned long offset;
 
        offset = sym->st_value;
@@ -2427,7 +2428,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
 }
 
 /* parse Module.symvers file. line format:
- * 0x12345678<tab>symbol<tab>module[[<tab>export]<tab>something]
+ * 0x12345678<tab>symbol<tab>module<tab>export<tab>namespace
  **/
 static void read_dump(const char *fname, unsigned int kernel)
 {
@@ -2440,7 +2441,7 @@ static void read_dump(const char *fname, unsigned int kernel)
                return;
 
        while ((line = get_next_line(&pos, file, size))) {
-               char *symname, *namespace, *modname, *d, *export, *end;
+               char *symname, *namespace, *modname, *d, *export;
                unsigned int crc;
                struct module *mod;
                struct symbol *s;
@@ -2448,16 +2449,16 @@ static void read_dump(const char *fname, unsigned int kernel)
                if (!(symname = strchr(line, '\t')))
                        goto fail;
                *symname++ = '\0';
-               if (!(namespace = strchr(symname, '\t')))
-                       goto fail;
-               *namespace++ = '\0';
-               if (!(modname = strchr(namespace, '\t')))
+               if (!(modname = strchr(symname, '\t')))
                        goto fail;
                *modname++ = '\0';
-               if ((export = strchr(modname, '\t')) != NULL)
-                       *export++ = '\0';
-               if (export && ((end = strchr(export, '\t')) != NULL))
-                       *end = '\0';
+               if (!(export = strchr(modname, '\t')))
+                       goto fail;
+               *export++ = '\0';
+               if (!(namespace = strchr(export, '\t')))
+                       goto fail;
+               *namespace++ = '\0';
+
                crc = strtoul(line, &d, 16);
                if (*symname == '\0' || *modname == '\0' || *d != '\0')
                        goto fail;
@@ -2508,9 +2509,9 @@ static void write_dump(const char *fname)
                                namespace = symbol->namespace;
                                buf_printf(&buf, "0x%08x\t%s\t%s\t%s\t%s\n",
                                           symbol->crc, symbol->name,
-                                          namespace ? namespace : "",
                                           symbol->module->name,
-                                          export_str(symbol->export));
+                                          export_str(symbol->export),
+                                          namespace ? namespace : "");
                        }
                        symbol = symbol->next;
                }
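
modpost now reads and writes Module.symvers with the namespace column last, matching the format comment updated above; the export_report.pl change earlier in this merge reorders its split() fields to match. A made-up example line in the new order (columns are tab-separated):

    0x12345678  my_symbol  drivers/foo/bar  EXPORT_SYMBOL_GPL  FOO_NS
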
index 240e470..752d078 100644
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
                while (plugin->next) {
                        if (plugin->dst_frames)
                                frames = plugin->dst_frames(plugin, frames);
-                       if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+                       if ((snd_pcm_sframes_t)frames <= 0)
                                return -ENXIO;
                        plugin = plugin->next;
                        err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
                while (plugin->prev) {
                        if (plugin->src_frames)
                                frames = plugin->src_frames(plugin, frames);
-                       if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
+                       if ((snd_pcm_sframes_t)frames <= 0)
                                return -ENXIO;
                        plugin = plugin->prev;
                        err = snd_pcm_plugin_alloc(plugin, frames);
@@ -209,6 +209,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
        if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
                plugin = snd_pcm_plug_last(plug);
                while (plugin && drv_frames > 0) {
+                       if (drv_frames > plugin->buf_frames)
+                               drv_frames = plugin->buf_frames;
                        plugin_prev = plugin->prev;
                        if (plugin->src_frames)
                                drv_frames = plugin->src_frames(plugin, drv_frames);
@@ -220,6 +222,8 @@ snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *plug, snd_p
                        plugin_next = plugin->next;
                        if (plugin->dst_frames)
                                drv_frames = plugin->dst_frames(plugin, drv_frames);
+                       if (drv_frames > plugin->buf_frames)
+                               drv_frames = plugin->buf_frames;
                        plugin = plugin_next;
                }
        } else
@@ -248,11 +252,15 @@ snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *plug, snd_pc
                                if (frames < 0)
                                        return frames;
                        }
+                       if (frames > plugin->buf_frames)
+                               frames = plugin->buf_frames;
                        plugin = plugin_next;
                }
        } else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
                plugin = snd_pcm_plug_last(plug);
                while (plugin) {
+                       if (frames > plugin->buf_frames)
+                               frames = plugin->buf_frames;
                        plugin_prev = plugin->prev;
                        if (plugin->src_frames) {
                                frames = plugin->src_frames(plugin, frames);
index d5443ee..cbdf061 100644
@@ -748,11 +748,11 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream,
        snd_pcm_timer_resolution_change(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_SETUP);
 
-       if (pm_qos_request_active(&substream->latency_pm_qos_req))
-               pm_qos_remove_request(&substream->latency_pm_qos_req);
+       if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
+               cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
        if ((usecs = period_to_usecs(runtime)) >= 0)
-               pm_qos_add_request(&substream->latency_pm_qos_req,
-                                  PM_QOS_CPU_DMA_LATENCY, usecs);
+               cpu_latency_qos_add_request(&substream->latency_pm_qos_req,
+                                           usecs);
        return 0;
  _error:
        /* hardware might be unusable from this time,
@@ -821,7 +821,7 @@ static int snd_pcm_hw_free(struct snd_pcm_substream *substream)
                return -EBADFD;
        result = do_hw_free(substream);
        snd_pcm_set_state(substream, SNDRV_PCM_STATE_OPEN);
-       pm_qos_remove_request(&substream->latency_pm_qos_req);
+       cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
        return result;
 }
 
@@ -2599,8 +2599,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
                substream->ops->close(substream);
                substream->hw_opened = 0;
        }
-       if (pm_qos_request_active(&substream->latency_pm_qos_req))
-               pm_qos_remove_request(&substream->latency_pm_qos_req);
+       if (cpu_latency_qos_request_active(&substream->latency_pm_qos_req))
+               cpu_latency_qos_remove_request(&substream->latency_pm_qos_req);
        if (substream->pcm_release) {
                substream->pcm_release(substream);
                substream->pcm_release = NULL;
index a88c235..2ddfe22 100644
@@ -602,6 +602,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
                len = snd_seq_oss_timer_start(dp->timer);
        if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
                snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
+               snd_midi_event_reset_decode(mdev->coder);
        } else {
                len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
                if (len > 0)
index 626d87c..77d7037 100644
@@ -81,6 +81,7 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev,
                        if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                                continue;
                        snd_seq_dump_var_event(ev, (snd_seq_dump_func_t)snd_rawmidi_receive, vmidi->substream);
+                       snd_midi_event_reset_decode(vmidi->parser);
                } else {
                        len = snd_midi_event_decode(vmidi->parser, msg, sizeof(msg), ev);
                        if (len > 0)
index 0ac06ff..63e1a56 100644
@@ -8051,6 +8051,8 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0;
                break;
        case 0x10ec0225:
+               codec->power_save_node = 1;
+               /* fall through */
        case 0x10ec0295:
        case 0x10ec0299:
                spec->codec_variant = ALC269_TYPE_ALC225;
@@ -8610,6 +8612,8 @@ enum {
        ALC669_FIXUP_ACER_ASPIRE_ETHOS,
        ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
        ALC671_FIXUP_HP_HEADSET_MIC2,
+       ALC662_FIXUP_ACER_X2660G_HEADSET_MODE,
+       ALC662_FIXUP_ACER_NITRO_HEADSET_MODE,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -8955,6 +8959,25 @@ static const struct hda_fixup alc662_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc671_fixup_hp_headset_mic2,
        },
+       [ALC662_FIXUP_ACER_X2660G_HEADSET_MODE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x02a1113c }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_USI_FUNC
+       },
+       [ALC662_FIXUP_ACER_NITRO_HEADSET_MODE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x01a11140 }, /* use as headset mic, without its own jack detect */
+                       { 0x1b, 0x0221144f },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_USI_FUNC
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -8966,6 +8989,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+       SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
+       SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
        SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
index 68bcec5..d656398 100644
@@ -325,8 +325,7 @@ int sst_context_init(struct intel_sst_drv *ctx)
                ret = -ENOMEM;
                goto do_free_mem;
        }
-       pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY,
-                               PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_add_request(ctx->qos, PM_QOS_DEFAULT_VALUE);
 
        dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name);
        ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name,
@@ -364,7 +363,7 @@ void sst_context_cleanup(struct intel_sst_drv *ctx)
        sysfs_remove_group(&ctx->dev->kobj, &sst_fw_version_attr_group);
        flush_scheduled_work();
        destroy_workqueue(ctx->post_msg_wq);
-       pm_qos_remove_request(ctx->qos);
+       cpu_latency_qos_remove_request(ctx->qos);
        kfree(ctx->fw_sg_list.src);
        kfree(ctx->fw_sg_list.dst);
        ctx->fw_sg_list.list_len = 0;
index ce11c36..9b0e373 100644
@@ -412,7 +412,7 @@ int sst_load_fw(struct intel_sst_drv *sst_drv_ctx)
                return -ENOMEM;
 
        /* Prevent C-states beyond C6 */
-       pm_qos_update_request(sst_drv_ctx->qos, 0);
+       cpu_latency_qos_update_request(sst_drv_ctx->qos, 0);
 
        sst_drv_ctx->sst_state = SST_FW_LOADING;
 
@@ -442,7 +442,7 @@ int sst_load_fw(struct intel_sst_drv *sst_drv_ctx)
 
 restore:
        /* Re-enable Deeper C-states beyond C6 */
-       pm_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
+       cpu_latency_qos_update_request(sst_drv_ctx->qos, PM_QOS_DEFAULT_VALUE);
        sst_free_block(sst_drv_ctx, block);
        dev_dbg(sst_drv_ctx->dev, "fw load successful!!!\n");
 
index 3f226be..913579c 100644
@@ -112,7 +112,7 @@ static void omap_dmic_dai_shutdown(struct snd_pcm_substream *substream,
 
        mutex_lock(&dmic->mutex);
 
-       pm_qos_remove_request(&dmic->pm_qos_req);
+       cpu_latency_qos_remove_request(&dmic->pm_qos_req);
 
        if (!dai->active)
                dmic->active = 0;
@@ -230,8 +230,9 @@ static int omap_dmic_dai_prepare(struct snd_pcm_substream *substream,
        struct omap_dmic *dmic = snd_soc_dai_get_drvdata(dai);
        u32 ctrl;
 
-       if (pm_qos_request_active(&dmic->pm_qos_req))
-               pm_qos_update_request(&dmic->pm_qos_req, dmic->latency);
+       if (cpu_latency_qos_request_active(&dmic->pm_qos_req))
+               cpu_latency_qos_update_request(&dmic->pm_qos_req,
+                                              dmic->latency);
 
        /* Configure uplink threshold */
        omap_dmic_write(dmic, OMAP_DMIC_FIFO_CTRL_REG, dmic->threshold);
index 26b503b..302d5c4 100644
@@ -836,10 +836,10 @@ static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
        int stream2 = tx ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
 
        if (mcbsp->latency[stream2])
-               pm_qos_update_request(&mcbsp->pm_qos_req,
-                                     mcbsp->latency[stream2]);
+               cpu_latency_qos_update_request(&mcbsp->pm_qos_req,
+                                              mcbsp->latency[stream2]);
        else if (mcbsp->latency[stream1])
-               pm_qos_remove_request(&mcbsp->pm_qos_req);
+               cpu_latency_qos_remove_request(&mcbsp->pm_qos_req);
 
        mcbsp->latency[stream1] = 0;
 
@@ -863,10 +863,10 @@ static int omap_mcbsp_dai_prepare(struct snd_pcm_substream *substream,
        if (!latency || mcbsp->latency[stream1] < latency)
                latency = mcbsp->latency[stream1];
 
-       if (pm_qos_request_active(pm_qos_req))
-               pm_qos_update_request(pm_qos_req, latency);
+       if (cpu_latency_qos_request_active(pm_qos_req))
+               cpu_latency_qos_update_request(pm_qos_req, latency);
        else if (latency)
-               pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
+               cpu_latency_qos_add_request(pm_qos_req, latency);
 
        return 0;
 }
@@ -1434,8 +1434,8 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)
        if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
                mcbsp->pdata->ops->free(mcbsp->id);
 
-       if (pm_qos_request_active(&mcbsp->pm_qos_req))
-               pm_qos_remove_request(&mcbsp->pm_qos_req);
+       if (cpu_latency_qos_request_active(&mcbsp->pm_qos_req))
+               cpu_latency_qos_remove_request(&mcbsp->pm_qos_req);
 
        if (mcbsp->pdata->buffer_size)
                sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group);
index a726cd7..d7ac4df 100644
@@ -281,10 +281,10 @@ static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
        }
 
        if (mcpdm->latency[stream2])
-               pm_qos_update_request(&mcpdm->pm_qos_req,
-                                     mcpdm->latency[stream2]);
+               cpu_latency_qos_update_request(&mcpdm->pm_qos_req,
+                                              mcpdm->latency[stream2]);
        else if (mcpdm->latency[stream1])
-               pm_qos_remove_request(&mcpdm->pm_qos_req);
+               cpu_latency_qos_remove_request(&mcpdm->pm_qos_req);
 
        mcpdm->latency[stream1] = 0;
 
@@ -386,10 +386,10 @@ static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
        if (!latency || mcpdm->latency[stream1] < latency)
                latency = mcpdm->latency[stream1];
 
-       if (pm_qos_request_active(pm_qos_req))
-               pm_qos_update_request(pm_qos_req, latency);
+       if (cpu_latency_qos_request_active(pm_qos_req))
+               cpu_latency_qos_update_request(pm_qos_req, latency);
        else if (latency)
-               pm_qos_add_request(pm_qos_req, PM_QOS_CPU_DMA_LATENCY, latency);
+               cpu_latency_qos_add_request(pm_qos_req, latency);
 
        if (!omap_mcpdm_active(mcpdm)) {
                omap_mcpdm_start(mcpdm);
@@ -451,8 +451,8 @@ static int omap_mcpdm_remove(struct snd_soc_dai *dai)
        free_irq(mcpdm->irq, (void *)mcpdm);
        pm_runtime_disable(mcpdm->dev);
 
-       if (pm_qos_request_active(&mcpdm->pm_qos_req))
-               pm_qos_remove_request(&mcpdm->pm_qos_req);
+       if (cpu_latency_qos_request_active(&mcpdm->pm_qos_req))
+               cpu_latency_qos_remove_request(&mcpdm->pm_qos_req);
 
        return 0;
 }
index b5a3f75..4f09668 100644
@@ -305,7 +305,7 @@ static void line6_data_received(struct urb *urb)
                                line6_midibuf_read(mb, line6->buffer_message,
                                                LINE6_MIDI_MESSAGE_MAXLEN);
 
-                       if (done == 0)
+                       if (done <= 0)
                                break;
 
                        line6->message_length = done;
index 8d6eefa..6a70463 100644
@@ -159,7 +159,7 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
                        int midi_length_prev =
                            midibuf_message_length(this->command_prev);
 
-                       if (midi_length_prev > 0) {
+                       if (midi_length_prev > 1) {
                                midi_length = midi_length_prev - 1;
                                repeat = 1;
                        } else
index ce3c594..637189e 100644
@@ -1,18 +1,18 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #if defined(__i386__) || defined(__x86_64__)
-#include "../../arch/x86/include/uapi/asm/errno.h"
+#include "../../../arch/x86/include/uapi/asm/errno.h"
 #elif defined(__powerpc__)
-#include "../../arch/powerpc/include/uapi/asm/errno.h"
+#include "../../../arch/powerpc/include/uapi/asm/errno.h"
 #elif defined(__sparc__)
-#include "../../arch/sparc/include/uapi/asm/errno.h"
+#include "../../../arch/sparc/include/uapi/asm/errno.h"
 #elif defined(__alpha__)
-#include "../../arch/alpha/include/uapi/asm/errno.h"
+#include "../../../arch/alpha/include/uapi/asm/errno.h"
 #elif defined(__mips__)
-#include "../../arch/mips/include/uapi/asm/errno.h"
+#include "../../../arch/mips/include/uapi/asm/errno.h"
 #elif defined(__ia64__)
-#include "../../arch/ia64/include/uapi/asm/errno.h"
+#include "../../../arch/ia64/include/uapi/asm/errno.h"
 #elif defined(__xtensa__)
-#include "../../arch/xtensa/include/uapi/asm/errno.h"
+#include "../../../arch/xtensa/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
index 8d6821d..27653be 100644 (file)
 #include <linux/zalloc.h>
 #include <time.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/session.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/session.h"
 #include <internal/lib.h> // page_size
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/arm-spe.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/arm-spe.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index 2864e2e..2833e10 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG_END
index e9c436e..0a52429 100644 (file)
@@ -4,8 +4,8 @@
 #include <regex.h>
 #include <linux/zalloc.h>
 
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
 
 #include <linux/kernel.h>
 
index 7abc9fd..3da506e 100644 (file)
@@ -7,13 +7,13 @@
 #include <errno.h>
 #include <stdbool.h>
 
-#include "../../util/header.h"
-#include "../../util/debug.h"
-#include "../../util/pmu.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/evlist.h"
+#include "../../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/evlist.h"
 
 static
 struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
index ac45015..047dc00 100644 (file)
@@ -3,12 +3,12 @@
 #include <linux/string.h>
 #include <linux/zalloc.h>
 
-#include "../../util/event.h"
-#include "../../util/synthetic-events.h"
-#include "../../util/machine.h"
-#include "../../util/tool.h"
-#include "../../util/map.h"
-#include "../../util/debug.h"
+#include "../../../util/event.h"
+#include "../../../util/synthetic-events.h"
+#include "../../../util/machine.h"
+#include "../../../util/tool.h"
+#include "../../../util/map.h"
+#include "../../../util/debug.h"
 
 #if defined(__x86_64__)
 
index aa6deb4..578c8c5 100644 (file)
@@ -7,8 +7,8 @@
 #include <string.h>
 #include <regex.h>
 
-#include "../../util/debug.h"
-#include "../../util/header.h"
+#include "../../../util/debug.h"
+#include "../../../util/header.h"
 
 static inline void
 cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c,
index 26cee10..09f9380 100644 (file)
 #include <linux/log2.h>
 #include <linux/zalloc.h>
 
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evsel.h"
-#include "../../util/evlist.h"
-#include "../../util/mmap.h"
-#include "../../util/session.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/tsc.h"
-#include "../../util/auxtrace.h"
-#include "../../util/intel-bts.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evlist.h"
+#include "../../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/tsc.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/intel-bts.h"
 #include <internal/lib.h> // page_size
 
 #define KiB(x) ((x) * 1024)
index 7eea4fd..1643aed 100644 (file)
 #include <linux/zalloc.h>
 #include <cpuid.h>
 
-#include "../../util/session.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/evsel_config.h"
-#include "../../util/cpumap.h"
-#include "../../util/mmap.h"
+#include "../../../util/session.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/mmap.h"
 #include <subcmd/parse-options.h>
-#include "../../util/parse-events.h"
-#include "../../util/pmu.h"
-#include "../../util/debug.h"
-#include "../../util/auxtrace.h"
-#include "../../util/record.h"
-#include "../../util/target.h"
-#include "../../util/tsc.h"
+#include "../../../util/parse-events.h"
+#include "../../../util/pmu.h"
+#include "../../../util/debug.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/record.h"
+#include "../../../util/target.h"
+#include "../../../util/tsc.h"
 #include <internal/lib.h> // page_size
-#include "../../util/intel-pt.h"
+#include "../../../util/intel-pt.h"
 
 #define KiB(x) ((x) * 1024)
 #define MiB(x) ((x) * 1024 * 1024)
index e17e080..31679c3 100644 (file)
@@ -5,9 +5,9 @@
 #include <stdlib.h>
 
 #include <internal/lib.h> // page_size
-#include "../../util/machine.h"
-#include "../../util/map.h"
-#include "../../util/symbol.h"
+#include "../../../util/machine.h"
+#include "../../../util/map.h"
+#include "../../../util/symbol.h"
 #include <linux/ctype.h>
 
 #include <symbol/kallsyms.h>
index c218b83..fca81b3 100644 (file)
@@ -5,10 +5,10 @@
 #include <linux/kernel.h>
 #include <linux/zalloc.h>
 
-#include "../../perf-sys.h"
-#include "../../util/perf_regs.h"
-#include "../../util/debug.h"
-#include "../../util/event.h"
+#include "../../../perf-sys.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/debug.h"
+#include "../../../util/event.h"
 
 const struct sample_reg sample_reg_masks[] = {
        SMPL_REG(AX, PERF_REG_X86_AX),
index e33ef5b..d48d608 100644 (file)
@@ -4,9 +4,9 @@
 #include <linux/stddef.h>
 #include <linux/perf_event.h>
 
-#include "../../util/intel-pt.h"
-#include "../../util/intel-bts.h"
-#include "../../util/pmu.h"
+#include "../../../util/intel-pt.h"
+#include "../../../util/intel-bts.h"
+#include "../../../util/pmu.h"
 
 struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
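
All of the tools/perf hunks above, and the tools errno.h header before them, are one mechanical fix: the quoted form of #include is resolved relative to the including file's own directory, and these files sit one level deeper than the old ../../ spelling assumed. Per the upstream rationale, the shallower paths only resolved because of extra -I search directories on the compile command line. A toy layout showing the rule, with hypothetical paths:

/* Given this layout:
 *   proj/util/debug.h
 *   proj/arch/x86/util/demo.c   (this file)
 * the include must climb three levels (util -> x86 -> arch -> proj)
 * before descending into util/. */
#include "../../../util/debug.h"

int main(void)
{
	return 0;
}
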
index fddb3ce..4aa6de1 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef BENCH_H
 #define BENCH_H
 
+#include <sys/time.h>
+
+extern struct timeval bench__start, bench__end, bench__runtime;
+
 /*
  * The madvise transparent hugepage constants were added in glibc
  * 2.13. For compatibility with older versions of glibc, define these
index bb617e5..cadc18d 100644 (file)
@@ -35,7 +35,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool done, __verbose, randomize;
 
 /*
@@ -94,8 +93,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void nest_epollfd(void)
@@ -313,6 +312,7 @@ int bench_epoll_ctl(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -361,7 +361,7 @@ int bench_epoll_ctl(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
 
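
This file and the following bench hunks share two changes: the per-file start/end/runtime timevals become the bench__-prefixed globals declared in bench.h above (and defined exactly once, in the futex-hash file further down), and struct sigaction is zeroed before use, since sigfillset() plus assigning sa_sigaction leaves sa_flags and the remaining members indeterminate on the stack. A self-contained sketch of the sigaction idiom:

#include <signal.h>
#include <string.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)info; (void)ctx;
}

int main(void)
{
	struct sigaction act;

	/* sa_flags and the other members are indeterminate on the
	 * stack; zero the whole struct before selectively filling it. */
	memset(&act, 0, sizeof(act));
	sigfillset(&act.sa_mask);
	act.sa_flags = SA_SIGINFO;	/* required for sa_sigaction handlers */
	act.sa_sigaction = handler;
	sigaction(SIGINT, &act, NULL);

	pause();			/* returns once a signal lands */
	return 0;
}
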
index 7af6944..f938c58 100644 (file)
@@ -90,7 +90,6 @@
 
 static unsigned int nthreads = 0;
 static unsigned int nsecs    = 8;
-struct timeval start, end, runtime;
 static bool wdone, done, __verbose, randomize, nonblocking;
 
 /*
@@ -276,8 +275,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -287,7 +286,7 @@ static void print_summary(void)
 
        printf("\nAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static int do_threads(struct worker *worker, struct perf_cpu_map *cpu)
@@ -427,6 +426,7 @@ int bench_epoll_wait(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -479,7 +479,7 @@ int bench_epoll_wait(int argc, const char **argv)
 
        threads_starting = nthreads;
 
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        do_threads(worker, cpu);
 
@@ -519,7 +519,7 @@ int bench_epoll_wait(int argc, const char **argv)
                qsort(worker, nthreads, sizeof(struct worker), cmpworker);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
 
index 8ba0c33..65eebe0 100644 (file)
@@ -37,7 +37,7 @@ static unsigned int nfutexes = 1024;
 static bool fshared = false, done = false, silent = false;
 static int futex_flag = 0;
 
-struct timeval start, end, runtime;
+struct timeval bench__start, bench__end, bench__runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -103,8 +103,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void print_summary(void)
@@ -114,7 +114,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 int bench_futex_hash(int argc, const char **argv)
@@ -137,6 +137,7 @@ int bench_futex_hash(int argc, const char **argv)
        if (!cpu)
                goto errmem;
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -161,7 +162,7 @@ int bench_futex_hash(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
        for (i = 0; i < nthreads; i++) {
                worker[i].tid = i;
                worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
@@ -204,7 +205,7 @@ int bench_futex_hash(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
                update_stats(&throughput_stats, t);
                if (!silent) {
                        if (nfutexes == 1)
index d0cae81..89fd8f3 100644 (file)
@@ -37,7 +37,6 @@ static bool silent = false, multi = false;
 static bool done = false, fshared = false;
 static unsigned int nthreads = 0;
 static int futex_flag = 0;
-struct timeval start, end, runtime;
 static pthread_mutex_t thread_lock;
 static unsigned int threads_starting;
 static struct stats throughput_stats;
@@ -64,7 +63,7 @@ static void print_summary(void)
 
        printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
               !silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
-              (int) runtime.tv_sec);
+              (int)bench__runtime.tv_sec);
 }
 
 static void toggle_done(int sig __maybe_unused,
@@ -73,8 +72,8 @@ static void toggle_done(int sig __maybe_unused,
 {
        /* inform all threads that we're done for the day */
        done = true;
-       gettimeofday(&end, NULL);
-       timersub(&end, &start, &runtime);
+       gettimeofday(&bench__end, NULL);
+       timersub(&bench__end, &bench__start, &bench__runtime);
 }
 
 static void *workerfn(void *arg)
@@ -161,6 +160,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
@@ -185,7 +185,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
 
        threads_starting = nthreads;
        pthread_attr_init(&thread_attr);
-       gettimeofday(&start, NULL);
+       gettimeofday(&bench__start, NULL);
 
        create_threads(worker, thread_attr, cpu);
        pthread_attr_destroy(&thread_attr);
@@ -211,7 +211,7 @@ int bench_futex_lock_pi(int argc, const char **argv)
        pthread_mutex_destroy(&thread_lock);
 
        for (i = 0; i < nthreads; i++) {
-               unsigned long t = worker[i].ops/runtime.tv_sec;
+               unsigned long t = worker[i].ops / bench__runtime.tv_sec;
 
                update_stats(&throughput_stats, t);
                if (!silent)
index a00a689..7a15c2e 100644 (file)
@@ -128,6 +128,7 @@ int bench_futex_requeue(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "cpu_map__new");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index a053cf2..cd2b81a 100644 (file)
@@ -234,6 +234,7 @@ int bench_futex_wake_parallel(int argc, const char **argv)
                exit(EXIT_FAILURE);
        }
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
index df81009..2dfcef3 100644 (file)
@@ -43,7 +43,7 @@ static bool done = false, silent = false, fshared = false;
 static pthread_mutex_t thread_lock;
 static pthread_cond_t thread_parent, thread_worker;
 static struct stats waketime_stats, wakeup_stats;
-static unsigned int ncpus, threads_starting, nthreads = 0;
+static unsigned int threads_starting, nthreads = 0;
 static int futex_flag = 0;
 
 static const struct option options[] = {
@@ -136,12 +136,13 @@ int bench_futex_wake(int argc, const char **argv)
        if (!cpu)
                err(EXIT_FAILURE, "calloc");
 
+       memset(&act, 0, sizeof(act));
        sigfillset(&act.sa_mask);
        act.sa_sigaction = toggle_done;
        sigaction(SIGINT, &act, NULL);
 
        if (!nthreads)
-               nthreads = ncpus;
+               nthreads = cpu->nr;
 
        worker = calloc(nthreads, sizeof(*worker));
        if (!worker)
index f8b6ae5..c03c36f 100644 (file)
@@ -1312,7 +1312,8 @@ static int cycles_printf(struct hist_entry *he, struct hist_entry *pair,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s] %4ld",
                          start_line, end_line, block_he->diff.cycles);
        } else {
index f6dd1a6..d2539b7 100644 (file)
@@ -684,7 +684,9 @@ repeat:
        delay_msecs = top->delay_secs * MSEC_PER_SEC;
        set_term_quiet_input(&save);
        /* trash return*/
-       getc(stdin);
+       clearerr(stdin);
+       if (poll(&stdin_poll, 1, 0) > 0)
+               getc(stdin);
 
        while (!done) {
                perf_top__print_sym_table(top);
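
The perf-top hunk avoids a blocking getc() when stdin has nothing pending: clearerr() resets a possibly sticky EOF flag, and a zero-timeout poll() lets the read through only when input is actually available (stdin_poll is assumed, in the surrounding file, to be a struct pollfd watching fd 0). The same probe in isolation:

#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };

	/* Zero timeout makes poll() a pure readiness probe; only
	 * consume a character if one is really there. */
	clearerr(stdin);
	if (poll(&stdin_poll, 1, 0) > 0)
		(void)getc(stdin);
	return 0;
}
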
index 079c77b..27b4da8 100644 (file)
@@ -1082,10 +1082,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
  */
 int main(int argc, char *argv[])
 {
-       int rc;
+       int rc, ret = 0;
        int maxfds;
        char ldirname[PATH_MAX];
-
        const char *arch;
        const char *output_file;
        const char *start_dirname;
@@ -1156,7 +1155,8 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
+               goto out_free_mapfile;
        } else if (rc) {
                goto empty_map;
        }
@@ -1174,14 +1174,17 @@ int main(int argc, char *argv[])
                /* Make build fail */
                fclose(eventsfp);
                free_arch_std_events();
-               return 1;
+               ret = 1;
        }
 
-       return 0;
+
+       goto out_free_mapfile;
 
 empty_map:
        fclose(eventsfp);
        create_empty_mapping(output_file);
        free_arch_std_events();
-       return 0;
+out_free_mapfile:
+       free(mapfile);
+       return ret;
 }
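
The jevents change converts early returns into a single exit path, so the mapfile allocation is freed on every route out of main(). A miniature of the same goto-based cleanup shape, where process() and its resources are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Every failure after the first allocation funnels through one label,
 * so resources are released exactly once on all paths. */
static int process(const char *name)
{
	int ret = 0;
	char *copy = strdup(name);
	FILE *fp;

	if (!copy)
		return 1;

	fp = fopen(name, "r");
	if (!fp) {
		ret = 1;
		goto out_free;
	}

	/* ... use fp and copy ... */

	fclose(fp);
out_free:
	free(copy);
	return ret;
}

int main(void)
{
	return process("/etc/hostname");
}
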
index d0b9353..489b506 100644 (file)
@@ -19,7 +19,7 @@
 #include "../perf-sys.h"
 #include "cloexec.h"
 
-volatile long the_var;
+static volatile long the_var;
 
 static noinline int test_function(void)
 {
index c4b030b..fbbb6d6 100644 (file)
@@ -295,7 +295,8 @@ static int block_range_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        end_line = map__srcline(he->ms.map, bi->sym->start + bi->end,
                                he->ms.sym);
 
-       if ((start_line != SRCLINE_UNKNOWN) && (end_line != SRCLINE_UNKNOWN)) {
+       if ((strncmp(start_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0) &&
+           (strncmp(end_line, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)) {
                scnprintf(buf, sizeof(buf), "[%s -> %s]",
                          start_line, end_line);
        } else {
index 6242a92..4154f94 100644 (file)
@@ -343,11 +343,11 @@ static const char *normalize_arch(char *arch)
 
 const char *perf_env__arch(struct perf_env *env)
 {
-       struct utsname uts;
        char *arch_name;
 
        if (!env || !env->arch) { /* Assume local operation */
-               if (uname(&uts) < 0)
+               static struct utsname uts = { .machine[0] = '\0', };
+               if (uts.machine[0] == '\0' && uname(&uts) < 0)
                        return NULL;
                arch_name = uts.machine;
        } else
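
The perf_env__arch() fix addresses returning a pointer into a stack local: uts.machine would dangle once the function returned. Making the buffer static both keeps it alive and caches the uname(2) result across calls, as in this standalone sketch:

#include <stdio.h>
#include <sys/utsname.h>

static const char *local_arch(void)
{
	/* static: survives the call, and the '\0' sentinel means
	 * uname(2) runs only on first use. */
	static struct utsname uts = { .machine[0] = '\0', };

	if (uts.machine[0] == '\0' && uname(&uts) < 0)
		return NULL;
	return uts.machine;
}

int main(void)
{
	const char *arch = local_arch();

	printf("%s\n", arch ? arch : "unknown");
	return 0;
}
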
index a08ca27..9542851 100644 (file)
@@ -431,7 +431,7 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix,
 
        if (map && map->dso) {
                char *srcline = map__srcline(map, addr, NULL);
-               if (srcline != SRCLINE_UNKNOWN)
+               if (strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN)) != 0)
                        ret = fprintf(fp, "%s%s", prefix, srcline);
                free_srcline(srcline);
        }
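
Three hunks above (builtin-diff, block-info, map.c) stop comparing srcline results against SRCLINE_UNKNOWN by pointer: the string a map__srcline() caller gets back can be a heap copy whose contents say "??:0" without being the sentinel object itself, so only a content comparison is reliable. Demonstrated in isolation:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SRCLINE_UNKNOWN ((char *)"??:0")

int main(void)
{
	/* A heap copy is equal by content, never by identity. */
	char *srcline = strdup(SRCLINE_UNKNOWN);

	printf("pointer test: %s\n",
	       srcline != SRCLINE_UNKNOWN ? "looks known (wrong)" : "unknown");
	printf("content test: %s\n",
	       strncmp(srcline, SRCLINE_UNKNOWN, strlen(SRCLINE_UNKNOWN))
	       ? "known" : "unknown");
	free(srcline);
	return 0;
}
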
index c01ba6f..a149958 100644 (file)
@@ -257,21 +257,15 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                path = zalloc(sizeof(*path));
                                if (!path)
                                        return NULL;
-                               path->system = malloc(MAX_EVENT_LENGTH);
-                               if (!path->system) {
+                               if (asprintf(&path->system, "%.*s", MAX_EVENT_LENGTH, sys_dirent->d_name) < 0) {
                                        free(path);
                                        return NULL;
                                }
-                               path->name = malloc(MAX_EVENT_LENGTH);
-                               if (!path->name) {
+                               if (asprintf(&path->name, "%.*s", MAX_EVENT_LENGTH, evt_dirent->d_name) < 0) {
                                        zfree(&path->system);
                                        free(path);
                                        return NULL;
                                }
-                               strncpy(path->system, sys_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
-                               strncpy(path->name, evt_dirent->d_name,
-                                       MAX_EVENT_LENGTH);
                                return path;
                        }
                }
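
tracepoint_id_to_path() drops the malloc()+strncpy() pair, which could hand back an unterminated string when a directory name reached MAX_EVENT_LENGTH, in favor of asprintf() with a "%.*s" precision cap: one call sizes the allocation exactly, always terminates, and reports failure. The idiom on its own (the constant value here is illustrative):

#define _GNU_SOURCE		/* asprintf() is a GNU extension */
#include <stdio.h>
#include <stdlib.h>

#define MAX_EVENT_LENGTH 512

int main(void)
{
	const char *name = "sched_switch";
	char *copy;

	/* "%.*s" caps the copied length; the result is always
	 * NUL-terminated, unlike a full-size strncpy(). */
	if (asprintf(&copy, "%.*s", MAX_EVENT_LENGTH, name) < 0)
		return 1;

	puts(copy);
	free(copy);
	return 0;
}
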
index 1077013..26bc6a0 100644 (file)
@@ -1622,7 +1622,12 @@ int dso__load(struct dso *dso, struct map *map)
                goto out;
        }
 
-       if (dso->kernel) {
+       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
+               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
+
+       if (dso->kernel && !kmod) {
                if (dso->kernel == DSO_TYPE_KERNEL)
                        ret = dso__load_kernel_sym(dso, map);
                else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
@@ -1650,12 +1655,6 @@ int dso__load(struct dso *dso, struct map *map)
        if (!name)
                goto out;
 
-       kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
-               dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
-
-
        /*
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
index 33dc34d..20f4634 100644 (file)
@@ -82,7 +82,7 @@ static struct pci_access *pci_acc;
 static struct pci_dev *amd_fam14h_pci_dev;
 static int nbp1_entered;
 
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 #ifdef DEBUG
index 3c4cee1..a65f7d0 100644 (file)
@@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
 
 static unsigned long long **previous_count;
 static unsigned long long **current_count;
-struct timespec start_time;
+static struct timespec start_time;
 static unsigned long long timediff;
 
 static int cpuidle_get_count_percent(unsigned int id, double *percent,
index 6d44fec..7c77045 100644 (file)
@@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = {
 0
 };
 
+int cpu_count;
+
 static struct cpuidle_monitor *monitors[MONITORS_MAX];
 static unsigned int avail_monitors;
 
index 5b5eb1d..c559d31 100644 (file)
@@ -25,7 +25,7 @@
 #endif
 #define CSTATE_DESC_LEN 60
 
-int cpu_count;
+extern int cpu_count;
 
 /* Hard to define the right names ...: */
 enum power_range_e {
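
The cpupower hunks, like the bench__ timeval change earlier, are -fno-common fixes: GCC 10 made that flag the default, so a global defined in several translation units (start_time in two monitor files, cpu_count via the header) now fails to link with duplicate-symbol errors. The cure is one definition plus extern declarations everywhere else, roughly this two-file shape:

/* cpupower-monitor.h (shared header): declare only. */
extern int cpu_count;

/* cpupower-monitor.c (exactly one .c file): define. */
int cpu_count;
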
index 256199c..3c47865 100755 (executable)
@@ -235,7 +235,6 @@ def plot_duration_cpu():
     output_png = 'all_cpu_durations.png'
     g_plot = common_all_gnuplot_settings(output_png)
 #   autoscale this one, no set y range
-    g_plot('set ytics 0, 500')
     g_plot('set ylabel "Timer Duration (MilliSeconds)"')
     g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
 
index 13f1e8b..2b65512 100644 (file)
@@ -16,7 +16,7 @@ override CFLAGS +=    -D_FORTIFY_SOURCE=2
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
-       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS)
+       $(CC) $(CFLAGS) $< -o $(BUILD_OUTPUT)/$@ $(LDFLAGS) -lcap
 
 .PHONY : clean
 clean :
index 31c1ca0..33b3708 100644 (file)
@@ -30,7 +30,7 @@
 #include <sched.h>
 #include <time.h>
 #include <cpuid.h>
-#include <linux/capability.h>
+#include <sys/capability.h>
 #include <errno.h>
 #include <math.h>
 
@@ -304,6 +304,10 @@ int *irqs_per_cpu;         /* indexed by cpu_num */
 
 void setup_all_buffers(void);
 
+char *sys_lpi_file;
+char *sys_lpi_file_sysfs = "/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
+char *sys_lpi_file_debugfs = "/sys/kernel/debug/pmc_core/slp_s0_residency_usec";
+
 int cpu_is_not_present(int cpu)
 {
        return !CPU_ISSET_S(cpu, cpu_present_setsize, cpu_present_set);
@@ -2916,8 +2920,6 @@ int snapshot_gfx_mhz(void)
  *
  * record snapshot of
  * /sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us
- *
- * return 1 if config change requires a restart, else return 0
  */
 int snapshot_cpu_lpi_us(void)
 {
@@ -2941,17 +2943,14 @@ int snapshot_cpu_lpi_us(void)
 /*
  * snapshot_sys_lpi()
  *
- * record snapshot of
- * /sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us
- *
- * return 1 if config change requires a restart, else return 0
+ * record snapshot of sys_lpi_file
  */
 int snapshot_sys_lpi_us(void)
 {
        FILE *fp;
        int retval;
 
-       fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
+       fp = fopen_or_die(sys_lpi_file, "r");
 
        retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
        if (retval != 1) {
@@ -3151,28 +3150,42 @@ void check_dev_msr()
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
 
-void check_permissions()
+/*
+ * check for CAP_SYS_RAWIO
+ * return 0 on success
+ * return 1 on fail
+ */
+int check_for_cap_sys_rawio(void)
 {
-       struct __user_cap_header_struct cap_header_data;
-       cap_user_header_t cap_header = &cap_header_data;
-       struct __user_cap_data_struct cap_data_data;
-       cap_user_data_t cap_data = &cap_data_data;
-       extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
-       int do_exit = 0;
-       char pathname[32];
+       cap_t caps;
+       cap_flag_value_t cap_flag_value;
 
-       /* check for CAP_SYS_RAWIO */
-       cap_header->pid = getpid();
-       cap_header->version = _LINUX_CAPABILITY_VERSION;
-       if (capget(cap_header, cap_data) < 0)
-               err(-6, "capget(2) failed");
+       caps = cap_get_proc();
+       if (caps == NULL)
+               err(-6, "cap_get_proc\n");
 
-       if ((cap_data->effective & (1 << CAP_SYS_RAWIO)) == 0) {
-               do_exit++;
+       if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &cap_flag_value))
+               err(-6, "cap_get\n");
+
+       if (cap_flag_value != CAP_SET) {
                warnx("capget(CAP_SYS_RAWIO) failed,"
                        " try \"# setcap cap_sys_rawio=ep %s\"", progname);
+               return 1;
        }
 
+       if (cap_free(caps) == -1)
+               err(-6, "cap_free\n");
+
+       return 0;
+}
+void check_permissions(void)
+{
+       int do_exit = 0;
+       char pathname[32];
+
+       /* check for CAP_SYS_RAWIO */
+       do_exit += check_for_cap_sys_rawio();
+
        /* test file permissions */
        sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
        if (euidaccess(pathname, R_OK)) {
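
turbostat's permission check moves from the raw capget(2) syscall to libcap, hence the new -lcap in the Makefile hunk and the switch to <sys/capability.h>. A self-contained version of the same probe; build with cc demo.c -lcap:

#include <err.h>
#include <stdio.h>
#include <sys/capability.h>

static int has_cap_sys_rawio(void)
{
	cap_t caps;
	cap_flag_value_t val;

	caps = cap_get_proc();		/* snapshot of our capability sets */
	if (caps == NULL)
		err(1, "cap_get_proc");

	if (cap_get_flag(caps, CAP_SYS_RAWIO, CAP_EFFECTIVE, &val))
		err(1, "cap_get_flag");

	if (cap_free(caps) == -1)
		err(1, "cap_free");

	return val == CAP_SET;
}

int main(void)
{
	printf("CAP_SYS_RAWIO %s\n",
	       has_cap_sys_rawio() ? "present" : "absent");
	return 0;
}
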
@@ -3265,6 +3278,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                pkg_cstate_limits = glm_pkg_cstate_limits;
                break;
        default:
@@ -3336,6 +3350,17 @@ int is_skx(unsigned int family, unsigned int model)
        }
        return 0;
 }
+int is_ehl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       switch (model) {
+       case INTEL_FAM6_ATOM_TREMONT:
+               return 1;
+       }
+       return 0;
+}
 
 int has_turbo_ratio_limit(unsigned int family, unsigned int model)
 {
@@ -3478,6 +3503,23 @@ dump_cstate_pstate_config_info(unsigned int family, unsigned int model)
        dump_nhm_cst_cfg();
 }
 
+static void dump_sysfs_file(char *path)
+{
+       FILE *input;
+       char cpuidle_buf[64];
+
+       input = fopen(path, "r");
+       if (input == NULL) {
+               if (debug)
+                       fprintf(outf, "NSFOD %s\n", path);
+               return;
+       }
+       if (!fgets(cpuidle_buf, sizeof(cpuidle_buf), input))
+               err(1, "%s: failed to read file", path);
+       fclose(input);
+
+       fprintf(outf, "%s: %s", strrchr(path, '/') + 1, cpuidle_buf);
+}
 static void
 dump_sysfs_cstate_config(void)
 {
@@ -3491,6 +3533,15 @@ dump_sysfs_cstate_config(void)
        if (!DO_BIC(BIC_sysfs))
                return;
 
+       if (access("/sys/devices/system/cpu/cpuidle", R_OK)) {
+               fprintf(outf, "cpuidle not loaded\n");
+               return;
+       }
+
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_driver");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor");
+       dump_sysfs_file("/sys/devices/system/cpu/cpuidle/current_governor_ro");
+
        for (state = 0; state < 10; ++state) {
 
                sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/name",
@@ -3894,6 +3945,20 @@ void rapl_probe_intel(unsigned int family, unsigned int model)
                else
                        BIC_PRESENT(BIC_PkgWatt);
                break;
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
+               do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
+               if (rapl_joules) {
+                       BIC_PRESENT(BIC_Pkg_J);
+                       BIC_PRESENT(BIC_Cor_J);
+                       BIC_PRESENT(BIC_RAM_J);
+                       BIC_PRESENT(BIC_GFX_J);
+               } else {
+                       BIC_PRESENT(BIC_PkgWatt);
+                       BIC_PRESENT(BIC_CorWatt);
+                       BIC_PRESENT(BIC_RAMWatt);
+                       BIC_PRESENT(BIC_GFXWatt);
+               }
+               break;
        case INTEL_FAM6_SKYLAKE_L:      /* SKL */
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
                do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_GFX | RAPL_PKG_POWER_INFO;
@@ -4295,6 +4360,7 @@ int has_snb_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_ATOM_GOLDMONT:          /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
        case INTEL_FAM6_ATOM_GOLDMONT_D:        /* DNV */
+       case INTEL_FAM6_ATOM_TREMONT:           /* EHL */
                return 1;
        }
        return 0;
@@ -4324,6 +4390,7 @@ int has_c8910_msrs(unsigned int family, unsigned int model)
        case INTEL_FAM6_CANNONLAKE_L:   /* CNL */
        case INTEL_FAM6_ATOM_GOLDMONT:  /* BXT */
        case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+       case INTEL_FAM6_ATOM_TREMONT:   /* EHL */
                return 1;
        }
        return 0;
@@ -4610,14 +4677,24 @@ unsigned int intel_model_duplicates(unsigned int model)
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
                return INTEL_FAM6_SKYLAKE_L;
 
        case INTEL_FAM6_ICELAKE_L:
        case INTEL_FAM6_ICELAKE_NNPI:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                return INTEL_FAM6_CANNONLAKE_L;
 
        case INTEL_FAM6_ATOM_TREMONT_D:
                return INTEL_FAM6_ATOM_GOLDMONT_D;
+
+       case INTEL_FAM6_ATOM_TREMONT_L:
+               return INTEL_FAM6_ATOM_TREMONT;
+
+       case INTEL_FAM6_ICELAKE_X:
+               return INTEL_FAM6_SKYLAKE_X;
        }
        return model;
 }
@@ -4872,7 +4949,8 @@ void process_cpuid()
        do_slm_cstates = is_slm(family, model);
        do_knl_cstates  = is_knl(family, model);
 
-       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
+       if (do_slm_cstates || do_knl_cstates || is_cnl(family, model) ||
+           is_ehl(family, model))
                BIC_NOT_PRESENT(BIC_CPU_c3);
 
        if (!quiet)
@@ -4907,10 +4985,16 @@ void process_cpuid()
        else
                BIC_NOT_PRESENT(BIC_CPU_LPI);
 
-       if (!access("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", R_OK))
+       if (!access(sys_lpi_file_sysfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_sysfs;
                BIC_PRESENT(BIC_SYS_LPI);
-       else
+       } else if (!access(sys_lpi_file_debugfs, R_OK)) {
+               sys_lpi_file = sys_lpi_file_debugfs;
+               BIC_PRESENT(BIC_SYS_LPI);
+       } else {
+               sys_lpi_file_sysfs = NULL;
                BIC_NOT_PRESENT(BIC_SYS_LPI);
+       }
 
        if (!quiet)
                decode_misc_feature_control();
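
The SYS_LPI counter now probes two residency sources in order, taking the sysfs node when readable and otherwise falling back to the debugfs one exposed by pmc_core. The selection reduces to a pair of access() checks:

#include <stdio.h>
#include <unistd.h>

static const char *sysfs_path =
	"/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us";
static const char *debugfs_path =
	"/sys/kernel/debug/pmc_core/slp_s0_residency_usec";

int main(void)
{
	const char *lpi_file = NULL;

	/* prefer sysfs; fall back to debugfs; else disable the counter */
	if (!access(sysfs_path, R_OK))
		lpi_file = sysfs_path;
	else if (!access(debugfs_path, R_OK))
		lpi_file = debugfs_path;

	printf("SYS_LPI source: %s\n", lpi_file ? lpi_file : "unavailable");
	return 0;
}
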
@@ -5306,7 +5390,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(outf, "turbostat version 19.08.31"
+       fprintf(outf, "turbostat version 20.03.20"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
@@ -5323,9 +5407,9 @@ int add_counter(unsigned int msr_num, char *path, char *name,
        }
 
        msrp->msr_num = msr_num;
-       strncpy(msrp->name, name, NAME_BYTES);
+       strncpy(msrp->name, name, NAME_BYTES - 1);
        if (path)
-               strncpy(msrp->path, path, PATH_BYTES);
+               strncpy(msrp->path, path, PATH_BYTES - 1);
        msrp->width = width;
        msrp->type = type;
        msrp->format = format;
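
The add_counter() fix is the classic strncpy() pitfall: copying exactly NAME_BYTES/PATH_BYTES leaves no room for the terminator when the source fills the buffer. Since msrp comes from zeroed memory in the surrounding code, capping the copy at size - 1 keeps the final byte '\0', as in:

#include <stdio.h>
#include <string.h>

#define NAME_BYTES 20

int main(void)
{
	/* Zero-initialized destination (the real struct is calloc'd):
	 * copying at most NAME_BYTES - 1 characters guarantees the
	 * last byte stays '\0' even for over-long names. */
	char name[NAME_BYTES] = { 0 };

	strncpy(name, "a-counter-name-that-is-too-long", NAME_BYTES - 1);
	printf("%s\n", name);
	return 0;
}
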
index 220d04f..7570e36 100755 (executable)
@@ -30,7 +30,7 @@ my %default = (
     "EMAIL_WHEN_STARTED"       => 0,
     "NUM_TESTS"                        => 1,
     "TEST_TYPE"                        => "build",
-    "BUILD_TYPE"               => "randconfig",
+    "BUILD_TYPE"               => "oldconfig",
     "MAKE_CMD"                 => "make",
     "CLOSE_CONSOLE_SIGNAL"     => "INT",
     "TIMEOUT"                  => 120,
@@ -1030,7 +1030,7 @@ sub __read_config {
            }
 
            if (!$skip && $rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after $type\n$_";
+               die "$name: $.: Garbage found after $type\n$_";
            }
 
            if ($skip && $type eq "TEST_START") {
@@ -1063,7 +1063,7 @@ sub __read_config {
            }
 
            if ($rest !~ /^\s*$/) {
-               die "$name: $.: Gargbage found after DEFAULTS\n$_";
+               die "$name: $.: Garbage found after DEFAULTS\n$_";
            }
 
        } elsif (/^\s*INCLUDE\s+(\S+)/) {
@@ -1154,7 +1154,7 @@ sub __read_config {
            # one of these sections that have SKIP defined.
            # The save variable can be
            # defined multiple times and the new one simply overrides
-           # the prevous one.
+           # the previous one.
            set_variable($lvalue, $rvalue);
 
        } else {
@@ -1234,7 +1234,7 @@ sub read_config {
        foreach my $option (keys %not_used) {
            print "$option\n";
        }
-       print "Set IGRNORE_UNUSED = 1 to have ktest ignore unused variables\n";
+       print "Set IGNORE_UNUSED = 1 to have ktest ignore unused variables\n";
        if (!read_yn "Do you want to continue?") {
            exit -1;
        }
@@ -1345,7 +1345,7 @@ sub eval_option {
        # Check for recursive evaluations.
        # 100 deep should be more than enough.
        if ($r++ > 100) {
-           die "Over 100 evaluations accurred with $option\n" .
+           die "Over 100 evaluations occurred with $option\n" .
                "Check for recursive variables\n";
        }
        $prev = $option;
@@ -1383,7 +1383,7 @@ sub reboot {
 
     } else {
        # Make sure everything has been written to disk
-       run_ssh("sync");
+       run_ssh("sync", 10);
 
        if (defined($time)) {
            start_monitor;
@@ -1461,7 +1461,7 @@ sub get_test_name() {
 
 sub dodie {
 
-    # avoid recusion
+    # avoid recursion
     return if ($in_die);
     $in_die = 1;
 
index c3bc933..27666b8 100644 (file)
@@ -10,7 +10,7 @@
 #
 
 # Options set in the beginning of the file are considered to be
-# default options. These options can be overriden by test specific
+# default options. These options can be overridden by test specific
 # options, with the following exceptions:
 #
 #  LOG_FILE
 #
 # This config file can also contain "config variables".
 # These are assigned with ":=" instead of the ktest option
-# assigment "=".
+# assignment "=".
 #
 # The difference between ktest options and config variables
 # is that config variables can be used multiple times,
 #### Using options in other options ####
 #
 # Options that are defined in the config file may also be used
-# by other options. All options are evaulated at time of
+# by other options. All options are evaluated at time of
 # use (except that config variables are evaluated at config
 # processing time).
 #
 #TEST = ssh user@machine /root/run_test
 
 # The build type is any make config type or special command
-#  (default randconfig)
+#  (default oldconfig)
 #   nobuild - skip the clean and build step
 #   useconfig:/path/to/config - use the given config and run
 #              oldconfig on it.
 
 # Line to define a successful boot up in console output.
 # This is what the line contains, not the entire line. If you need
-# the entire line to match, then use regural expression syntax like:
+# the entire line to match, then use regular expression syntax like:
 #  (do not add any quotes around it)
 #
 #  SUCCESS_LINE = ^MyBox Login:$
 # (ignored if POWEROFF_ON_SUCCESS is set)
 #REBOOT_ON_SUCCESS = 1
 
-# In case there are isses with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
+# In case there are issues with rebooting, you can specify this
 # to always powercycle after this amount of time after calling
 # reboot.
 # Note, POWERCYCLE_AFTER_REBOOT = 0 does NOT disable it. It just
 # (default undefined)
 #POWERCYCLE_AFTER_REBOOT = 5
 
-# In case there's isses with halting, you can specify this
+# In case there's issues with halting, you can specify this
+# In case there's issues with halting, you can specify this
 # to always poweroff after this amount of time after calling
 # halt.
 # Note, POWEROFF_AFTER_HALT = 0 does NOT disable it. It just
 #
 #  PATCHCHECK_START is required and is the first patch to
 #   test (the SHA1 of the commit). You may also specify anything
-#   that git checkout allows (branch name, tage, HEAD~3).
+#   that git checkout allows (branch name, tag, HEAD~3).
 #
 #  PATCHCHECK_END is the last patch to check (default HEAD)
 #
 #     IGNORE_WARNINGS is set for the given commit's sha1
 #
 #   IGNORE_WARNINGS can be used to disable the failure of patchcheck
-#     on a particuler commit (SHA1). You can add more than one commit
+#     on a particular commit (SHA1). You can add more than one commit
 #     by adding a list of SHA1s that are space delimited.
 #
 #   If BUILD_NOCLEAN is set, then make mrproper will not be run on
 #   whatever reason. (Can't reboot, want to inspect each iteration)
 #   Doing a BISECT_MANUAL will have the test wait for you to
 #   tell it if the test passed or failed after each iteration.
-#   This is basicall the same as running git bisect yourself
+#   This is basically the same as running git bisect yourself
 #   but ktest will rebuild and install the kernel for you.
 #
 # BISECT_CHECK = 1 (optional, default 0)
 #
 # CONFIG_BISECT_EXEC (optional)
 #  The config bisect is a separate program that comes with ktest.pl.
-#  By befault, it will look for:
+#  By default, it will look for:
 #    `pwd`/config-bisect.pl # the location ktest.pl was executed from.
 #  If it does not find it there, it will look for:
 #    `dirname <ktest.pl>`/config-bisect.pl # The directory that holds ktest.pl
index 60273f1..b761670 100755 (executable)
@@ -1041,6 +1041,27 @@ ipv6_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # verify peer metric added correctly
+       set -e
+       run_cmd "$IP -6 addr flush dev dummy2"
+       run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on local side"
+       log_test $? 0 "User specified metric on local address"
+       check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
+       log_test $? 0 "Set metric with peer route on peer side"
+
+       set -e
+       run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261"
+       set +e
+
+       check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on local side"
+       check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261"
+       log_test $? 0 "Modify metric and peer address on peer side"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
@@ -1457,13 +1478,20 @@ ipv4_addr_metric_test()
 
        run_cmd "$IP addr flush dev dummy2"
        run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
-       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
        rc=$?
        if [ $rc -eq 0 ]; then
-               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Set metric of address with peer route"
+
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
                rc=$?
        fi
-       log_test $rc 0 "Modify metric of address with peer route"
+       log_test $rc 0 "Modify metric and peer address for peer route"
 
        $IP li del dummy1
        $IP li del dummy2
index 477bc61..c03af46 100644 (file)
@@ -57,3 +57,4 @@ CONFIG_NET_IFE_SKBMARK=m
 CONFIG_NET_IFE_SKBPRIO=m
 CONFIG_NET_IFE_SKBTCINDEX=m
 CONFIG_NET_SCH_FIFO=y
+CONFIG_NET_SCH_ETS=m
index bdf5bbd..96afb03 100644 (file)
@@ -124,17 +124,6 @@ choice
 
          If in doubt, select 'None'
 
-config INITRAMFS_COMPRESSION_NONE
-       bool "None"
-       help
-         Do not compress the built-in initramfs at all. This may sound wasteful
-         in space, but, you should be aware that the built-in initramfs will be
-         compressed at a later stage anyways along with the rest of the kernel,
-         on those architectures that support this. However, not compressing the
-         initramfs may lead to slightly higher memory consumption during a
-         short time at boot, while both the cpio image and the unpacked
-         filesystem image will be present in memory simultaneously
-
 config INITRAMFS_COMPRESSION_GZIP
        bool "Gzip"
        depends on RD_GZIP
@@ -207,4 +196,15 @@ config INITRAMFS_COMPRESSION_LZ4
          If you choose this, keep in mind that most distros don't provide lz4
          by default which could cause a build failure.
 
+config INITRAMFS_COMPRESSION_NONE
+       bool "None"
+       help
+         Do not compress the built-in initramfs at all. This may sound wasteful
+         in space, but, you should be aware that the built-in initramfs will be
+         compressed at a later stage anyways along with the rest of the kernel,
+         on those architectures that support this. However, not compressing the
+         initramfs may lead to slightly higher memory consumption during a
+         short time at boot, while both the cpio image and the unpacked
+         filesystem image will be present in memory simultaneously
+
 endchoice