Merge tag 'usb-5.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb
author    Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Apr 2021 16:56:22 +0000 (09:56 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 3 Apr 2021 16:56:22 +0000 (09:56 -0700)
Pull USB fixes from Greg KH:
 "Here are a few small USB driver fixes for 5.12-rc6 to resolve reported
  problems.

  They include:

   - a number of cdc-acm fixes for reported problems. It seems more
     people are using this driver lately...

   - dwc3 driver fixes for reported problems, and fixes for the fixes :)

   - dwc2 driver fixes for reported issues.

   - musb driver fix.

   - new USB quirk additions.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'usb-5.12-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb: (23 commits)
  usb: dwc2: Prevent core suspend when port connection flag is 0
  usb: dwc2: Fix HPRT0.PrtSusp bit setting for HiKey 960 board.
  usb: musb: Fix suspend with devices connected for a64
  usb: xhci-mtk: fix broken streams issue on 0.96 xHCI
  usb: dwc3: gadget: Clear DEP flags after stop transfers in ep disable
  usbip: vhci_hcd fix shift out-of-bounds in vhci_hub_control()
  USB: quirks: ignore remote wake-up on Fibocom L850-GL LTE modem
  USB: cdc-acm: do not log successful probe on later errors
  USB: cdc-acm: always claim data interface
  USB: cdc-acm: use negation for NULL checks
  USB: cdc-acm: clean up probe error labels
  USB: cdc-acm: drop redundant driver-data reset
  USB: cdc-acm: drop redundant driver-data assignment
  USB: cdc-acm: fix use-after-free after probe failure
  USB: cdc-acm: fix double free on probe failure
  USB: cdc-acm: downgrade message to debug
  USB: cdc-acm: untangle a circular dependency between callback and softint
  cdc-acm: fix BREAK rx code path adding necessary calls
  usb: gadget: udc: amd5536udc_pci fix null-ptr-dereference
  usb: dwc3: pci: Enable dis_uX_susphy_quirk for Intel Merrifield
  ...

458 files changed:
.mailmap
Documentation/arm64/acpi_object_usage.rst
Documentation/arm64/silicon-errata.rst
Documentation/networking/device_drivers/ethernet/amazon/ena.rst
Documentation/networking/devlink/devlink-dpipe.rst
Documentation/networking/devlink/devlink-port.rst
Documentation/networking/xfrm_device.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/at91-sam9x60ek.dts
arch/arm/boot/dts/at91-sama5d27_som1.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
arch/arm/boot/dts/sam9x60.dtsi
arch/arm/mach-imx/avic.c
arch/arm/mach-imx/common.h
arch/arm/mach-imx/mach-imx1.c
arch/arm/mach-imx/mach-imx25.c
arch/arm/mach-imx/mach-imx27.c
arch/arm/mach-imx/mach-imx31.c
arch/arm/mach-imx/mach-imx35.c
arch/arm/mach-imx/mm-imx3.c
arch/arm/mach-omap2/sr_device.c
arch/arm64/Kconfig
arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
arch/arm64/include/asm/checksum.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/thread_info.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/cpuinfo.c
arch/arm64/kernel/crash_dump.c
arch/arm64/kernel/process.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kvm/debug.c
arch/arm64/kvm/hyp/vgic-v3-sr.c
arch/arm64/mm/mmu.c
arch/ia64/kernel/err_inject.c
arch/ia64/kernel/mca.c
arch/mips/kernel/setup.c
arch/s390/include/asm/vdso/data.h
arch/s390/kernel/time.c
arch/x86/Makefile
arch/x86/include/asm/smp.h
arch/x86/include/asm/xen/page.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/setup.c
arch/x86/kernel/smpboot.c
arch/x86/kvm/Makefile
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/mem_encrypt.c
arch/x86/net/bpf_jit_comp.c
arch/x86/xen/p2m.c
arch/x86/xen/setup.c
arch/xtensa/kernel/coprocessor.S
arch/xtensa/mm/fault.c
block/bio.c
block/blk-merge.c
block/blk-mq-debugfs.c
block/partitions/core.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/internal.h
drivers/acpi/processor_idle.c
drivers/acpi/scan.c
drivers/acpi/tables.c
drivers/acpi/video_detect.c
drivers/auxdisplay/charlcd.c
drivers/base/power/runtime.c
drivers/block/null_blk/main.c
drivers/block/null_blk/null_blk.h
drivers/block/xen-blkback/blkback.c
drivers/bus/omap_l3_noc.c
drivers/bus/ti-sysc.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-rpmh.c
drivers/clk/qcom/gcc-sc7180.c
drivers/cpufreq/freq_table.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/i915/display/intel_atomic_plane.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_aux.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_link_training.h
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_runtime_pm.h
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/imx-ldb.c
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fence.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/rcar-du/rcar_du_encoder.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/sor.c
drivers/gpu/host1x/bus.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/qp.c
drivers/isdn/capi/kcapi.c
drivers/isdn/hardware/mISDN/mISDNipac.c
drivers/md/dm-ioctl.c
drivers/md/dm-table.c
drivers/md/dm-verity-target.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c
drivers/mfd/intel_quark_i2c_gpio.c
drivers/net/arcnet/com20020-pci.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can.c
drivers/net/can/c_can/c_can_pci.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev/netlink.c
drivers/net/can/flexcan.c
drivers/net/can/kvaser_pciefd.c
drivers/net/can/m_can/m_can.c
drivers/net/can/usb/Kconfig
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/mt7530.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/hw.h
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/igc/igc.h
drivers/net/ethernet/intel/igc/igc_ethtool.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
drivers/net/ethernet/marvell/octeontx2/af/rvu.c
drivers/net/ethernet/marvell/octeontx2/af/rvu.h
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
drivers/net/ethernet/mellanox/mlx5/core/sf/mlx5_ifc_vhca_event.h
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ipa/ipa_cmd.c
drivers/net/ipa/ipa_qmi.c
drivers/net/phy/broadcom.c
drivers/net/phy/phylink.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/r8152.c
drivers/net/veth.c
drivers/net/wan/hdlc_x25.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/pinctrl-microchip-sgpio.c
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/qcom/pinctrl-sc7280.c
drivers/pinctrl/qcom/pinctrl-sdx55.c
drivers/platform/x86/Kconfig
drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c
drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
drivers/platform/x86/intel-hid.c
drivers/platform/x86/intel-vbtn.c
drivers/platform/x86/intel_pmc_core.c
drivers/platform/x86/intel_pmt_class.c
drivers/platform/x86/intel_pmt_crashlog.c
drivers/platform/x86/thinkpad_acpi.c
drivers/ptp/ptp_qoriq.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/qedi/qedi_main.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/scsi_transport_iscsi.c
drivers/soc/ti/omap_prm.c
drivers/target/target_core_pscsi.c
drivers/vfio/pci/Kconfig
drivers/vfio/vfio_iommu_type1.c
drivers/xen/Kconfig
fs/afs/write.c
fs/block_dev.c
fs/btrfs/Makefile
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/volumes.c
fs/cachefiles/bind.c
fs/cachefiles/rdwr.c
fs/cifs/cifsacl.c
fs/cifs/cifsglob.h
fs/cifs/cifspdu.h
fs/cifs/file.c
fs/cifs/smb2glob.h
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2transport.c
fs/io-wq.c
fs/io_uring.c
fs/reiserfs/xattr.h
fs/squashfs/export.c
fs/squashfs/id.c
fs/squashfs/squashfs_fs.h
fs/squashfs/xattr_id.c
include/acpi/acpi_bus.h
include/linux/acpi.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/device-mapper.h
include/linux/host1x.h
include/linux/hugetlb_cgroup.h
include/linux/if_macvlan.h
include/linux/memblock.h
include/linux/mlx5/qp.h
include/linux/mm.h
include/linux/mmu_notifier.h
include/linux/mutex.h
include/linux/netdevice.h
include/linux/netfilter/x_tables.h
include/linux/pagemap.h
include/linux/skbuff.h
include/linux/usermode_driver.h
include/linux/xarray.h
include/net/dst.h
include/net/inet_connection_sock.h
include/net/netfilter/nf_tables.h
include/net/nexthop.h
include/net/red.h
include/net/rtnetlink.h
include/net/sock.h
include/scsi/scsi_transport_iscsi.h
include/uapi/linux/blkpg.h
include/uapi/linux/bpf.h
include/uapi/linux/psample.h
kernel/bpf/bpf_inode_storage.c
kernel/bpf/bpf_struct_ops.c
kernel/bpf/core.c
kernel/bpf/preload/bpf_preload_kern.c
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/fork.c
kernel/freezer.c
kernel/gcov/clang.c
kernel/power/energy_model.c
kernel/ptrace.c
kernel/signal.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/usermode_driver.c
lib/math/div64.c
lib/test_xarray.c
lib/xarray.c
mm/highmem.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/kfence/core.c
mm/kmemleak.c
mm/memory.c
mm/mmu_notifier.c
mm/page-writeback.c
mm/z3fold.c
net/bridge/br_switchdev.c
net/can/isotp.c
net/core/dev.c
net/core/drop_monitor.c
net/core/dst.c
net/core/filter.c
net/core/flow_dissector.c
net/core/sock.c
net/dccp/ipv6.c
net/dsa/dsa2.c
net/ipv4/inet_connection_sock.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/route.c
net/ipv4/tcp_minisocks.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_input.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mac80211/aead_api.c
net/mac80211/aes_gmac.c
net/mac80211/cfg.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/util.c
net/mptcp/options.c
net/mptcp/protocol.c
net/mptcp/subflow.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto_gre.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_api.c
net/netfilter/x_tables.c
net/openvswitch/conntrack.c
net/openvswitch/conntrack.h
net/openvswitch/flow.c
net/qrtr/qrtr.c
net/sched/act_ct.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_choke.c
net/sched/sch_gred.c
net/sched/sch_htb.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sctp/output.c
net/sctp/outqueue.c
net/tipc/node.c
net/vmw_vsock/af_vsock.c
net/wireless/nl80211.c
scripts/module.lds.S
security/integrity/iint.c
security/selinux/include/security.h
security/selinux/selinuxfs.c
security/selinux/ss/services.c
security/tomoyo/network.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/include/uapi/linux/kvm.h
tools/kvm/kvm_stat/kvm_stat.service
tools/lib/bpf/Makefile
tools/lib/bpf/btf_dump.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/netlink.c
tools/perf/builtin-daemon.c
tools/perf/tests/bpf.c
tools/perf/tests/shell/daemon.sh
tools/perf/util/auxtrace.c
tools/perf/util/bpf-event.c
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/synthetic-events.c
tools/perf/util/vdso.c
tools/testing/kunit/configs/broken_on_uml.config
tools/testing/kunit/kunit_config.py
tools/testing/radix-tree/idr-test.c
tools/testing/radix-tree/linux/compiler_types.h [deleted file]
tools/testing/radix-tree/multiorder.c
tools/testing/radix-tree/xarray.c
tools/testing/selftests/arm64/fp/sve-test.S
tools/testing/selftests/bpf/prog_tests/check_mtu.c
tools/testing/selftests/bpf/prog_tests/fexit_sleep.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c
tools/testing/selftests/bpf/progs/fexit_sleep.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_check_mtu.c
tools/testing/selftests/bpf/progs/test_tunnel_kern.c
tools/testing/selftests/bpf/verifier/bounds_deduction.c
tools/testing/selftests/bpf/verifier/map_ptr.c
tools/testing/selftests/bpf/verifier/unpriv.c
tools/testing/selftests/bpf/verifier/value_ptr_arith.c
tools/testing/selftests/kvm/hardware_disable_test.c
tools/testing/selftests/kvm/x86_64/hyperv_clock.c
tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/reuseaddr_ports_exhausted.c
tools/testing/selftests/vm/Makefile

index 85b93cdefc87bb0d98fca268fbac125dca87360f..541635d2e02ecd1fbfb376224eb2cf535b16a35d 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -65,6 +66,8 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
index 377e9d224db0f94a68c2b502d8d3f9c52124dc49..0609da73970bb45b2b98a7da2dfef0e3773f7b39 100644 (file)
--- a/Documentation/arm64/acpi_object_usage.rst
+++ b/Documentation/arm64/acpi_object_usage.rst
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
 
        -  Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-          MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-         TCPA, TPM2, UEFI, XENV
+       -  Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+          IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+          STAO, TCPA, TPM2, UEFI, XENV
 
-       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-          MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+       -  Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+          PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table  Usage for ARMv8 Linux
index 71951024729294a70f55d1fa5153390c970c7b64..d410a47ffa57ae9c6dc77f3cc40f4d888a718722 100644 (file)
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
index 3561a8a29fd23cbad54a7f4ffa1c21555ebdd6e1..f8c6469f2bd29597e6132eab8eb40eab1c051aed 100644 (file)
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -267,7 +267,7 @@ DATA PATH
 Tx
 --
 
-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:
 
 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
index 468fe1001b7437e7d4279d929bd31235072af5cf..af37f250df438cfeaacbb2934361d1df08d65bad 100644 (file)
--- a/Documentation/networking/devlink/devlink-dpipe.rst
+++ b/Documentation/networking/devlink/devlink-dpipe.rst
@@ -52,7 +52,7 @@ purposes as a standard complementary tool. The system's view from
 ``devlink-dpipe`` should change according to the changes done by the
 standard configuration tools.
 
-For example, it’s quiet common to  implement Access Control Lists (ACL)
+For example, it’s quite common to  implement Access Control Lists (ACL)
 using Ternary Content Addressable Memory (TCAM). The TCAM memory can be
 divided into TCAM regions. Complex TC filters can have multiple rules with
 different priorities and different lookup keys. On the other hand hardware
index e99b41599465110f09573e8693781808d236dba3..ab790e7980b81a8bc1448de4480bf9817de07aff 100644 (file)
--- a/Documentation/networking/devlink/devlink-port.rst
+++ b/Documentation/networking/devlink/devlink-port.rst
@@ -151,7 +151,7 @@ representor netdevice.
 -------------
 A subfunction devlink port is created but it is not active yet. That means the
 entities are created on devlink side, the e-switch port representor is created,
-but the subfunction device itself it not created. A user might use e-switch port
+but the subfunction device itself is not created. A user might use e-switch port
 representor to do settings, putting it into bridge, adding TC rules, etc. A user
 might as well configure the hardware address (such as MAC address) of the
 subfunction while subfunction is inactive.
@@ -173,7 +173,7 @@ Terms and Definitions
    * - Term
      - Definitions
    * - ``PCI device``
-     - A physical PCI device having one or more PCI bus consists of one or
+     - A physical PCI device having one or more PCI buses consists of one or
        more PCI controllers.
    * - ``PCI controller``
      -  A controller consists of potentially multiple physical functions,
index da1073acda9614c5ace217765f80d66de08d0c04..01391dfd37d9721b3eee60528380d500e8bdf433 100644 (file)
--- a/Documentation/networking/xfrm_device.rst
+++ b/Documentation/networking/xfrm_device.rst
@@ -50,7 +50,7 @@ Callbacks to implement
 
 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem.  Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
 
index 9e876927c60d7dd61151f528391cb783469ded09..6e91994b8d3b26b8b6280d5f101b80b6b9134a2d 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2489,7 +2489,7 @@ N:        sc27xx
 N:     sc2731
 
 ARM/STI ARCHITECTURE
-M:     Patrice Chotard <patrice.chotard@st.com>
+M:     Patrice Chotard <patrice.chotard@foss.st.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 W:     http://www.stlinux.com
@@ -2522,7 +2522,7 @@ F:        include/linux/remoteproc/st_slim_rproc.h
 
 ARM/STM32 ARCHITECTURE
 M:     Maxime Coquelin <mcoquelin.stm32@gmail.com>
-M:     Alexandre Torgue <alexandre.torgue@st.com>
+M:     Alexandre Torgue <alexandre.torgue@foss.st.com>
 L:     linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -3115,7 +3115,7 @@ C:        irc://irc.oftc.net/bcache
 F:     drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER
-M:     Fabien Dessenne <fabien.dessenne@st.com>
+M:     Fabien Dessenne <fabien.dessenne@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -3675,7 +3675,7 @@ M:        bcm-kernel-feedback-list@broadcom.com
 L:     linux-pm@vger.kernel.org
 S:     Maintained
 T:     git git://github.com/broadcom/stblinux.git
-F:     drivers/soc/bcm/bcm-pmb.c
+F:     drivers/soc/bcm/bcm63xx/bcm-pmb.c
 F:     include/dt-bindings/soc/bcm-pmb.h
 
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@ -5080,7 +5080,7 @@ S:        Maintained
 F:     drivers/platform/x86/dell/dell-wmi.c
 
 DELTA ST MEDIA DRIVER
-M:     Hugues Fruchet <hugues.fruchet@st.com>
+M:     Hugues Fruchet <hugues.fruchet@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -6006,7 +6006,6 @@ F:        drivers/gpu/drm/rockchip/
 
 DRM DRIVERS FOR STI
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M:     Vincent Abriou <vincent.abriou@st.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6014,10 +6013,9 @@ F:       Documentation/devicetree/bindings/display/st,stih4xx.txt
 F:     drivers/gpu/drm/sti
 
 DRM DRIVERS FOR STM
-M:     Yannick Fertre <yannick.fertre@st.com>
-M:     Philippe Cornu <philippe.cornu@st.com>
+M:     Yannick Fertre <yannick.fertre@foss.st.com>
+M:     Philippe Cornu <philippe.cornu@foss.st.com>
 M:     Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M:     Vincent Abriou <vincent.abriou@st.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -8230,7 +8228,7 @@ F:        include/linux/hugetlb.h
 F:     mm/hugetlb.c
 
 HVA ST MEDIA DRIVER
-M:     Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+M:     Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 W:     https://linuxtv.org
@@ -8520,6 +8518,7 @@ IBM Power SRIOV Virtual NIC Device Driver
 M:     Dany Madden <drt@linux.ibm.com>
 M:     Lijun Pan <ljp@linux.ibm.com>
 M:     Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+R:     Thomas Falcon <tlfalcon@linux.ibm.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmvnic.*
@@ -10029,7 +10028,6 @@ F:      scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
 M:     Pavel Machek <pavel@ucw.cz>
-R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
@@ -10905,7 +10903,6 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
-M:     Dan Murphy <dmurphy@ti.com>
 M:     Pankaj Sharma <pankj.sharma@samsung.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
@@ -11166,7 +11163,7 @@ T:      git git://linuxtv.org/media_tree.git
 F:     drivers/media/dvb-frontends/stv6111*
 
 MEDIA DRIVERS FOR STM32 - DCMI
-M:     Hugues Fruchet <hugues.fruchet@st.com>
+M:     Hugues Fruchet <hugues.fruchet@foss.st.com>
 L:     linux-media@vger.kernel.org
 S:     Supported
 T:     git git://linuxtv.org/media_tree.git
@@ -12537,7 +12534,7 @@ NETWORKING [MPTCP]
 M:     Mat Martineau <mathew.j.martineau@linux.intel.com>
 M:     Matthieu Baerts <matthieu.baerts@tessares.net>
 L:     netdev@vger.kernel.org
-L:     mptcp@lists.01.org
+L:     mptcp@lists.linux.dev
 S:     Maintained
 W:     https://github.com/multipath-tcp/mptcp_net-next/wiki
 B:     https://github.com/multipath-tcp/mptcp_net-next/issues
@@ -14708,15 +14705,11 @@ F:    drivers/net/ethernet/qlogic/qlcnic/
 QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:     Manish Chopra <manishc@marvell.com>
 M:     GR-Linux-NIC-Dev@marvell.com
-L:     netdev@vger.kernel.org
-S:     Supported
-F:     drivers/staging/qlge/
-
-QLOGIC QLGE 10Gb ETHERNET DRIVER
 M:     Coiby Xu <coiby.xu@gmail.com>
 L:     netdev@vger.kernel.org
-S:     Maintained
+S:     Supported
 F:     Documentation/networking/device_drivers/qlogic/qlge.rst
+F:     drivers/staging/qlge/
 
 QM1D1B0004 MEDIA DRIVER
 M:     Akihiro Tsukada <tskd08@gmail.com>
@@ -15634,8 +15627,8 @@ F:      Documentation/s390/pci.rst
 
 S390 VFIO AP DRIVER
 M:     Tony Krowiak <akrowiak@linux.ibm.com>
-M:     Pierre Morel <pmorel@linux.ibm.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
+M:     Jason Herne <jjherne@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 S:     Supported
 W:     http://www.ibm.com/developerworks/linux/linux390/
@@ -15647,6 +15640,7 @@ F:      drivers/s390/crypto/vfio_ap_private.h
 S390 VFIO-CCW DRIVER
 M:     Cornelia Huck <cohuck@redhat.com>
 M:     Eric Farman <farman@linux.ibm.com>
+M:     Matthew Rosato <mjrosato@linux.ibm.com>
 R:     Halil Pasic <pasic@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
@@ -15657,6 +15651,7 @@ F:      include/uapi/linux/vfio_ccw.h
 
 S390 VFIO-PCI DRIVER
 M:     Matthew Rosato <mjrosato@linux.ibm.com>
+M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
 L:     kvm@vger.kernel.org
 S:     Supported
@@ -16886,8 +16881,10 @@ F:     tools/spi/
 
 SPIDERNET NETWORK DRIVER for CELL
 M:     Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
+M:     Geoff Levand <geoff@infradead.org>
 L:     netdev@vger.kernel.org
-S:     Supported
+L:     linuxppc-dev@lists.ozlabs.org
+S:     Maintained
 F:     Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F:     drivers/net/ethernet/toshiba/spider_net*
 
@@ -16941,7 +16938,8 @@ F:      Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
 F:     drivers/media/i2c/st-mipid02.c
 
 ST STM32 I2C/SMBUS DRIVER
-M:     Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+M:     Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
+M:     Alain Volmat <alain.volmat@foss.st.com>
 L:     linux-i2c@vger.kernel.org
 S:     Maintained
 F:     drivers/i2c/busses/i2c-stm32*
@@ -17066,7 +17064,7 @@ F:      kernel/jump_label.c
 F:     kernel/static_call.c
 
 STI AUDIO (ASoC) DRIVERS
-M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -17086,15 +17084,15 @@ T:    git git://linuxtv.org/media_tree.git
 F:     drivers/media/usb/stk1160/
 
 STM32 AUDIO (ASoC) DRIVERS
-M:     Olivier Moysan <olivier.moysan@st.com>
-M:     Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M:     Olivier Moysan <olivier.moysan@foss.st.com>
+M:     Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:     sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
-M:     Fabrice Gasnier <fabrice.gasnier@st.com>
+M:     Fabrice Gasnier <fabrice.gasnier@foss.st.com>
 S:     Maintained
 F:     Documentation/ABI/testing/*timer-stm32
 F:     Documentation/devicetree/bindings/*/*stm32-*timer*
@@ -17104,7 +17102,7 @@ F:      include/linux/*/stm32-*tim*
 
 STMMAC ETHERNET DRIVER
 M:     Giuseppe Cavallaro <peppe.cavallaro@st.com>
-M:     Alexandre Torgue <alexandre.torgue@st.com>
+M:     Alexandre Torgue <alexandre.torgue@foss.st.com>
 M:     Jose Abreu <joabreu@synopsys.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -17846,7 +17844,6 @@ S:      Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
 TI BQ27XXX POWER SUPPLY DRIVER
-R:     Dan Murphy <dmurphy@ti.com>
 F:     drivers/power/supply/bq27xxx_battery.c
 F:     drivers/power/supply/bq27xxx_battery_i2c.c
 F:     include/linux/power/bq27xxx_battery.h
@@ -17981,7 +17978,6 @@ S:      Odd Fixes
 F:     sound/soc/codecs/tas571x*
 
 TI TCAN4X5X DEVICE DRIVER
-M:     Dan Murphy <dmurphy@ti.com>
 L:     linux-can@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index d4784d18112313f04e0ca4243478658824dec761..73add16f9898244fa74e39decb19c133cc201ce7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
index 5b213a1e68bb2a4554eb37e4e63705cfcfa2157e..5e33d0e88f5b1eb94a7870a7de6b59ff95ea5769 100644 (file)
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -40,6 +40,9 @@
                ethernet1 = &cpsw_emac1;
                spi0 = &spi0;
                spi1 = &spi1;
+               mmc0 = &mmc1;
+               mmc1 = &mmc2;
+               mmc2 = &mmc3;
        };
 
        cpus {
index 73b6b1f89de992a6a67de0fc25115544ba399f2d..775ceb3acb6c01074b8277f8ce1fcb2cdecbe65f 100644 (file)
--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
+++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
 };
 
 &pinctrl {
-       atmel,mux-mask = <
-                        /*     A       B       C       */
-                        0xFFFFFE7F 0xC0E0397F 0xEF00019D       /* pioA */
-                        0x03FFFFFF 0x02FC7E68 0x00780000       /* pioB */
-                        0xffffffff 0xF83FFFFF 0xB800F3FC       /* pioC */
-                        0x003FFFFF 0x003F8000 0x00000000       /* pioD */
-                        >;
-
        adc {
                pinctrl_adc_default: adc_default {
                        atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
index 1b1163858b1d1b28d58f4703d52211b9981e1beb..e3251f3e3eaa2f6671cf76d24805589b0a8b5c97 100644 (file)
--- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi
@@ -84,8 +84,8 @@
                                pinctrl-0 = <&pinctrl_macb0_default>;
                                phy-mode = "rmii";
 
-                               ethernet-phy@0 {
-                                       reg = <0x0>;
+                               ethernet-phy@7 {
+                                       reg = <0x7>;
                                        interrupt-parent = <&pioA>;
                                        interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
                                        pinctrl-names = "default";
index c593597b21196fa2aaca0d31bb49886421cbdf95..5a1e10def6ef6336e7e7ec6eaeb04466503a7f34 100644 (file)
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
                        micrel,led-mode = <1>;
                        clocks = <&clks IMX6UL_CLK_ENET_REF>;
                        clock-names = "rmii-ref";
-                       reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
-                       reset-assert-us = <10000>;
-                       reset-deassert-us = <100>;
 
                };
 
                        micrel,led-mode = <1>;
                        clocks = <&clks IMX6UL_CLK_ENET2_REF>;
                        clock-names = "rmii-ref";
-                       reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
-                       reset-assert-us = <10000>;
-                       reset-deassert-us = <100>;
                };
        };
 };
        status = "okay";
 };
 
+&gpio_spi {
+       eth0-phy-hog {
+               gpio-hog;
+               gpios = <1 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "eth0-phy";
+       };
+
+       eth1-phy-hog {
+               gpio-hog;
+               gpios = <2 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "eth1-phy";
+       };
+};
+
 &i2c1 {
        clock-frequency = <100000>;
        pinctrl-names = "default";
index ecbb2cc5b9ab44bfcb922af0026abd78957888de..79cc45728cd2d47091adfacc710f1d7ce7dc3ba6 100644 (file)
--- a/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
+++ b/arch/arm/boot/dts/imx6ull-myir-mys-6ulx-eval.dts
@@ -14,5 +14,6 @@
 };
 
 &gpmi {
+       fsl,use-minimum-ecc;
        status = "okay";
 };
index 84066c1298df954145859773d4772e5cbe4538eb..ec45ced3cde68eb3492e619d31d261eaaf27248f 100644 (file)
--- a/arch/arm/boot/dts/sam9x60.dtsi
+++ b/arch/arm/boot/dts/sam9x60.dtsi
                                compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
                                ranges = <0xfffff400 0xfffff400 0x800>;
 
+                               /* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+                               atmel,mux-mask = <
+                                                /*     A       B       C       */
+                                                0xffffffff 0xffe03fff 0xef00019d       /* pioA */
+                                                0x03ffffff 0x02fc7e7f 0x00780000       /* pioB */
+                                                0xffffffff 0xffffffff 0xf83fffff       /* pioC */
+                                                0x003fffff 0x003f8000 0x00000000       /* pioD */
+                                                >;
+
                                pioA: gpio@fffff400 {
                                        compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
                                        reg = <0xfffff400 0x200>;
index 322caa21bcb36f76f94f77e930161b1df8572379..21bce4049cec2e7d1607de0f1ba0a73adb5ef2fd 100644 (file)
--- a/arch/arm/mach-imx/avic.c
+++ b/arch/arm/mach-imx/avic.c
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
  * interrupts. It registers the interrupt enable and disable functions
  * to the kernel for each interrupt source.
  */
-void __init mxc_init_irq(void __iomem *irqbase)
+static void __init mxc_init_irq(void __iomem *irqbase)
 {
        struct device_node *np;
        int irq_base;
@@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
 
        printk(KERN_INFO "MXC IRQ initialized\n");
 }
+
+static int __init imx_avic_init(struct device_node *node,
+                              struct device_node *parent)
+{
+       void __iomem *avic_base;
+
+       avic_base = of_iomap(node, 0);
+       BUG_ON(!avic_base);
+       mxc_init_irq(avic_base);
+       return 0;
+}
+
+IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
index 2b004cc4f95e5637fef01a8dc6d25581ea7d5502..474dedb73bc75f950cc14e1bba33572d57982f55 100644 (file)
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -22,7 +22,6 @@ void mx35_map_io(void);
 void imx21_init_early(void);
 void imx31_init_early(void);
 void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
 void mx31_init_irq(void);
 void mx35_init_irq(void);
 void mxc_set_cpu_type(unsigned int type);
index 32df3b8012f9fb58c6fae5a0cd97b22da1fda70a..8eca92d66a2e9cb6a8eb37182411314f90c31129 100644 (file)
--- a/arch/arm/mach-imx/mach-imx1.c
+++ b/arch/arm/mach-imx/mach-imx1.c
@@ -17,16 +17,6 @@ static void __init imx1_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX1);
 }
 
-static void __init imx1_init_irq(void)
-{
-       void __iomem *avic_addr;
-
-       avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
-       WARN_ON(!avic_addr);
-
-       mxc_init_irq(avic_addr);
-}
-
 static const char * const imx1_dt_board_compat[] __initconst = {
        "fsl,imx1",
        NULL
@@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
 
 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
        .init_early     = imx1_init_early,
-       .init_irq       = imx1_init_irq,
        .dt_compat      = imx1_dt_board_compat,
        .restart        = mxc_restart,
 MACHINE_END
index 95de48a1aa7d6c927b8336d7ca4e14f683cd568b..51927bd08aefcae54c44d1ae4b1e4a7f371d0954 100644 (file)
--- a/arch/arm/mach-imx/mach-imx25.c
+++ b/arch/arm/mach-imx/mach-imx25.c
@@ -22,17 +22,6 @@ static void __init imx25_dt_init(void)
        imx_aips_allow_unprivileged_access("fsl,imx25-aips");
 }
 
-static void __init mx25_init_irq(void)
-{
-       struct device_node *np;
-       void __iomem *avic_base;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-       mxc_init_irq(avic_base);
-}
-
 static const char * const imx25_dt_board_compat[] __initconst = {
        "fsl,imx25",
        NULL
@@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
        .init_early     = imx25_init_early,
        .init_machine   = imx25_dt_init,
        .init_late      = imx25_pm_init,
-       .init_irq       = mx25_init_irq,
        .dt_compat      = imx25_dt_board_compat,
 MACHINE_END
index 262422a9c196c2e52e26faa629b5dd5317e46dbd..e325c9468105134eceba1010e330dab6bf2f1f79 100644 (file)
--- a/arch/arm/mach-imx/mach-imx27.c
+++ b/arch/arm/mach-imx/mach-imx27.c
@@ -56,17 +56,6 @@ static void __init imx27_init_early(void)
        mxc_set_cpu_type(MXC_CPU_MX27);
 }
 
-static void __init mx27_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-       mxc_init_irq(avic_base);
-}
-
 static const char * const imx27_dt_board_compat[] __initconst = {
        "fsl,imx27",
        NULL
@@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
        .map_io         = mx27_map_io,
        .init_early     = imx27_init_early,
-       .init_irq       = mx27_init_irq,
        .init_late      = imx27_pm_init,
        .dt_compat      = imx27_dt_board_compat,
 MACHINE_END
index dc69dfe600dfe19da6e8b00ea0049592e1ef9215..e9a1092b6093879acacb3e2443d4dda993de47e3 100644 (file)
--- a/arch/arm/mach-imx/mach-imx31.c
+++ b/arch/arm/mach-imx/mach-imx31.c
@@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
        .map_io         = mx31_map_io,
        .init_early     = imx31_init_early,
-       .init_irq       = mx31_init_irq,
        .dt_compat      = imx31_dt_board_compat,
 MACHINE_END
index ec5c3068715c3495d56c32f5b455692809b8d12c..0fc08218b77da05ac738dba5a4cac93144b4c052 100644 (file)
--- a/arch/arm/mach-imx/mach-imx35.c
+++ b/arch/arm/mach-imx/mach-imx35.c
@@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
        .l2c_aux_mask   = ~0,
        .map_io         = mx35_map_io,
        .init_early     = imx35_init_early,
-       .init_irq       = mx35_init_irq,
        .dt_compat      = imx35_dt_board_compat,
 MACHINE_END
index 5056438e5b4276fc49ca608dec32f3509d3d8cd9..28db97289ee8ffec84d83f0b1d024bcd9edd1c7e 100644 (file)
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -109,18 +109,6 @@ void __init imx31_init_early(void)
        mx3_ccm_base = of_iomap(np, 0);
        BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx31_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-
-       mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX31 */
 
 #ifdef CONFIG_SOC_IMX35
@@ -158,16 +146,4 @@ void __init imx35_init_early(void)
        mx3_ccm_base = of_iomap(np, 0);
        BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx35_init_irq(void)
-{
-       void __iomem *avic_base;
-       struct device_node *np;
-
-       np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic");
-       avic_base = of_iomap(np, 0);
-       BUG_ON(!avic_base);
-
-       mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX35 */
index 62df666c2bd0bad494a908212e04b7a7d1a33553..17b66f0d0deef07dc6f48c60c1baf874664a8360 100644 (file)
--- a/arch/arm/mach-omap2/sr_device.c
+++ b/arch/arm/mach-omap2/sr_device.c
@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
 
 extern struct omap_sr_data omap_sr_pdata[];
 
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
 {
        struct omap_sr_data *sr_data = NULL;
        struct omap_volt_data *volt_data;
-       struct omap_smartreflex_dev_attr *sr_dev_attr;
        static int i;
 
-       if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
-           !strncmp(oh->name, "smartreflex_mpu", 16))
+       if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+           !strncmp(name, "smartreflex_mpu", 16))
                sr_data = &omap_sr_pdata[OMAP_SR_MPU];
-       else if (!strncmp(oh->name, "smartreflex_core", 17))
+       else if (!strncmp(name, "smartreflex_core", 17))
                sr_data = &omap_sr_pdata[OMAP_SR_CORE];
-       else if (!strncmp(oh->name, "smartreflex_iva", 16))
+       else if (!strncmp(name, "smartreflex_iva", 16))
                sr_data = &omap_sr_pdata[OMAP_SR_IVA];
 
        if (!sr_data) {
-               pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+               pr_err("%s: Unknown instance %s\n", __func__, name);
                return -EINVAL;
        }
 
-       sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
-       if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
-               pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
-                      __func__, oh->name);
-               goto exit;
-       }
-
-       sr_data->name = oh->name;
+       sr_data->name = name;
        if (cpu_is_omap343x())
                sr_data->ip_type = 1;
        else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
                }
        }
 
-       sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+       sr_data->voltdm = voltdm_lookup(voltdm);
        if (!sr_data->voltdm) {
                pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
-                       __func__, sr_dev_attr->sensor_voltdm_name);
+                       __func__, voltdm);
                goto exit;
        }
 
@@ -160,6 +152,20 @@ exit:
        return 0;
 }
 
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+       struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+       sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+       if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+               pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+                      __func__, oh->name);
+               return 0;
+       }
+
+       return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+
 /*
  * API to be called from board files to enable smartreflex
  * autocompensation at init.
@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
        sr_enable_on_init = true;
 }
 
+static const char * const omap4_sr_instances[] = {
+       "mpu",
+       "iva",
+       "core",
+};
+
+static const char * const dra7_sr_instances[] = {
+       "mpu",
+       "core",
+};
+
 int __init omap_devinit_smartreflex(void)
 {
+       const char * const *sr_inst;
+       int i, nr_sr = 0;
+
+       if (soc_is_omap44xx()) {
+               sr_inst = omap4_sr_instances;
+               nr_sr = ARRAY_SIZE(omap4_sr_instances);
+
+       } else if (soc_is_dra7xx()) {
+               sr_inst = dra7_sr_instances;
+               nr_sr = ARRAY_SIZE(dra7_sr_instances);
+       }
+
+       if (nr_sr) {
+               const char *name, *voltdm;
+
+               for (i = 0; i < nr_sr; i++) {
+                       name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+                       voltdm = sr_inst[i];
+                       sr_init_by_name(name, voltdm);
+               }
+
+               return 0;
+       }
+
        return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
 }
index 5656e7aacd698436c8b8046d9d01280ea02fda3e..e4e1b65501156e037c7225c742af0b8ef8735983 100644 (file)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
 
          If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+       bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+       default y
+       help
+         If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+         invalidate shared TLB entries installed by a different core, as it would
+         on standard ARM cores.
+
+         If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
        bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
        default y
index 7de6b376d7924bc4cb322ad5876b3b7d3789b362..9058cfa4980f2739746f90428cf79fb1c08c628b 100644 (file)
--- a/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1012a.dtsi
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 5a8a1dc4262d876e66e1cfb170200e07d1e4013e..28c51e521cb22299e5314b92430e09de5fccfa0d 100644 (file)
--- a/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1043a.dtsi
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <0 75 0x4>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 1d6dfd189c7f4f686162cd23c3adaba044a5710c..39458305e3334cc460d1330a19ab82970b0ee751 100644 (file)
--- a/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1046a.dtsi
                        ranges = <0x0 0x00 0x1700000 0x100000>;
                        reg = <0x00 0x1700000 0x0 0x100000>;
                        interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+                       dma-coherent;
 
                        sec_jr0: jr@10000 {
                                compatible = "fsl,sec-v5.4-job-ring",
index 0e1a6d953389d3ac9ba33c7688f38a13c60f5e69..122c95ddad3053b2c28d6a24bcedad354003bf41 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phyboard-pollux-rdk.dts
@@ -35,7 +35,7 @@
 
 &i2c2 {
        clock-frequency = <400000>;
-       pinctrl-names = "default";
+       pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c2>;
        pinctrl-1 = <&pinctrl_i2c2_gpio>;
        sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
index 44a8c2337cee46f01bc535bd61de8725f2ae8392..f3965ec5b31ddf091ae10e3670defe982bc9aa8f 100644 (file)
--- a/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-phycore-som.dtsi
@@ -67,7 +67,7 @@
 
 &i2c1 {
        clock-frequency = <400000>;
-       pinctrl-names = "default";
+       pinctrl-names = "default", "gpio";
        pinctrl-0 = <&pinctrl_i2c1>;
        pinctrl-1 = <&pinctrl_i2c1_gpio>;
        sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
index 93a161b3bf3fee9476811665296c10d9c3b919a4..dc52b733675db5521a810db1e31356d9e297530b 100644 (file)
--- a/arch/arm64/include/asm/checksum.h
+++ b/arch/arm64/include/asm/checksum.h
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
        } while (--n > 0);
 
        sum += ((sum >> 32) | (sum << 32));
-       return csum_fold((__force u32)(sum >> 32));
+       return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
 
index b77d997b173bc7f7eba63a2025fbe8702d812ac1..c40f2490cd7b7ddca4e814681e6a87a63a3813a0 100644 (file)
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412               58
 #define ARM64_HAS_LDAPR                                59
 #define ARM64_KVM_PROTECTED_MODE               60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP     61
 
-#define ARM64_NCAPS                            61
+#define ARM64_NCAPS                            62
 
 #endif /* __ASM_CPUCAPS_H */
index 4e90c2debf70a9f33bec355911c0620d782ca7bc..94d4025acc0b96c70be223049626b5377dab2d5a 100644 (file)
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
 #define CPTR_EL2_DEFAULT       CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF          (1 << 19)
 #define MDCR_EL2_TPMS          (1 << 14)
 #define MDCR_EL2_E2PB_MASK     (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT    (UL(12))
index ca2cd75d32863c8301129700e0c6ab6f67183add..efc10e9041a035850bf343757c7fcf6092f8a462 100644 (file)
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                         struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
index 9f4e3b266f21a7cc83d45e69ef5f3af077774a19..6623c99f09841a667318bf2e76f2f449e382aed7 100644 (file)
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec     arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+                               struct task_struct *src);
 
 #endif
 
index 506a1cd3797390f01c9ea4d27787d862bc895475..e2c20c036442fbc61e3706eac9075f2f89782fd6 100644 (file)
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                                  0, 0,
                                  1, 0),
        },
+#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+       {
+               /* NVIDIA Carmel */
+               .desc = "NVIDIA Carmel CNP erratum",
+               .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+       },
 #endif
        {
        }
index 066030717a4c7f4c555659eeeb362871f25b7583..e5281e1c8f1d291020790d0319922ee8c1b3a4ea 100644 (file)
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
         * of support.
         */
        S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
        ARM64_FTR_END,
 };
@@ -1321,7 +1320,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
         * may share TLB entries with a CPU stuck in the crashed
         * kernel.
         */
-        if (is_kdump_kernel())
+       if (is_kdump_kernel())
+               return false;
+
+       if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
                return false;
 
        return has_cpuid_feature(entry, scope);
index 77605aec25fec843e6169c639b7f3b16392947d8..51fcf99d535141b0d707260f2f3e5d48d3d3f5ad 100644 (file)
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
         * with the CLIDR_EL1 fields to avoid triggering false warnings
         * when there is a mismatch across the CPUs. Keep track of the
         * effective value of the CTR_EL0 in our internal records for
-        * acurate sanity check and feature enablement.
+        * accurate sanity check and feature enablement.
         */
        info->reg_ctr = read_cpuid_effective_cachetype();
        info->reg_dczid = read_cpuid(DCZID_EL0);
index e6e284265f19d1f7e5ffb3f9c2249f1b69f39f53..58303a9ec32c4f4a58ff6726fa9de90092d7bf30 100644 (file)
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
        memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+       *ppos += count;
+
        return count;
 }
index 325c83b1a24df2101e19f8d695fe3ffde1230431..6e60aa3b5ea960346f05647eb0decbc8f3f5cd58 100644 (file)
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
index ad20981dfda474bc60d1772b3ade0ef7831dbf7a..d55bdfb7789c1f05f118513ec091cc1846c8e49d 100644 (file)
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-                    struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+                             void *cookie, struct task_struct *task,
+                             struct pt_regs *regs)
 {
        struct stackframe frame;
 
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                start_backtrace(&frame, regs->regs[29], regs->pc);
        else if (task == current)
                start_backtrace(&frame,
-                               (unsigned long)__builtin_frame_address(0),
-                               (unsigned long)arch_stack_walk);
+                               (unsigned long)__builtin_frame_address(1),
+                               (unsigned long)__builtin_return_address(0));
        else
                start_backtrace(&frame, thread_saved_fp(task),
                                thread_saved_pc(task));
index 7a7e425616b54710a0a10f8b494d31eb9f36bb15..dbc8905116311c6a724443fb1601698f5e27b636 100644 (file)
--- a/arch/arm64/kvm/debug.c
+++ b/arch/arm64/kvm/debug.c
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
  *  - Debug ROM Address (MDCR_EL2_TDRA)
  *  - OS related registers (MDCR_EL2_TDOSA)
  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
  *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
        vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
        vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
                                MDCR_EL2_TPMS |
+                               MDCR_EL2_TTRF |
                                MDCR_EL2_TPMCR |
                                MDCR_EL2_TDRA |
                                MDCR_EL2_TDOSA);
index ee3682b9873c44f00c3a2c965987289610caf5d5..39f8f7f9227c3eafabfea247bbe24b42ba9e6e0b 100644 (file)
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
        if (has_vhe())
                flags = local_daif_save();
 
+       /*
+        * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+        * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+        * interrupt overrides must be set. You've got to love this.
+        */
+       sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+       isb();
        write_gicreg(0, ICC_SRE_EL1);
        isb();
 
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
 
        write_gicreg(sre, ICC_SRE_EL1);
        isb();
+       sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+       isb();
 
        if (has_vhe())
                local_daif_restore(flags);
index 7484ea4f6ba07300a25a4c0689309f9332e9913a..5d9550fdb9cf8d14871069d2c229db3245688756 100644 (file)
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
        struct range mhp_range;
+       u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+       u64 end_linear_pa = __pa(PAGE_END - 1);
+
+       if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+               /*
+                * Check for a wrap: because of the randomized linear
+                * mapping, the start physical address can end up bigger
+                * than the end physical address. In that case set start
+                * to zero, because the [0, end_linear_pa] range must
+                * still be able to cover all addressable physical
+                * addresses.
+                */
+               if (start_linear_pa > end_linear_pa)
+                       start_linear_pa = 0;
+       }
+
+       WARN_ON(start_linear_pa > end_linear_pa);
 
        /*
         * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
         * range which can be mapped inside this linear mapping range, must
         * also be derived from its end points.
         */
-       mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-       mhp_range.end =  __pa(PAGE_END - 1);
+       mhp_range.start = start_linear_pa;
+       mhp_range.end =  end_linear_pa;
+
        return mhp_range;
 }
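
[Editor's note: a standalone sketch of the clamping logic added above, with made-up addresses, showing the case where randomization of the linear map makes the start physical address exceed the end and the range is widened to start at zero:

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/* Same shape as the arch_get_mappable_range() fix: if the randomized
 * linear mapping made start_pa wrap past end_pa, fall back to 0 so
 * [0, end_pa] still covers every addressable physical address. */
static struct range mappable_range(uint64_t start_pa, uint64_t end_pa,
				   int randomize_base)
{
	if (randomize_base && start_pa > end_pa)
		start_pa = 0;
	return (struct range){ .start = start_pa, .end = end_pa };
}

int main(void)
{
	/* Normal case: start below end. */
	struct range r1 = mappable_range(0x80000000ULL, 0xffffffffULL, 1);
	/* Wrapped case: randomization pushed start above end. */
	struct range r2 = mappable_range(0x200000000ULL, 0x1ffffffffULL, 1);

	printf("r1: [%#llx, %#llx]\n", (unsigned long long)r1.start,
	       (unsigned long long)r1.end);
	printf("r2: [%#llx, %#llx]\n", (unsigned long long)r2.start,
	       (unsigned long long)r2.end);
	return 0;
}
]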
 
index 8b5b8e6bc9d9ad9937119531d102e410b005b7ac..dd5bfed52031de81b2166279a0811b6bd7b4f01a 100644 (file)
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr,        \
                char *buf)                                              \
 {                                                                      \
        u32 cpu=dev->id;                                                \
-       return sprintf(buf, "%lx\n", name[cpu]);                        \
+       return sprintf(buf, "%llx\n", name[cpu]);                       \
 }
 
 #define store(name)                                                    \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
        printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-       printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-       printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-       printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+       printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+       printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+       printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
                          err_data_buffer[cpu].data1,
                          err_data_buffer[cpu].data2,
                          err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
        printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-       printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-       printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+       printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+       printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
        return size;
 }
@@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        unsigned int cpu=dev->id;
-       return sprintf(buf, "%lx\n", phys_addr[cpu]);
+       return sprintf(buf, "%llx\n", phys_addr[cpu]);
 }
 
 static ssize_t
@@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
        ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
        if (ret<=0) {
 #ifdef ERR_INJ_DEBUG
-               printk("Virtual address %lx is not existing.\n",virt_addr);
+               printk("Virtual address %llx is not existing.\n", virt_addr);
 #endif
                return -EINVAL;
        }
@@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
 {
        unsigned int cpu=dev->id;
 
-       return sprintf(buf, "%lx, %lx, %lx\n",
+       return sprintf(buf, "%llx, %llx, %llx\n",
                        err_data_buffer[cpu].data1,
                        err_data_buffer[cpu].data2,
                        err_data_buffer[cpu].data3);
@@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
        int ret;
 
 #ifdef ERR_INJ_DEBUG
-       printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
+       printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
                 err_data_buffer[cpu].data1,
                 err_data_buffer[cpu].data2,
                 err_data_buffer[cpu].data3,
                 cpu);
 #endif
-       ret=sscanf(buf, "%lx, %lx, %lx",
+       ret = sscanf(buf, "%llx, %llx, %llx",
                        &err_data_buffer[cpu].data1,
                        &err_data_buffer[cpu].data2,
                        &err_data_buffer[cpu].data3);
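
[Editor's note: the hunks above are pure format-specifier fixes. They follow from the underlying variables being u64, which on this configuration is unsigned long long, so %lx is a type mismatch that -Wformat flags. A trivial demonstration:

#include <stdio.h>

typedef unsigned long long u64;	/* matches the kernel's u64 here */

int main(void)
{
	u64 v = 0x123456789abcdef0ULL;

	/* printf("%lx\n", v);	<- -Wformat: format expects
	 *			   'unsigned long', argument is
	 *			   'unsigned long long'		*/
	printf("%llx\n", v);	/* correct specifier for u64	*/
	return 0;
}
]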
index d4cae2fc69ca333ae03fe342aa3f04f0ce389653..adf6521525f4bd4f2d25cee97c20c9690914aebc 100644 (file)
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
                        data = mca_bootmem();
                        first_time = 0;
                } else
-                       data = (void *)__get_free_pages(GFP_KERNEL,
+                       data = (void *)__get_free_pages(GFP_ATOMIC,
                                                        get_order(sz));
                if (!data)
                        panic("Could not allocate MCA memory for cpu %d\n",
index 279be0153f8b27acc1411bebf37717d5f13550e6..23a140327a0bac1b88df79dab008a4709f28cdaf 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
index 7b3cdb4a5f48148085608ec00cb48131feaaf416..73ee8914266629559ad11053f79984ac49b4b97e 100644 (file)
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-       __u64 tod_steering_delta;
+       __s64 tod_steering_delta;
        __u64 tod_steering_end;
 };
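
[Editor's note: the __u64 to __s64 change matters because the TOD steering delta can be negative - the clock may be steered backwards as well as forwards - and a consumer reading the field through an unsigned type misinterprets a small negative delta as a huge positive one. A quick illustration of the reinterpretation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t delta = -42;		/* steering slows the clock down */
	uint64_t as_unsigned = (uint64_t)delta;

	/* Read back through the wrong (unsigned) type: -42 becomes
	 * 0xffffffffffffffd6, a huge positive adjustment. */
	printf("signed:   %lld\n", (long long)delta);
	printf("unsigned: %llu\n", (unsigned long long)as_unsigned);
	return 0;
}
]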
 
index 165da961f9019adf641c6568a2cc4c853862a007..326cb8f75f58ef21cc73ba7c244103c8e13e509a 100644 (file)
@@ -80,10 +80,12 @@ void __init time_early_init(void)
 {
        struct ptff_qto qto;
        struct ptff_qui qui;
+       int cs;
 
        /* Initialize TOD steering parameters */
        tod_steering_end = tod_clock_base.tod;
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++)
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
 
        if (!test_facility(28))
                return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
 {
        unsigned long now, adj;
        struct ptff_qto qto;
+       int cs;
 
        /* Fixup the monotonic sched clock. */
        tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
                panic("TOD clock sync offset %li is too large to drift\n",
                      tod_steering_delta);
        tod_steering_end = now + (abs(tod_steering_delta) << 15);
-       vdso_data->arch_data.tod_steering_end = tod_steering_end;
+       for (cs = 0; cs < CS_BASES; cs++) {
+               vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+               vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+       }
 
        /* Update LPAR offset. */
        if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
index 2d6d5a28c3bf791b586d50b8bcb18039c0b62ffc..9a85eae37b17357e8d1b9ee5058b44e6bad2c09d 100644 (file)
@@ -27,7 +27,7 @@ endif
 REALMODE_CFLAGS        := -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
                   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
                   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-                  -mno-mmx -mno-sse
+                  -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
index c0538f82c9a220cfafbb20d7d954eff6eff5053e..57ef2094af93ede71e6ad6283fd699160d21de5d 100644 (file)
@@ -132,6 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+bool wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
index 7068e4bb057d9a6a9572e53a848a1b95e5f8d947..1a162e559753b5ef97def181ad94f574eac28218 100644 (file)
@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 #endif
 
-/*
- * The maximum amount of extra memory compared to the base size.  The
- * main scaling factor is the size of struct page.  At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO    (10)
-
 /*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.
index 7bdc0239a9435a131cfe8dda7b8d28ed97956696..14cd3186dc77dc7a929a47026bfa8c3152f89308 100644 (file)
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
        /*
         * Initialize the ACPI boot-time table parser.
         */
-       if (acpi_table_init()) {
+       if (acpi_locate_initial_tables())
                disable_acpi();
-               return;
-       }
+       else
+               acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+       if (acpi_disabled)
+               return 1;
+
+       acpi_table_init_complete();
 
        acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@ void __init acpi_boot_table_init(void)
                } else {
                        printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
                        disable_acpi();
-                       return;
+                       return 1;
                }
        }
-}
-
-int __init early_acpi_boot_init(void)
-{
-       /*
-        * If acpi_disabled, bail out
-        */
-       if (acpi_disabled)
-               return 1;
 
        /*
         * Process the Multiple APIC Description Table (MADT), if present
index d883176ef2ce04c1f86465b433d4a3b8ec74549e..5ecd69a48393d29c4aaac22f9e95ccb073a2b1b4 100644 (file)
@@ -1045,6 +1045,9 @@ void __init setup_arch(char **cmdline_p)
 
        cleanup_highmap();
 
+       /* Look for ACPI tables and reserve memory occupied by them. */
+       acpi_boot_table_init();
+
        memblock_set_current_limit(ISA_END_ADDRESS);
        e820__memblock_setup();
 
@@ -1136,11 +1139,6 @@ void __init setup_arch(char **cmdline_p)
 
        early_platform_quirks();
 
-       /*
-        * Parse the ACPI tables for possible boot-time SMP configuration.
-        */
-       acpi_boot_table_init();
-
        early_acpi_boot_init();
 
        initmem_init();
index 02813a7f3a7cf98745a119aa3d3024adfcd675d8..f877150a91da11ade0e910473691693530bac64c 100644 (file)
@@ -1659,7 +1659,7 @@ void play_dead_common(void)
        local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+bool wakeup_cpu0(void)
 {
        if (smp_processor_id() == 0 && enable_start_cpu0)
                return true;
index 1b4766fe1de2defec41a7025627ca9ef55d477b8..eafc4d601f2537c2ff8e314f8958b18143d2a338 100644 (file)
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
index d75524bc84234ecea48b0b6606238fead77c6e04..486aa94ecf1da37b138fd90a0669fa0eb47ba068 100644 (file)
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
        struct kvm_mmu_page *sp;
        unsigned int ratio;
        LIST_HEAD(invalid_list);
+       bool flush = false;
        ulong to_zap;
 
        rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
                                      lpage_disallowed_link);
                WARN_ON_ONCE(!sp->lpage_disallowed);
                if (is_tdp_mmu_page(sp)) {
-                       kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-                               sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+                       flush = kvm_tdp_mmu_zap_sp(kvm, sp);
                } else {
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                        WARN_ON_ONCE(sp->lpage_disallowed);
                }
 
                if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+                       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                        cond_resched_rwlock_write(&kvm->mmu_lock);
+                       flush = false;
                }
        }
-       kvm_mmu_commit_zap_page(kvm, &invalid_list);
+       kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 
        write_unlock(&kvm->mmu_lock);
        srcu_read_unlock(&kvm->srcu, rcu_idx);
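
[Editor's note: the pattern above - carry a 'flush' flag across iterations, flush before every voluntary reschedule, then reset the flag - is worth calling out, since dropping a pending flush at a yield point is exactly the bug being fixed here. A compilable skeleton of the pattern with the KVM specifics stubbed out (zap_one(), flush_remote_tlbs() and need_resched_stub() are stand-ins, not real APIs):

#include <stdbool.h>
#include <stdio.h>

static bool zap_one(int i)	/* returns true if a TLB flush is owed */
{
	printf("zap %d\n", i);
	return (i % 2) == 0;	/* pretend some zaps need a flush */
}
static void flush_remote_tlbs(void) { puts("flush"); }
static bool need_resched_stub(int i) { return (i % 3) == 2; }

int main(void)
{
	bool flush = false;
	int i;

	for (i = 0; i < 7; i++) {
		flush |= zap_one(i);

		if (need_resched_stub(i)) {
			/* Perform any pending flush *before* yielding,
			 * then reset the flag - mirroring
			 * kvm_mmu_remote_flush_or_zap(kvm, &invalid_list,
			 * flush); followed by flush = false; above. */
			if (flush)
				flush_remote_tlbs();
			flush = false;
		}
	}
	if (flush)		/* final flush after the loop */
		flush_remote_tlbs();
	return 0;
}
]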
index 462b1f71c77f94fb0d27370845919cfb1de0522a..018d82e73e3117d14b05adaa104f46569402b222 100644 (file)
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield);
+                         gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
        list_del(&root->link);
 
-       zap_gfn_range(kvm, root, 0, max_gfn, false);
+       zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
@@ -668,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
  * scheduler needs the CPU or there is contention on the MMU lock. If this
  * function cannot yield, it will not release the MMU lock or reschedule and
  * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup.  Note, in some use cases a flush may be
+ * required by prior actions.  Ensure the pending flush is performed prior to
+ * yielding.
  */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-                         gfn_t start, gfn_t end, bool can_yield)
+                         gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
        struct tdp_iter iter;
-       bool flush_needed = false;
 
        rcu_read_lock();
 
        tdp_root_for_each_pte(iter, root, start, end) {
                if (can_yield &&
-                   tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-                       flush_needed = false;
+                   tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+                       flush = false;
                        continue;
                }
 
@@ -699,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                        continue;
 
                tdp_mmu_set_spte(kvm, &iter, 0);
-               flush_needed = true;
+               flush = true;
        }
 
        rcu_read_unlock();
-       return flush_needed;
+       return flush;
 }
 
 /*
@@ -712,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
  * SPTEs have been cleared and a TLB flush is needed before releasing the
  * MMU lock.
  */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield)
 {
        struct kvm_mmu_page *root;
        bool flush = false;
 
        for_each_tdp_mmu_root_yield_safe(kvm, root)
-               flush |= zap_gfn_range(kvm, root, start, end, true);
+               flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
        return flush;
 }
@@ -930,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
 {
-       return zap_gfn_range(kvm, root, start, end, false);
+       return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
index 3b761c111bff13bf9d09531ebeaf1b21962ed42a..31096ece9b144a50a401c54803abff5f2cb00a0d 100644 (file)
@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+                                bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+                                            gfn_t end)
+{
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+       gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+       /*
+        * Don't allow yielding, as the caller may have a flush pending.  Note,
+        * if mmu_lock is held for write, zapping will never yield in this case,
+        * but explicitly disallow it for safety.  The TDP MMU does not yield
+        * until it has made forward progress (steps sideways), and when zapping
+        * a single shadow page that it's guaranteed to see (thus the mmu_lock
+        * requirement), its "step sideways" will always step beyond the bounds
+        * of the shadow page's gfn range and stop iterating before yielding.
+        */
+       lockdep_assert_held_write(&kvm->mmu_lock);
+       return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
index 35891d9a1099b3f54d146e0abb26b89c74075b2e..fb204eaa8bb3924063a2f78e63bc98ffc450bb7f 100644 (file)
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
        return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool vmcb12_lma;
 
+       /*
+        * FIXME: these should be done after copying the fields,
+        * to avoid TOCTOU (time-of-check/time-of-use) races.  For
+        * these save area checks the possible damage is limited,
+        * since kvm_set_cr0 and kvm_set_cr4 handle failure;
+        * EFER_SVME is an exception, so it is force-set later in
+        * nested_prepare_vmcb_save.
+        */
        if ((vmcb12->save.efer & EFER_SVME) == 0)
                return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
        if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
                return false;
 
-       return nested_vmcb_check_controls(&vmcb12->control);
+       return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
        svm->vmcb->save.gdtr = vmcb12->save.gdtr;
        svm->vmcb->save.idtr = vmcb12->save.idtr;
        kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-       svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+       /*
+        * Force-set EFER_SVME even though it is checked earlier on the
+        * VMCB12, because the guest can flip the bit between the check
+        * and now.  Clearing EFER_SVME would call svm_free_nested.
+        */
+       svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
        svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
        svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
        svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
        svm->nested.vmcb12_gpa = vmcb12_gpa;
-       load_nested_vmcb_control(svm, &vmcb12->control);
        nested_prepare_vmcb_control(svm);
        nested_prepare_vmcb_save(svm, vmcb12);
 
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
        if (WARN_ON_ONCE(!svm->nested.initialized))
                return -EINVAL;
 
-       if (!nested_vmcb_checks(svm, vmcb12)) {
+       load_nested_vmcb_control(svm, &vmcb12->control);
+
+       if (!nested_vmcb_check_save(svm, vmcb12) ||
+           !nested_vmcb_check_controls(&svm->nested.ctl)) {
                vmcb12->control.exit_code    = SVM_EXIT_ERR;
                vmcb12->control.exit_code_hi = 0;
                vmcb12->control.exit_info_1  = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
         */
        if (!(save->cr0 & X86_CR0_PG))
                goto out_free;
+       if (!(save->efer & EFER_SVME))
+               goto out_free;
 
        /*
         * All checks done, we can enter guest mode.  L1 control fields
index 035da07500e8bf033ad1fd78675739961b9b8423..fdf587f19c5fb2871c9d8dad20eba10c39b7375d 100644 (file)
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
 {
+       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
+               if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+                       return NULL;
+               fallthrough;
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
index fe806e89421280584e174ad9d6552c40b6418d05..eca63625aee4d826913d944bb98d2f39bd14cce2 100644 (file)
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
  * When called, it means the previous get/set msr reached an invalid msr.
  * Return true if we want to ignore/silent this failed msr access.
  */
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-                                 u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
 {
        const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        if (r == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear the output for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        r = 0;
        }
 
@@ -1620,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
        int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
        if (ret == KVM_MSR_RET_INVALID)
-               if (kvm_msr_ignored_check(vcpu, index, data, true))
+               if (kvm_msr_ignored_check(index, data, true))
                        ret = 0;
 
        return ret;
@@ -1658,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
        if (ret == KVM_MSR_RET_INVALID) {
                /* Unconditionally clear *data for simplicity */
                *data = 0;
-               if (kvm_msr_ignored_check(vcpu, index, 0, false))
+               if (kvm_msr_ignored_check(index, 0, false))
                        ret = 0;
        }
 
@@ -2329,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        kvm_vcpu_write_tsc_offset(vcpu, offset);
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-       spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
        if (!matched) {
                kvm->arch.nr_vcpus_matched_tsc = 0;
        } else if (!already_matched) {
@@ -2337,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
        }
 
        kvm_track_tsc_matching(vcpu);
-       spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2559,13 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        int i;
        struct kvm_vcpu *vcpu;
        struct kvm_arch *ka = &kvm->arch;
+       unsigned long flags;
 
        kvm_hv_invalidate_tsc_page(kvm);
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
        kvm_make_mclock_inprogress_request(kvm);
+
        /* no guest entries from this point */
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        pvclock_update_vm_gtod_copy(kvm);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2573,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
        /* guest entries allowed */
        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
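
[Editor's note: the spin_lock to spin_lock_irqsave conversions throughout this file address one hazard: pvclock_gtod_sync_lock can now be taken from contexts with interrupts disabled, so every acquisition must save and disable IRQs, otherwise an interrupt arriving on the same CPU while the lock is held can deadlock it. A compilable sketch of the shape, with the locking primitives stubbed as no-ops (the real ones live in <linux/spinlock.h>):

#include <stdio.h>

#define spin_lock_irqsave(lock, flags) \
	do { (void)(lock); (flags) = 0; puts("irqs off, lock taken"); } while (0)
#define spin_unlock_irqrestore(lock, flags) \
	do { (void)(lock); (void)(flags); puts("lock dropped, irqs restored"); } while (0)

static int pvclock_gtod_sync_lock;	/* stand-in for the real spinlock */

static void update_masterclock_copy(void)
{
	unsigned long flags;

	/* If an interrupt handler on this CPU could also take the lock,
	 * plain spin_lock() here would deadlock: we hold the lock, the
	 * IRQ spins on it, and the interrupted holder never runs again.
	 * Saving and disabling IRQs across the critical section closes
	 * that window, which is what the conversions above do. */
	spin_lock_irqsave(&pvclock_gtod_sync_lock, flags);
	puts("pvclock_update_vm_gtod_copy()");
	spin_unlock_irqrestore(&pvclock_gtod_sync_lock, flags);
}

int main(void)
{
	update_masterclock_copy();
	return 0;
}
]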
 
@@ -2582,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
        struct kvm_arch *ka = &kvm->arch;
        struct pvclock_vcpu_time_info hv_clock;
+       unsigned long flags;
        u64 ret;
 
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        if (!ka->use_master_clock) {
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
                return get_kvmclock_base_ns() + ka->kvmclock_offset;
        }
 
        hv_clock.tsc_timestamp = ka->master_cycle_now;
        hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* both __this_cpu_read() and rdtsc() should be on the same cpu */
        get_cpu();
@@ -2686,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
         */
-       spin_lock(&ka->pvclock_gtod_sync_lock);
+       spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
        use_master_clock = ka->use_master_clock;
        if (use_master_clock) {
                host_tsc = ka->master_cycle_now;
                kernel_ns = ka->master_kernel_ns;
        }
-       spin_unlock(&ka->pvclock_gtod_sync_lock);
+       spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
@@ -5726,6 +5727,7 @@ set_pit2_out:
        }
 #endif
        case KVM_SET_CLOCK: {
+               struct kvm_arch *ka = &kvm->arch;
                struct kvm_clock_data user_ns;
                u64 now_ns;
 
@@ -5744,8 +5746,22 @@ set_pit2_out:
                 * pvclock_update_vm_gtod_copy().
                 */
                kvm_gen_update_masterclock(kvm);
-               now_ns = get_kvmclock_ns(kvm);
-               kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+               /*
+                * This pairs with kvm_guest_time_update(): when masterclock
+                * is in use, we use master_kernel_ns + kvmclock_offset to
+                * set unsigned 'system_time', so if we used get_kvmclock_ns()
+                * (which is slightly ahead) here, we would risk the unsigned
+                * 'system_time' wrapping when 'user_ns.clock' is very small.
+                */
+               spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+               if (kvm->arch.use_master_clock)
+                       now_ns = ka->master_kernel_ns;
+               else
+                       now_ns = get_kvmclock_base_ns();
+               ka->kvmclock_offset = user_ns.clock - now_ns;
+               spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
                kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
                break;
        }
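
[Editor's note: the comment above compresses a subtle unsigned-arithmetic argument. Roughly: the guest-visible system_time is later computed as master_kernel_ns + kvmclock_offset in a u64. If the offset were derived from the slightly-ahead get_kvmclock_ns() value instead of master_kernel_ns, a tiny user_ns.clock would make the offset "more negative" than -master_kernel_ns and the unsigned sum would wrap to a huge value. With made-up numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t master_kernel_ns = 1000; /* base the update path uses   */
	uint64_t kvmclock_ns      = 1005; /* get_kvmclock_ns(): slightly
					   * ahead of the base		*/
	uint64_t user_clock       = 3;    /* tiny KVM_SET_CLOCK value	*/

	/* Buggy: offset derived from the slightly-ahead reading. */
	uint64_t off_bad  = user_clock - kvmclock_ns;	   /* "-1002" */
	/* Fixed: offset derived from the same base used later. */
	uint64_t off_good = user_clock - master_kernel_ns; /* "-997"  */

	/* system_time = master_kernel_ns + offset, all in u64: */
	printf("bad : %llu\n",
	       (unsigned long long)(master_kernel_ns + off_bad));  /* wraps */
	printf("good: %llu\n",
	       (unsigned long long)(master_kernel_ns + off_good)); /* == 3  */
	return 0;
}
]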
@@ -7724,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int cpu;
+       unsigned long flags;
 
        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7739,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
        list_for_each_entry(kvm, &vm_list, vm_list) {
                struct kvm_arch *ka = &kvm->arch;
 
-               spin_lock(&ka->pvclock_gtod_sync_lock);
-
+               spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
                pvclock_update_vm_gtod_copy(kvm);
+               spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
                kvm_for_each_vcpu(cpu, vcpu, kvm)
                        kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-               spin_unlock(&ka->pvclock_gtod_sync_lock);
        }
        mutex_unlock(&kvm_lock);
 }
index 39eb0488714161776fa502b35a3f4ce71d438b34..9035e34aa15610d6e5eb6beb26f42e52fec4ae30 100644 (file)
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
index 4b01f7dbaf303ef21194fee505ee403150cee55f..ae78cef7998023e7d7520dab74e0153b543af8f4 100644 (file)
@@ -262,7 +262,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
        if (pgprot_val(old_prot) == pgprot_val(new_prot))
                return;
 
-       pa = pfn << page_level_shift(level);
+       pa = pfn << PAGE_SHIFT;
        size = page_level_size(level);
 
        /*
index 6926d0ca6c71797ffcbbf4e3c3e532f80c8244c3..b35fc80238846611f2ba9a881639648d7c99f648 100644 (file)
@@ -1936,7 +1936,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
  * add rsp, 8                      // skip eth_type_trans's frame
  * ret                             // return to its caller
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_progs *tprogs,
                                void *orig_call)
@@ -1975,6 +1975,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 
        save_regs(m, &prog, nr_args, stack_size);
 
+       if (flags & BPF_TRAMP_F_CALL_ORIG) {
+               /* arg1: mov rdi, im */
+               emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+               if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+       }
+
        if (fentry->nr_progs)
                if (invoke_bpf(m, &prog, fentry, stack_size))
                        return -EINVAL;
@@ -1993,8 +2002,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
        }
 
        if (flags & BPF_TRAMP_F_CALL_ORIG) {
-               if (fentry->nr_progs || fmod_ret->nr_progs)
-                       restore_regs(m, &prog, nr_args, stack_size);
+               restore_regs(m, &prog, nr_args, stack_size);
 
                /* call original function */
                if (emit_call(&prog, orig_call, prog)) {
@@ -2003,6 +2011,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
                }
                /* remember return value in a stack for bpf prog to access */
                emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+               im->ip_after_call = prog;
+               memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+               prog += X86_PATCH_SIZE;
        }
 
        if (fmod_ret->nr_progs) {
@@ -2033,9 +2044,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
         * the return value is only updated on the stack and still needs to be
         * restored to R0.
         */
-       if (flags & BPF_TRAMP_F_CALL_ORIG)
+       if (flags & BPF_TRAMP_F_CALL_ORIG) {
+               im->ip_epilogue = prog;
+               /* arg1: mov rdi, im */
+               emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+               if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
                /* restore original return value back into RAX */
                emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+       }
 
        EMIT1(0x5B); /* pop rbx */
        EMIT1(0xC9); /* leave */
@@ -2225,7 +2244,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                padding = true;
                goto skip_init_addrs;
        }
-       addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+       addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
        if (!addrs) {
                prog = orig_prog;
                goto out_addrs;
@@ -2317,7 +2336,7 @@ out_image:
                if (image)
                        bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
-               kfree(addrs);
+               kvfree(addrs);
                kfree(jit_data);
                prog->aux->jit_data = NULL;
        }
index 17d80f751fcb2477fa16799261c6db8823941fcc..ac06ca32e9ef71177fb385f6820e6b8dda8b78f0 100644 (file)
@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
 #else
 #define P2M_LIMIT 0
 #endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
        xen_p2m_last_pfn = xen_max_p2m_pfn;
 
        p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
-       if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
-               p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
        vm.flags = VM_ALLOC;
        vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
                        PMD_SIZE * PMDS_PER_MID_PAGE);
index 1a3b75652fa4f662eb5fadac36255730d839eacd..8bfc1033010770fafdff3fd6238378f3322312f2 100644 (file)
@@ -59,6 +59,18 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
+/*
+ * The maximum amount of extra memory compared to the base size.  The
+ * main scaling factor is the size of struct page.  At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO                (10)
+
 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 
 static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
                extra_pages += max_pages - max_pfn;
 
        /*
-        * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+        * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
         * factor the base size.
         *
         * Make sure we have no memory above max_pages, as this area
         * isn't handled by the p2m management.
         */
-       extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+       extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
                           extra_pages, max_pages - max_pfn);
        i = 0;
        addr = xen_e820_table.entries[0].addr;
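
[Editor's note: to put a number on the "extreme ratios" the moved comment warns about: with 4 KiB pages and a 64-byte struct page, one page of base memory holds metadata for 4096/64 = 64 pages of extra memory, so around 64x the entire base would be consumed by page structures; 10x stays well below that. A back-of-the-envelope check (the sizes are typical values, not guaranteed - both depend on the kernel configuration):

#include <stdio.h>

int main(void)
{
	unsigned long page_size       = 4096; /* typical, config-dependent */
	unsigned long struct_page_sz  = 64;   /* typical, config-dependent */
	unsigned long extra_mem_ratio = 10;   /* EXTRA_MEM_RATIO           */

	/* Pages of extra memory whose metadata fits in one base page: */
	unsigned long covered = page_size / struct_page_sz;	/* 64 */

	/* N pages of extra memory need N/covered pages of base memory
	 * for their struct pages. */
	printf("at %lux, metadata uses %.1f%% of base memory\n",
	       extra_mem_ratio, 100.0 * extra_mem_ratio / covered);
	printf("at %lux, base memory would be 100%% metadata\n", covered);
	return 0;
}
]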
index c426b846beefbcbd6a6d9ed6439506f92e9ec070..45cc0ae0af6f966ed778253b9624d46a783c2152 100644 (file)
        LOAD_CP_REGS_TAB(6)
        LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area 
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-       /* reserve 4 bytes on stack to save a0 */
-       abi_entry(4)
-
-       s32i    a0, a1, 0
-       movi    a0, .Lsave_cp_regs_jump_table
-       addx8   a3, a3, a0
-       l32i    a4, a3, 4
-       l32i    a3, a3, 0
-       add     a2, a2, a4
-       beqz    a3, 1f
-       callx0  a3
-1:     l32i    a0, a1, 0
-
-       abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+       .text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+       /* reserve 4 bytes on stack to save a0 */
+       abi_entry(4)
+
+       s32i    a0, a1, 0
+       movi    a0, .Lsave_cp_regs_jump_table
+       addx8   a3, a3, a0
+       l32i    a4, a3, 4
+       l32i    a3, a3, 0
+       add     a2, a2, a4
+       beqz    a3, 1f
+       callx0  a3
+1:     l32i    a0, a1, 0
+
+       abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
        .data
 
 ENTRY(coprocessor_owner)
index 7666408ce12a48a89bc2b3d619dd3514d1fe4014..95a74890c7e995ebc3313d9fafa0f54800c705d5 100644 (file)
@@ -112,8 +112,11 @@ good_area:
         */
        fault = handle_mm_fault(vma, address, flags, regs);
 
-       if (fault_signal_pending(fault, regs))
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       goto bad_page_fault;
                return;
+       }
 
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
index 26b7f721cda88b37c991e9e7f9caef3145223b1a..50e579088aca4c5ebc8eb0fc62c10b7fc94a714c 100644 (file)
@@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
 {
        struct bio *parent = bio->bi_private;
 
-       if (!parent->bi_status)
+       if (bio->bi_status && !parent->bi_status)
                parent->bi_status = bio->bi_status;
        bio_put(bio);
        return parent;
@@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
 }
 EXPORT_SYMBOL_GPL(bio_release_pages);
 
-static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
        WARN_ON_ONCE(bio->bi_max_vecs);
 
@@ -959,11 +959,26 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
        bio->bi_iter.bi_size = iter->count;
        bio_set_flag(bio, BIO_NO_PAGE_REF);
        bio_set_flag(bio, BIO_CLONED);
+}
 
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+{
+       __bio_iov_bvec_set(bio, iter);
        iov_iter_advance(iter, iter->count);
        return 0;
 }
 
+static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+{
+       struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+       struct iov_iter i = *iter;
+
+       iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
+       __bio_iov_bvec_set(bio, &i);
+       iov_iter_advance(iter, i.count);
+       return 0;
+}
+
 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
        int ret = 0;
 
        if (iov_iter_is_bvec(iter)) {
-               if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
-                       return -EINVAL;
+               if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+                       return bio_iov_bvec_set_append(bio, iter);
                return bio_iov_bvec_set(bio, iter);
        }
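
[Editor's note: the new bio_iov_bvec_set_append() works on a copy of the iterator so it can clamp what one bio takes to the queue's maximum zone-append size, then advances the caller's iterator by only the clamped amount, leaving the remainder for follow-up bios. A standalone sketch of that "truncate a copy, consume only what the copy held" pattern (struct iter and the helpers are stand-ins, not the block-layer API):

#include <stdio.h>
#include <stddef.h>

struct iter { size_t pos, count; };	/* stand-in for struct iov_iter */

static void iter_truncate(struct iter *i, size_t max)
{
	if (i->count > max)
		i->count = max;
}

static void iter_advance(struct iter *i, size_t n)
{
	i->pos += n;
	i->count -= n;
}

int main(void)
{
	struct iter user = { .pos = 0, .count = 1000 };
	size_t max_zone_append = 256;

	while (user.count) {
		struct iter i = user;		    /* work on a copy      */
		iter_truncate(&i, max_zone_append); /* clamp this bio's take */
		printf("bio gets %zu bytes at offset %zu\n", i.count, user.pos);
		iter_advance(&user, i.count);	    /* consume only that much */
	}
	return 0;
}
]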
 
index ffb4aa0ea68b09db122940a07543ec71c1a4a896..4d97fb6dd226725bc732e07152047eec27741d33 100644 (file)
@@ -382,6 +382,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
+               if (queue_max_discard_segments(rq->q) > 1) {
+                       struct bio *bio = rq->bio;
+
+                       for_each_bio(bio)
+                               nr_phys_segs++;
+                       return nr_phys_segs;
+               }
+               return 1;
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
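
[Editor's note: the hunk above changes discard/secure-erase segment accounting: when the queue supports more than one discard segment, each bio in a (possibly merged) request chain now counts as one physical segment, instead of the request falling through and reporting zero. A minimal sketch of walking a bi_next-chained list the way for_each_bio does (toy struct, not the block layer's):

#include <stdio.h>
#include <stddef.h>

struct bio { struct bio *bi_next; };	/* toy: only the chain link */

static unsigned count_discard_segments(struct bio *head, int max_discard_segs)
{
	struct bio *b;
	unsigned n = 0;

	if (max_discard_segs <= 1)
		return 1;	/* single-segment queues: one segment */

	for (b = head; b; b = b->bi_next)	/* for_each_bio */
		n++;
	return n;
}

int main(void)
{
	struct bio c = { NULL }, b = { &c }, a = { &b };

	printf("single-segment queue: %u\n", count_discard_segments(&a, 1));
	printf("multi-segment queue:  %u\n", count_discard_segments(&a, 4));
	return 0;
}
]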
index 9ebb344e25850ab51cc386efbf5c2e76793ccdac..271f6596435ba2d09dc9597c4d852604c37eeec2 100644 (file)
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
        RQF_NAME(IO_STAT),
-       RQF_NAME(ALLOCED),
        RQF_NAME(PM),
        RQF_NAME(HASHED),
        RQF_NAME(STATS),
index 1a7558917c47d66f0537b97d92e789013f906647..46f055bc7ecb38710a92023f8eb1668061f6c2f6 100644 (file)
@@ -322,6 +322,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
        const char *dname;
        int err;
 
+       /*
+        * disk_max_parts() won't be zero: either GENHD_FL_EXT_DEVT is set
+        * or 'minors' is passed to alloc_disk().
+        */
+       if (partno >= disk_max_parts(disk))
+               return ERR_PTR(-EINVAL);
+
        /*
         * Partitions are not supported on zoned block devices that are used as
         * such.
index 3f045b5953b2e85ac7c2345fc568f57418c234b2..a0c1a665dfc1204c8f69a22bc1cca97a9b8cc961 100644 (file)
@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
                 * just create and link the new node(s) here.
                 */
                new_node =
-                   ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+                   acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
                if (!new_node) {
                        status = AE_NO_MEMORY;
                        goto unlock_and_exit;
                }
 
-               ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
                new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
                new_node->type = init_val->type;
 
index e6a5d997241c43d803d9758b01328db8b6cdb0cc..cb8f70842249e3cb9da68e0fe9de903a91203498 100644 (file)
@@ -9,6 +9,8 @@
 #ifndef _ACPI_INTERNAL_H_
 #define _ACPI_INTERNAL_H_
 
+#include <linux/idr.h>
+
 #define PREFIX "ACPI: "
 
 int early_acpi_osi_init(void);
@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 
 extern struct list_head acpi_bus_id_list;
 
+#define ACPI_MAX_DEVICE_INSTANCES      4096
+
 struct acpi_device_bus_id {
        const char *bus_id;
-       unsigned int instance_no;
+       struct ida instance_ida;
        struct list_head node;
 };
 
index d93e400940a31b28aa90139e6958cae49abdd703..768a6b4d23680b775fd5f3343a998c2c6c47ac77 100644 (file)
@@ -29,6 +29,7 @@
  */
 #ifdef CONFIG_X86
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #endif
 
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
@@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
                        wait_for_freeze();
                } else
                        return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+               /* If NMI wants to wake up CPU0, start CPU0. */
+               if (wakeup_cpu0())
+                       start_cpu0();
+#endif
        }
 
        /* Never reached */
index a184529d8fa402f077790cbfa04094e7724c6c9a..6efe7edd7b1eadbf3c6b66c88c56b0fbe72fe2f8 100644 (file)
@@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device)
        list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
                if (!strcmp(acpi_device_bus_id->bus_id,
                            acpi_device_hid(device))) {
-                       if (acpi_device_bus_id->instance_no > 0)
-                               acpi_device_bus_id->instance_no--;
-                       else {
+                       ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+                       if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
                                list_del(&acpi_device_bus_id->node);
                                kfree_const(acpi_device_bus_id->bus_id);
                                kfree(acpi_device_bus_id);
@@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
        return NULL;
 }
 
+static int acpi_device_set_name(struct acpi_device *device,
+                               struct acpi_device_bus_id *acpi_device_bus_id)
+{
+       struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+       int result;
+
+       result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+       if (result < 0)
+               return result;
+
+       device->pnp.instance_no = result;
+       dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+       return 0;
+}
+
 int acpi_device_add(struct acpi_device *device,
                    void (*release)(struct device *))
 {
@@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device,
 
        acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
        if (acpi_device_bus_id) {
-               acpi_device_bus_id->instance_no++;
+               result = acpi_device_set_name(device, acpi_device_bus_id);
+               if (result)
+                       goto err_unlock;
        } else {
                acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
                                             GFP_KERNEL);
@@ -681,9 +697,16 @@ int acpi_device_add(struct acpi_device *device,
                        goto err_unlock;
                }
 
+               ida_init(&acpi_device_bus_id->instance_ida);
+
+               result = acpi_device_set_name(device, acpi_device_bus_id);
+               if (result) {
+                       kfree(acpi_device_bus_id);
+                       goto err_unlock;
+               }
+
                list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
        }
-       dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
        if (device->parent)
                list_add_tail(&device->node, &device->parent->children);
@@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
        device_initialize(&device->dev);
        dev_set_uevent_suppress(&device->dev, true);
        acpi_init_coherency(device);
+       /* Assume there are unmet deps to start with. */
+       device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 {
        struct acpi_dep_data *dep;
 
+       adev->dep_unmet = 0;
+
        mutex_lock(&acpi_dep_list_lock);
 
        list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
                return AE_CTRL_DEPTH;
 
        acpi_scan_init_hotplug(device);
-       if (!check_dep)
+       /*
+        * If check_dep is true at this point, the device has no dependencies,
+        * or the creation of the device object would have been postponed above.
+        */
+       if (check_dep)
+               device->dep_unmet = 0;
+       else
                acpi_scan_dep_init(device);
 
 out:
index e48690a006a4e72d5b8dc3bf5fb327ee3b5e3cbc..9d581045acff079276ed81a307b60ec7244f816b 100644 (file)
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
 }
 
 /*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
  *
  * find RSDP, find and checksum SDT/XSDT.
  * checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
  * result: sdt_entry[] is initialized
  */
 
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
 {
        acpi_status status;
 
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
        status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
        if (ACPI_FAILURE(status))
                return -EINVAL;
-       acpi_table_initrd_scan();
 
+       return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+       int i;
+
+       for (i = 0; i < ACPI_MAX_TABLES; i++) {
+               struct acpi_table_desc *table_desc = &initial_tables[i];
+               u64 start = table_desc->address;
+               u64 size = table_desc->length;
+
+               if (!start || !size)
+                       break;
+
+               pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+                       table_desc->signature.ascii, start, start + size - 1);
+
+               memblock_reserve(start, size);
+       }
+}
+
+void __init acpi_table_init_complete(void)
+{
+       acpi_table_initrd_scan();
        check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+       int ret;
+
+       ret = acpi_locate_initial_tables();
+       if (ret)
+               return ret;
+
+       acpi_table_init_complete();
+
        return 0;
 }
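
[Editor's note: taken together with the x86 hunks earlier in this series, the split lets setup_arch() locate and memblock-reserve the tables early (acpi_boot_table_init() calling acpi_locate_initial_tables() and acpi_reserve_initial_tables()) while deferring the initrd table scan and MADT sanity check to early_acpi_boot_init() via acpi_table_init_complete(); acpi_table_init() keeps the old all-in-one behaviour for other callers. A stubbed sketch of the resulting call order (the bodies are placeholders, only the sequencing mirrors the patch):

#include <stdio.h>

static int  acpi_locate_initial_tables(void) { puts("locate RSDP/XSDT"); return 0; }
static void acpi_reserve_initial_tables(void){ puts("memblock_reserve tables"); }
static void acpi_table_init_complete(void)   { puts("initrd scan + MADT check"); }

static void acpi_boot_table_init(void)	/* early, from setup_arch() */
{
	if (acpi_locate_initial_tables())
		puts("disable_acpi()");
	else
		acpi_reserve_initial_tables();
}

static void early_acpi_boot_init(void)	/* later, tables now reserved */
{
	acpi_table_init_complete();
	puts("parse SBF/MADT ...");
}

int main(void)
{
	acpi_boot_table_init();		/* before e820/memblock users run */
	early_acpi_boot_init();
	return 0;
}
]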
 
index 811d298637cb250841e54cef4af9c37b9b7ecf54..83cd4c95faf0da8acec49d1104542c61cd218f5e 100644 (file)
@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                },
        },
        {
+       .callback = video_detect_force_vendor,
        .ident = "Sony VPCEH3U1E",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
index f43430e9dceedcc33ae9ce9df7bb278ad3a2e573..24fd6f369ebe9aaecd4b393c66a8c7d05687f8c3 100644 (file)
@@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
        char c;
 
        for (; count-- > 0; (*ppos)++, tmp++) {
-               if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+               if (((count + 1) & 0x1f) == 0) {
                        /*
-                        * let's be a little nice with other processes
-                        * that need some CPU
+                        * charlcd_write() is invoked as a VFS->write() callback
+                        * and as such it is always invoked from preemptible
+                        * context and may sleep.
                         */
-                       schedule();
+                       cond_resched();
+               }
 
                if (get_user(c, tmp))
                        return -EFAULT;
@@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
        int count = strlen(s);
 
        for (; count-- > 0; tmp++) {
-               if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
-                       /*
-                        * let's be a little nice with other processes
-                        * that need some CPU
-                        */
-                       schedule();
+               if (((count + 1) & 0x1f) == 0)
+                       cond_resched();
 
                charlcd_write_char(lcd, *tmp);
        }
index a46a7e30881b19e00b1bda7b839ab263ec8be3ef..fe1dad68aee4dd61537f66ff42afa69d4a870607 100644 (file)
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
        return 0;
 }
 
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
        struct device_link *link;
 
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
                                device_links_read_lock_held()) {
 
                while (refcount_dec_not_one(&link->rpm_active))
-                       pm_runtime_put(link->supplier);
+                       pm_runtime_put_noidle(link->supplier);
+
+               if (try_to_suspend)
+                       pm_request_idle(link->supplier);
        }
 }
 
+static void rpm_put_suppliers(struct device *dev)
+{
+       __rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+       struct device_link *link;
+       int idx = device_links_read_lock();
+
+       list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+                               device_links_read_lock_held())
+               pm_request_idle(link->supplier);
+
+       device_links_read_unlock(idx);
+}
+
 /**
  * __rpm_callback - Run a given runtime PM callback for a given device.
  * @cb: Runtime PM callback to run.
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
                        idx = device_links_read_lock();
 
                        retval = rpm_get_suppliers(dev);
-                       if (retval)
+                       if (retval) {
+                               rpm_put_suppliers(dev);
                                goto fail;
+                       }
 
                        device_links_read_unlock(idx);
                }
@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
                    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
                        idx = device_links_read_lock();
 
- fail:
-                       rpm_put_suppliers(dev);
+                       __rpm_put_suppliers(dev, false);
 
+fail:
                        device_links_read_unlock(idx);
                }
 
@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
                goto out;
        }
 
+       if (dev->power.irq_safe)
+               goto out;
+
        /* Maybe the parent is now able to suspend. */
-       if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+       if (parent && !parent->power.ignore_children) {
                spin_unlock(&dev->power.lock);
 
                spin_lock(&parent->power.lock);
@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
                spin_lock(&dev->power.lock);
        }
+       /* Maybe the suppliers are now able to suspend. */
+       if (dev->power.links_count > 0) {
+               spin_unlock_irq(&dev->power.lock);
+
+               rpm_suspend_suppliers(dev);
+
+               spin_lock_irq(&dev->power.lock);
+       }
 
  out:
        trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->flags & DL_FLAG_PM_RUNTIME) {
                        link->supplier_preactivated = true;
-                       refcount_inc(&link->rpm_active);
                        pm_runtime_get_sync(link->supplier);
+                       refcount_inc(&link->rpm_active);
                }
 
        device_links_read_unlock(idx);
@@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
        struct device_link *link;
+       unsigned long flags;
+       bool put;
        int idx;
 
        idx = device_links_read_lock();
@@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
                                device_links_read_lock_held())
                if (link->supplier_preactivated) {
                        link->supplier_preactivated = false;
-                       if (refcount_dec_not_one(&link->rpm_active))
+                       spin_lock_irqsave(&dev->power.lock, flags);
+                       put = pm_runtime_status_suspended(dev) &&
+                             refcount_dec_not_one(&link->rpm_active);
+                       spin_unlock_irqrestore(&dev->power.lock, flags);
+                       if (put)
                                pm_runtime_put(link->supplier);
                }
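
Draining each reference with pm_runtime_put() would run a full idle check per reference; the rewrite uses the _noidle variant and queues a single pm_request_idle() at the end. A compilable userspace model of the drain, with the kernel refcount reduced to a plain int:

    #include <stdbool.h>
    #include <stdio.h>

    /* Model of refcount_dec_not_one(): decrement unless the count is
     * exactly 1; report whether a decrement happened. */
    static bool dec_not_one(int *r)
    {
            if (*r == 1)
                    return false;
            (*r)--;
            return true;
    }

    int main(void)
    {
            int rpm_active = 4;     /* base ref + 3 runtime PM refs */
            int dropped = 0;

            while (dec_not_one(&rpm_active))
                    dropped++;      /* pm_runtime_put_noidle(supplier) */

            /* ...then one pm_request_idle(supplier), not one per ref. */
            printf("dropped %d refs, rpm_active ends at %d\n",
                   dropped, rpm_active);
            return 0;
    }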
 
index d6c821d48090a3d489a1c1778a2f1030b5f3b10d..51bfd77375523769cb4d5bf43b31cc590c70f5de 100644 (file)
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
        }
 
        if (dev->zoned)
-               cmd->error = null_process_zoned_cmd(cmd, op,
-                                                   sector, nr_sectors);
+               sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
        else
-               cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+               sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+       /* Do not overwrite errors (e.g. timeout errors) */
+       if (cmd->error == BLK_STS_OK)
+               cmd->error = sts;
 
 out:
        nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+       struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
        pr_info("rq %p timed out\n", rq);
-       blk_mq_complete_request(rq);
+
+       /*
+        * If the device is marked as blocking (i.e. memory backed or zoned
+        * device), the submission path may block waiting for resources and
+        * thus cause real timeouts. For those real timeouts, the submission
+        * path will complete the request using blk_mq_complete_request().
+        * Only fake timeouts need to execute blk_mq_complete_request() here.
+        */
+       cmd->error = BLK_STS_TIMEOUT;
+       if (cmd->fake_timeout)
+               blk_mq_complete_request(rq);
        return BLK_EH_DONE;
 }
 
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
        cmd->rq = bd->rq;
        cmd->error = BLK_STS_OK;
        cmd->nq = nq;
+       cmd->fake_timeout = should_timeout_request(bd->rq);
 
        blk_mq_start_request(bd->rq);
 
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                        return BLK_STS_OK;
                }
        }
-       if (should_timeout_request(bd->rq))
+       if (cmd->fake_timeout)
                return BLK_STS_OK;
 
        return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
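
The "do not overwrite errors" rule above is a first-error-wins policy: once the timeout handler records BLK_STS_TIMEOUT, a late completion from the blocked submission path must not reset the status. A small standalone illustration (status codes reduced to an enum):

    #include <stdio.h>

    enum blk_status { BLK_STS_OK = 0, BLK_STS_TIMEOUT = 1 };

    /* Mirror of the null_handle_cmd() logic: keep the first error. */
    static void record_status(enum blk_status *error, enum blk_status sts)
    {
            if (*error == BLK_STS_OK)
                    *error = sts;
    }

    int main(void)
    {
            enum blk_status error = BLK_STS_OK;

            record_status(&error, BLK_STS_TIMEOUT); /* timeout fires */
            record_status(&error, BLK_STS_OK);      /* late completion */
            printf("final status: %d\n", error);    /* still 1 (timeout) */
            return 0;
    }
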
index 83504f3cc9d688444ca05d76a4430865da0f7581..4876d5adb12da0eb2d7a1b7beb0c70642bcf7c6d 100644 (file)
@@ -22,6 +22,7 @@ struct nullb_cmd {
        blk_status_t error;
        struct nullb_queue *nq;
        struct hrtimer timer;
+       bool fake_timeout;
 };
 
 struct nullb_queue {
index 1cdf09ff67b68c5d704d002ae59a3a6a0c9ba7c2..14e452896d04c6293bb4041fb1e6e6430792ef87 100644 (file)
@@ -891,7 +891,7 @@ next:
 out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
-               if(i >= last_map + segs_to_map)
+               if(i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }
index b040447575adc2695f567224a09632bca21af17f..dcfb32ee5cb60239c358586b9fe1bca72161a384 100644 (file)
@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
         */
        l3->debug_irq = platform_get_irq(pdev, 0);
        ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
-                              0x0, "l3-dbg-irq", l3);
+                              IRQF_NO_THREAD, "l3-dbg-irq", l3);
        if (ret) {
                dev_err(l3->dev, "request_irq failed for %d\n",
                        l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 
        l3->app_irq = platform_get_irq(pdev, 1);
        ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
-                              0x0, "l3-app-irq", l3);
+                              IRQF_NO_THREAD, "l3-app-irq", l3);
        if (ret)
                dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
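
With "threadirqs" on the command line (or forced threading on RT), handlers registered without IRQF_NO_THREAD are moved into kernel threads; the flag keeps these bus-error handlers in hard-irq context. A hedged kernel-style sketch (the wrapper function is hypothetical):

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int example_request_l3_irq(struct platform_device *pdev, int irq,
                                      irq_handler_t handler, void *data)
    {
            /* IRQF_NO_THREAD: never force-thread this handler. */
            return devm_request_irq(&pdev->dev, irq, handler,
                                    IRQF_NO_THREAD, "l3-dbg-irq", data);
    }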
 
index a27d751cf219d7215216b41577c19ed4331885c7..3d74f237f005bb3358e8040b9a63e16c2de4c4d0 100644 (file)
@@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev)
 
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
-       reset_control_assert(ddata->rsts);
+
+       if (!reset_control_status(ddata->rsts))
+               reset_control_assert(ddata->rsts);
 
 unprepare:
        sysc_unprepare(ddata);
index 42f13a2d1cc16d96a5a5cd896058400078418485..05ff3b0d233e16507efb326fe0015b4df0eb4cc5 100644 (file)
@@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
        struct clk_rate_request parent_req = { };
        struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
        struct clk_hw *xo, *p0, *p1, *p2;
-       unsigned long request, p0_rate;
+       unsigned long p0_rate;
+       u8 mux_div = cgfx->div;
        int ret;
 
        p0 = cgfx->hws[0];
@@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
                return 0;
        }
 
-       request = req->rate;
-       if (cgfx->div > 1)
-               parent_req.rate = request = request * cgfx->div;
+       if (mux_div == 0)
+               mux_div = 1;
+
+       parent_req.rate = req->rate * mux_div;
 
        /* This has to be a fixed rate PLL */
        p0_rate = clk_hw_get_rate(p0);
 
-       if (request == p0_rate) {
+       if (parent_req.rate == p0_rate) {
                req->rate = req->best_parent_rate = p0_rate;
                req->best_parent_hw = p0;
                return 0;
@@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 
        if (req->best_parent_hw == p0) {
                /* Are we going back to a previously used rate? */
-               if (clk_hw_get_rate(p2) == request)
+               if (clk_hw_get_rate(p2) == parent_req.rate)
                        req->best_parent_hw = p2;
                else
                        req->best_parent_hw = p1;
@@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
                return ret;
 
        req->rate = req->best_parent_rate = parent_req.rate;
-       if (cgfx->div > 1)
-               req->rate /= cgfx->div;
+       req->rate /= mux_div;
 
        return 0;
 }
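
The rewrite normalizes the divider once (treating 0 as 1) so the multiply and the divide-back always use the same value. The arithmetic, as a compilable toy with illustrative rates:

    #include <stdio.h>

    int main(void)
    {
            unsigned long req_rate = 300000000UL;   /* 300 MHz requested */
            unsigned char div = 2;                  /* RCG post-divider */
            unsigned char mux_div = div ? div : 1;  /* never divide by 0 */

            unsigned long parent_rate = req_rate * mux_div; /* ask for 600 MHz */
            unsigned long final_rate = parent_rate / mux_div;

            printf("parent=%lu final=%lu\n", parent_rate, final_rate);
            return 0;
    }
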
index 91dc390a583b51c8c8712ccb880e2cdc2842d1c1..c623ce900406334ce2767e0136875c1fdca8bad2 100644 (file)
@@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
        .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
 };
 
+/* Resource name must match resource id present in cmd-db */
+DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+
 static struct clk_hw *sc7280_rpmh_clocks[] = {
-       [RPMH_CXO_CLK]      = &sdm845_bi_tcxo.hw,
-       [RPMH_CXO_CLK_A]    = &sdm845_bi_tcxo_ao.hw,
+       [RPMH_CXO_CLK]      = &sc7280_bi_tcxo.hw,
+       [RPMH_CXO_CLK_A]    = &sc7280_bi_tcxo_ao.hw,
        [RPMH_LN_BB_CLK2]   = &sdm845_ln_bb_clk2.hw,
        [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
        [RPMH_RF_CLK1]      = &sdm845_rf_clk1.hw,
index 88e896abb6631c3c3b5ba0ef4f28839baa044b6d..da8b627ca156c780dd5d029a8400a8fd0997d866 100644 (file)
@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
                .name = "gcc_sdcc1_apps_clk_src",
                .parent_data = gcc_parent_data_1,
                .num_parents = 5,
-               .ops = &clk_rcg2_ops,
+               .ops = &clk_rcg2_floor_ops,
        },
 };
 
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
                .name = "gcc_sdcc1_ice_core_clk_src",
                .parent_data = gcc_parent_data_0,
                .num_parents = 4,
-               .ops = &clk_rcg2_floor_ops,
+               .ops = &clk_rcg2_ops,
        },
 };
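
Swapping the ops matters because "floor" ops round a requested rate down to the nearest supported rate, while the plain ops pick the closest one, which may round up; a card clock that rounds up can exceed the host's advertised maximum. A toy model with made-up rates:

    #include <stdio.h>

    static const unsigned long rates[] = { 400000UL, 25000000UL, 50000000UL };

    /* Floor selection: never return more than what was asked for. */
    static unsigned long round_rate_floor(unsigned long req)
    {
            unsigned long best = rates[0];
            unsigned int i;

            for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
                    if (rates[i] <= req)
                            best = rates[i];
            return best;
    }

    int main(void)
    {
            /* At 40 MHz requested, "closest" would pick 50 MHz;
             * floor stays at 25 MHz and never overclocks the card. */
            printf("%lu\n", round_rate_floor(40000000UL));
            return 0;
    }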
 
index d3f756f7b5a0594c9543d76c902f9f422fd571e1..67e56cf638efb86a3f26c811c1d330a778a8bd47 100644 (file)
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
 __ATTR_RO(_name##_frequencies)
 
 /*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
  * the specified CPU
  */
 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
 /*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
  * the specified CPU
  */
 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
index 49267eb64302d28e010b23d15117d80f68693917..29885febc0b0eb90d36ccabe0a7fe45cf9298c43 100644 (file)
@@ -1007,13 +1007,9 @@ struct amdgpu_device {
 
        /* s3/s4 mask */
        bool                            in_suspend;
-       bool                            in_hibernate;
-
-       /*
-        * The combination flag in_poweroff_reboot_com used to identify the poweroff
-        * and reboot opt in the s0i3 system-wide suspend.
-        */
-       bool                            in_poweroff_reboot_com;
+       bool                            in_s3;
+       bool                            in_s4;
+       bool                            in_s0ix;
 
        atomic_t                        in_gpu_reset;
        enum pp_mp1_state               mp1_state;
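
The single combination flag is replaced by one flag per platform sleep state, so each code path can test exactly the state it cares about. Roughly (an illustrative struct, not the driver's):

    #include <stdbool.h>

    struct example_suspend_state {
            bool in_s3;     /* suspend-to-RAM ("mem") in flight */
            bool in_s4;     /* hibernation (freeze/poweroff/thaw) */
            bool in_s0ix;   /* suspend-to-idle ("s2idle") */
    };
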
index 6447cd6ca5a8eb54841e2c9999c33a1437b02408..8a5a8ff5d36297bb55082ab21dc7c8506b0ffa4b 100644 (file)
@@ -2371,6 +2371,10 @@ static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
                i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip CG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2402,6 +2406,10 @@ static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_power
                i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
                if (!adev->ip_blocks[i].status.late_initialized)
                        continue;
+               /* skip PG for GFX on S0ix */
+               if (adev->in_s0ix &&
+                   adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX)
+                       continue;
                /* skip CG for VCE/UVD, it's handled specially */
                if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
                    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
@@ -2678,11 +2686,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
        int i, r;
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
-               amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
-               amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-       }
+       amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+       amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
@@ -2722,6 +2727,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 {
        int i, r;
 
+       if (adev->in_s0ix)
+               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+
        for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
@@ -2734,6 +2742,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
                        adev->ip_blocks[i].status.hw = false;
                        continue;
                }
+
+               /* Skip suspend of GFX and PSP for S0ix: GFX stays in the
+                * gfxoff state, so on resume it exits gfxoff just as it
+                * does at runtime, and PSP is part of the always-on
+                * hardware, so it does not need to be suspended.
+                */
+               if (adev->in_s0ix &&
+                   (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP ||
+                    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))
+                       continue;
+
                /* XXX handle errors */
                r = adev->ip_blocks[i].version->funcs->suspend(adev);
                /* XXX handle errors */
@@ -3673,14 +3692,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
  */
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 {
-       struct amdgpu_device *adev;
-       struct drm_crtc *crtc;
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
+       struct amdgpu_device *adev = drm_to_adev(dev);
        int r;
 
-       adev = drm_to_adev(dev);
-
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
@@ -3692,61 +3706,19 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 
        cancel_delayed_work_sync(&adev->delayed_init_work);
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* turn off display hw */
-               drm_modeset_lock_all(dev);
-               drm_connector_list_iter_begin(dev, &iter);
-               drm_for_each_connector_iter(connector, &iter)
-                       drm_helper_connector_dpms(connector,
-                                                 DRM_MODE_DPMS_OFF);
-               drm_connector_list_iter_end(&iter);
-               drm_modeset_unlock_all(dev);
-                       /* unpin the front buffers and cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-                       struct drm_framebuffer *fb = crtc->primary->fb;
-                       struct amdgpu_bo *robj;
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-
-                       if (fb == NULL || fb->obj[0] == NULL) {
-                               continue;
-                       }
-                       robj = gem_to_amdgpu_bo(fb->obj[0]);
-                       /* don't unpin kernel fb objects */
-                       if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
-                               r = amdgpu_bo_reserve(robj, true);
-                               if (r == 0) {
-                                       amdgpu_bo_unpin(robj);
-                                       amdgpu_bo_unreserve(robj);
-                               }
-                       }
-               }
-       }
-
        amdgpu_ras_suspend(adev);
 
        r = amdgpu_device_ip_suspend_phase1(adev);
 
-       amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+       if (!adev->in_s0ix)
+               amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
        /* evict vram memory */
        amdgpu_bo_evict_vram(adev);
 
        amdgpu_fence_driver_suspend(adev);
 
-       if (adev->in_poweroff_reboot_com ||
-           !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
-               r = amdgpu_device_ip_suspend_phase2(adev);
-       else
-               amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
+       r = amdgpu_device_ip_suspend_phase2(adev);
        /* evict remaining vram memory
         * This second call to evict vram is to evict the gart page table
         * using the CPU.
@@ -3768,16 +3740,13 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
  */
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 {
-       struct drm_connector *connector;
-       struct drm_connector_list_iter iter;
        struct amdgpu_device *adev = drm_to_adev(dev);
-       struct drm_crtc *crtc;
        int r = 0;
 
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
-       if (amdgpu_acpi_is_s0ix_supported(adev))
+       if (adev->in_s0ix)
                amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
 
        /* post card */
@@ -3802,50 +3771,17 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        queue_delayed_work(system_wq, &adev->delayed_init_work,
                           msecs_to_jiffies(AMDGPU_RESUME_MS));
 
-       if (!amdgpu_device_has_dc_support(adev)) {
-               /* pin cursors */
-               list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-                       if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
-                               struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
-                               r = amdgpu_bo_reserve(aobj, true);
-                               if (r == 0) {
-                                       r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
-                                       if (r != 0)
-                                               dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
-                                       amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
-                                       amdgpu_bo_unreserve(aobj);
-                               }
-                       }
-               }
+       if (!adev->in_s0ix) {
+               r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+               if (r)
+                       return r;
        }
-       r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
-       if (r)
-               return r;
 
        /* Make sure IB tests flushed */
        flush_delayed_work(&adev->delayed_init_work);
 
-       /* blat the mode back in */
-       if (fbcon) {
-               if (!amdgpu_device_has_dc_support(adev)) {
-                       /* pre DCE11 */
-                       drm_helper_resume_force_mode(dev);
-
-                       /* turn on display hw */
-                       drm_modeset_lock_all(dev);
-
-                       drm_connector_list_iter_begin(dev, &iter);
-                       drm_for_each_connector_iter(connector, &iter)
-                               drm_helper_connector_dpms(connector,
-                                                         DRM_MODE_DPMS_ON);
-                       drm_connector_list_iter_end(&iter);
-
-                       drm_modeset_unlock_all(dev);
-               }
+       if (fbcon)
                amdgpu_fbdev_set_suspend(adev, 0);
-       }
 
        drm_kms_helper_poll_enable(dev);
 
index 48cb33e5b3826caac252debfc011dc6b2c91c322..f753e04fee99a8bd04c6664ce2d3dd90c1e12bf3 100644 (file)
@@ -1310,3 +1310,92 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
        return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
                                                  stime, etime, mode);
 }
+
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_crtc *crtc;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       int r;
+
+       /* turn off display hw */
+       drm_modeset_lock_all(dev);
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_OFF);
+       drm_connector_list_iter_end(&iter);
+       drm_modeset_unlock_all(dev);
+       /* unpin the front buffers and cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+               struct drm_framebuffer *fb = crtc->primary->fb;
+               struct amdgpu_bo *robj;
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+
+               if (fb == NULL || fb->obj[0] == NULL) {
+                       continue;
+               }
+               robj = gem_to_amdgpu_bo(fb->obj[0]);
+               /* don't unpin kernel fb objects */
+               if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+                       r = amdgpu_bo_reserve(robj, true);
+                       if (r == 0) {
+                               amdgpu_bo_unpin(robj);
+                               amdgpu_bo_unreserve(robj);
+                       }
+               }
+       }
+       return r;
+}
+
+int amdgpu_display_resume_helper(struct amdgpu_device *adev)
+{
+       struct drm_device *dev = adev_to_drm(adev);
+       struct drm_connector *connector;
+       struct drm_connector_list_iter iter;
+       struct drm_crtc *crtc;
+       int r;
+
+       /* pin cursors */
+       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+               if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
+                       struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+                       r = amdgpu_bo_reserve(aobj, true);
+                       if (r == 0) {
+                               r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+                               if (r != 0)
+                                       dev_err(adev->dev, "Failed to pin cursor BO (%d)\n", r);
+                               amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+                               amdgpu_bo_unreserve(aobj);
+                       }
+               }
+       }
+
+       drm_helper_resume_force_mode(dev);
+
+       /* turn on display hw */
+       drm_modeset_lock_all(dev);
+
+       drm_connector_list_iter_begin(dev, &iter);
+       drm_for_each_connector_iter(connector, &iter)
+               drm_helper_connector_dpms(connector,
+                                         DRM_MODE_DPMS_ON);
+       drm_connector_list_iter_end(&iter);
+
+       drm_modeset_unlock_all(dev);
+
+       return 0;
+}
+
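
With the CRTC/connector handling factored out, each non-DC display IP block can wrap its existing suspend/resume around these helpers. A sketch of the resulting callback shape, using the names added above (hypothetical function, error handling trimmed):

    static int example_dce_suspend(void *handle)
    {
            struct amdgpu_device *adev = handle;
            int r;

            r = amdgpu_display_suspend_helper(adev); /* unpin FBs, DPMS off */
            if (r)
                    return r;
            /* ...save backlight level, then hw_fini as before... */
            return 0;
    }
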
index dc7b7d1165493c249fa7cb55b6d42d70ade8b0ed..7b6d83e2b13ca4bbedee70a12b74a56f087ce647 100644 (file)
@@ -47,4 +47,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 const struct drm_format_info *
 amdgpu_lookup_format_info(u32 format, uint64_t modifier);
 
+int amdgpu_display_suspend_helper(struct amdgpu_device *adev);
+int amdgpu_display_resume_helper(struct amdgpu_device *adev);
+
 #endif
index b26e2fd1c538433899a2f9ec8249317c8543e258..e92e7dea71da1792e2e77113a64148245f50dc00 100644 (file)
@@ -1107,6 +1107,7 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 
        /* Van Gogh */
@@ -1274,24 +1275,35 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
         */
        if (!amdgpu_passthrough(adev))
                adev->mp1_state = PP_MP1_STATE_UNLOAD;
-       adev->in_poweroff_reboot_com = true;
        amdgpu_device_ip_suspend(adev);
-       adev->in_poweroff_reboot_com = false;
        adev->mp1_state = PP_MP1_STATE_NONE;
 }
 
 static int amdgpu_pmops_suspend(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_suspend(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = true;
+       adev->in_s3 = true;
+       r = amdgpu_device_suspend(drm_dev, true);
+       adev->in_s3 = false;
+
+       return r;
 }
 
 static int amdgpu_pmops_resume(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(drm_dev);
+       int r;
 
-       return amdgpu_device_resume(drm_dev, true);
+       r = amdgpu_device_resume(drm_dev, true);
+       if (amdgpu_acpi_is_s0ix_supported(adev))
+               adev->in_s0ix = false;
+       return r;
 }
 
 static int amdgpu_pmops_freeze(struct device *dev)
@@ -1300,9 +1312,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
        struct amdgpu_device *adev = drm_to_adev(drm_dev);
        int r;
 
-       adev->in_hibernate = true;
+       adev->in_s4 = true;
        r = amdgpu_device_suspend(drm_dev, true);
-       adev->in_hibernate = false;
+       adev->in_s4 = false;
        if (r)
                return r;
        return amdgpu_asic_reset(adev);
@@ -1318,13 +1330,8 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
        struct drm_device *drm_dev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(drm_dev);
-       int r;
 
-       adev->in_poweroff_reboot_com = true;
-       r =  amdgpu_device_suspend(drm_dev, true);
-       adev->in_poweroff_reboot_com = false;
-       return r;
+       return amdgpu_device_suspend(drm_dev, true);
 }
 
 static int amdgpu_pmops_restore(struct device *dev)
index 64beb33996042647143539bace349d026fbf90dc..a4e2cf7cada1e4e406768aff2903640c4f0922c4 100644 (file)
@@ -778,9 +778,9 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                        dev_info->high_va_offset = AMDGPU_GMC_HOLE_END;
                        dev_info->high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
                }
-               dev_info->virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
+               dev_info->virtual_address_alignment = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
-               dev_info->gart_page_size = AMDGPU_GPU_PAGE_SIZE;
+               dev_info->gart_page_size = max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
                dev_info->cu_active_number = adev->gfx.cu_info.number;
                dev_info->cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
                dev_info->ce_ram_size = adev->gfx.ce_ram_size;
index 4b29b820544281cc3f1ab3c717866e6c0ea66b00..072050429a2fdeed279c17329dd7ac8ea9d6ebde 100644 (file)
@@ -1028,13 +1028,10 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        struct ttm_resource_manager *man;
 
-       /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-#ifndef CONFIG_HIBERNATION
-       if (adev->flags & AMD_IS_APU) {
-               /* Useless to evict on IGP chips */
+       if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
+               /* No need to evict vram on APUs for suspend to ram */
                return 0;
        }
-#endif
 
        man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
        return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
index ad91c0c3c42357cd3499df51286a77e86c861cf0..7d2c8b1698279cddb8e480401684ea62bf58e848 100644 (file)
@@ -2197,8 +2197,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
        uint64_t eaddr;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2263,8 +2263,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
        int r;
 
        /* validate the parameters */
-       if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-           size == 0 || size & AMDGPU_GPU_PAGE_MASK)
+       if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
+           size == 0 || size & ~PAGE_MASK)
                return -EINVAL;
 
        /* make sure object fit at this offset */
@@ -2409,7 +2409,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
                        after->start = eaddr + 1;
                        after->last = tmp->last;
                        after->offset = tmp->offset;
-                       after->offset += after->start - tmp->start;
+                       after->offset += (after->start - tmp->start) << PAGE_SHIFT;
                        after->flags = tmp->flags;
                        after->bo_va = tmp->bo_va;
                        list_add(&after->list, &tmp->bo_va->invalids);
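
The offset fix matters because mapping start/last are in pages while the BO offset is in bytes; splitting a mapping must therefore scale the page delta by the page size. A compilable arithmetic check (page size is illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages, for illustration */

    int main(void)
    {
            unsigned long tmp_start = 100, after_start = 164; /* pages */
            unsigned long offset = 0x2000;                    /* bytes */

            offset += (after_start - tmp_start) << PAGE_SHIFT;
            printf("tail offset = %#lx\n", offset); /* 0x2000 + 64 * 4 KiB */
            return 0;
    }
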
index 7944781e1086bee1b9e381007d598fa4400f75ef..19abb740a1693000d05e3032373d0ac55fa92a9c 100644 (file)
@@ -2897,6 +2897,11 @@ static int dce_v10_0_hw_fini(void *handle)
 static int dce_v10_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2921,8 +2926,10 @@ static int dce_v10_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v10_0_is_idle(void *handle)
index 1b6ff04700118dd30d3bdf64f7ca2df697f21221..320ec35bfd371cb4d166804059467365e0c183ca 100644 (file)
@@ -3027,6 +3027,11 @@ static int dce_v11_0_hw_fini(void *handle)
 static int dce_v11_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -3051,8 +3056,10 @@ static int dce_v11_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v11_0_is_idle(void *handle)
index 83a88385b7620d1c56762980ce96e84bbca3f4c8..13322000ebd67c22c0a46fe70a5f15ab104d1f98 100644 (file)
@@ -2770,7 +2770,11 @@ static int dce_v6_0_hw_fini(void *handle)
 static int dce_v6_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
 
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
 
@@ -2794,8 +2798,10 @@ static int dce_v6_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v6_0_is_idle(void *handle)
index 224b30214427f8df2ff0920718d891381d958a1c..04ebf02e5b8c6b219503d3d150b1edaf36d03c1a 100644 (file)
@@ -2796,6 +2796,11 @@ static int dce_v8_0_hw_fini(void *handle)
 static int dce_v8_0_suspend(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
 
        adev->mode_info.bl_level =
                amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
@@ -2820,8 +2825,10 @@ static int dce_v8_0_resume(void *handle)
                amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder,
                                                    bl_level);
        }
+       if (ret)
+               return ret;
 
-       return ret;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_v8_0_is_idle(void *handle)
index 9810af712cc0d1d01164b1255a0db093fa33e6f9..5c11144da0513dc4a92064e6d50e3ad8bb7d95b2 100644 (file)
@@ -39,6 +39,7 @@
 #include "dce_v11_0.h"
 #include "dce_virtual.h"
 #include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_display.h"
 
 #define DCE_VIRTUAL_VBLANK_PERIOD 16666666
 
@@ -491,12 +492,24 @@ static int dce_virtual_hw_fini(void *handle)
 
 static int dce_virtual_suspend(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_display_suspend_helper(adev);
+       if (r)
+               return r;
        return dce_virtual_hw_fini(handle);
 }
 
 static int dce_virtual_resume(void *handle)
 {
-       return dce_virtual_hw_init(handle);
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = dce_virtual_hw_init(handle);
+       if (r)
+               return r;
+       return amdgpu_display_resume_helper(adev);
 }
 
 static bool dce_virtual_is_idle(void *handle)
index b258a3dae767f83ee1cd07c1c189ca82c371ba1f..159add0f5aaae26fe0a7c8824d93f0a1127a5808 100644 (file)
@@ -155,7 +155,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
 
        /* Wait till CP writes sync code: */
        status = amdkfd_fence_wait_timeout(
-                       (unsigned int *) rm_state,
+                       rm_state,
                        QUEUESTATE__ACTIVE, 1500);
 
        kfd_gtt_sa_free(dbgdev->dev, mem_obj);
index e686ce2bf3b3cd032ff7cb90df15924d66295523..4598a9a58125114e2124e1a3a82650b2c9bf7b1c 100644 (file)
@@ -1167,7 +1167,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
        if (retval)
                goto fail_allocate_vidmem;
 
-       dqm->fence_addr = dqm->fence_mem->cpu_ptr;
+       dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
 
        init_interrupts(dqm);
@@ -1340,8 +1340,8 @@ out:
        return retval;
 }
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                               unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                               uint64_t fence_value,
                                unsigned int timeout_ms)
 {
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
index 7351dd195274e9b15d18be51e0f85378f63b2c3e..45f81594655441e3ddded9617efb232144fcb794 100644 (file)
@@ -192,7 +192,7 @@ struct device_queue_manager {
        uint16_t                vmid_pasid[VMID_NUM];
        uint64_t                pipelines_addr;
        uint64_t                fence_gpu_addr;
-       unsigned int            *fence_addr;
+       uint64_t                *fence_addr;
        struct kfd_mem_obj      *fence_mem;
        bool                    active_runlist;
        int                     sched_policy;
index 5d541e0cc8ca277754db8fa522eeab0c74dc6962..f71a7fa6680c8b50ef6f72a2a79d4c1a8007f3cd 100644 (file)
@@ -347,7 +347,7 @@ fail_create_runlist_ib:
 }
 
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                       uint32_t fence_value)
+                       uint64_t fence_value)
 {
        uint32_t *buffer, size;
        int retval = 0;
index dfaf771a42e66e47800f0cbb55ea4dca460a09a8..e3ba0cd3b6fa717966e179acf6b2e36e621ddc21 100644 (file)
@@ -283,7 +283,7 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index a852e0d7d804fdba2053808ff6520d860deb4d6f..08442e7d9944074d8da030261610a24d01f544c2 100644 (file)
@@ -263,7 +263,7 @@ static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
 }
 
 static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value)
+                       uint64_t fence_address, uint64_t fence_value)
 {
        struct pm4_mes_query_status *packet;
 
index 09599efa41fc918095549e0d5dc44202e00aa24c..f304d1f8df5f5c18e40dd8c17d1c64ab470b2c38 100644 (file)
@@ -1003,8 +1003,8 @@ int pqm_get_wave_state(struct process_queue_manager *pqm,
                       u32 *ctl_stack_used_size,
                       u32 *save_area_used_size);
 
-int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
-                             unsigned int fence_value,
+int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
+                             uint64_t fence_value,
                              unsigned int timeout_ms);
 
 /* Packet Manager */
@@ -1040,7 +1040,7 @@ struct packet_manager_funcs {
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
-                       uint64_t fence_address, uint32_t fence_value);
+                       uint64_t fence_address, uint64_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);
 
        /* Packet sizes */
@@ -1062,7 +1062,7 @@ int pm_send_set_resources(struct packet_manager *pm,
                                struct scheduling_resources *res);
 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
-                               uint32_t fence_value);
+                               uint64_t fence_value);
 
 int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
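
The widening above runs through the whole fence path (device queue manager, packet manager, wait helper) because a 64-bit fence value pushed through a 32-bit parameter silently truncates, and the wait may then never observe the value it was asked for. A small demonstration of the hazard:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fence_value = 0x100000000ULL;  /* bit 32 set */
            uint32_t truncated = (uint32_t)fence_value;

            printf("64-bit %#llx truncates to %u\n",
                   (unsigned long long)fence_value, truncated); /* 0 */
            return 0;
    }
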
index fa013496e26baf8ae1aa817d6c9fcf389633420b..2f9bfaeaba8d612382fd990591aebe0e23ad6435 100644 (file)
@@ -341,8 +341,7 @@ void enc2_hw_init(struct link_encoder *enc)
        } else {
                AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 
-               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
-
+               AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a);
        }
 
        //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32;
index a2681fe875ed6fb4a7242e5e3d36356f7e012d2e..d0ec83881fc59e7775a55ef062c91b221bbc0da8 100644 (file)
@@ -587,6 +587,48 @@ static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
                        tmp, MC_CG_ARB_FREQ_F0);
 }
 
+static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+       uint16_t pcie_gen = 0;
+
+       if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
+           adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
+               pcie_gen = 3;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
+               adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
+               pcie_gen = 2;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
+               adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
+               pcie_gen = 1;
+       else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
+               adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
+               pcie_gen = 0;
+
+       return pcie_gen;
+}
+
+static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+       uint16_t pcie_width = 0;
+
+       if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+               pcie_width = 16;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+               pcie_width = 12;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+               pcie_width = 8;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+               pcie_width = 4;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+               pcie_width = 2;
+       else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+               pcie_width = 1;
+
+       return pcie_width;
+}
+
 static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
 {
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -683,6 +725,11 @@ static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));
+
+               if (data->pcie_dpm_key_disabled)
+                       phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
+                               data->dpm_table.pcie_speed_table.count,
+                               smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
        }
        return 0;
 }
@@ -1248,6 +1295,13 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
                                                NULL)),
                                "Failed to enable pcie DPM during DPM Start Function!",
                                return -EINVAL);
+       } else {
+               PP_ASSERT_WITH_CODE(
+                               (0 == smum_send_msg_to_smc(hwmgr,
+                                               PPSMC_MSG_PCIeDPM_Disable,
+                                               NULL)),
+                               "Failed to disable pcie DPM during DPM Start Function!",
+                               return -EINVAL);
        }
 
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3276,7 +3330,8 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 
        disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
                                                !hwmgr->display_config->multi_monitor_in_sync) ||
-                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
+                                               (hwmgr->display_config->num_display &&
+                                               smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
 
        disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
                                         disable_mclk_switching_for_display;
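
The override helpers added above reduce capability masks to a single "highest supported" index. A simplified, compilable model (the mask layout here is invented; the driver checks separate platform and ASIC support bits within one mask):

    #include <stdio.h>

    #define GEN1 (1u << 0)
    #define GEN2 (1u << 1)
    #define GEN3 (1u << 2)
    #define GEN4 (1u << 3)

    /* Pick the highest PCIe gen both sides support (0 = Gen1). */
    static unsigned highest_gen(unsigned platform, unsigned asic)
    {
            unsigned both = platform & asic;

            if (both & GEN4) return 3;
            if (both & GEN3) return 2;
            if (both & GEN2) return 1;
            return 0;
    }

    int main(void)
    {
            printf("gen index = %u\n",
                   highest_gen(GEN1 | GEN2 | GEN3, GEN1 | GEN2)); /* 1 */
            return 0;
    }
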
index 22b636e2b89bed7b7e08ec369e7daf298d318bca..599ec972660187971b82e734559ff8904c1bfec0 100644 (file)
@@ -54,6 +54,9 @@
 #include "smuio/smuio_9_0_offset.h"
 #include "smuio/smuio_9_0_sh_mask.h"
 
+#define smnPCIE_LC_SPEED_CNTL                  0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL             0x11140288
+
 #define HBM_MEMORY_CHANNEL_WIDTH    128
 
 static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
@@ -443,8 +446,7 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        if (PP_CAP(PHM_PlatformCaps_VCEDPM))
                data->smu_features[GNLD_DPM_VCE].supported = true;
 
-       if (!data->registry_data.pcie_dpm_key_disabled)
-               data->smu_features[GNLD_DPM_LINK].supported = true;
+       data->smu_features[GNLD_DPM_LINK].supported = true;
 
        if (!data->registry_data.dcefclk_dpm_key_disabled)
                data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
@@ -1544,6 +1546,13 @@ static int vega10_override_pcie_parameters(struct pp_hwmgr *hwmgr)
                        pp_table->PcieLaneCount[i] = pcie_width;
        }
 
+       if (data->registry_data.pcie_dpm_key_disabled) {
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       pp_table->PcieGenSpeed[i] = pcie_gen;
+                       pp_table->PcieLaneCount[i] = pcie_width;
+               }
+       }
+
        return 0;
 }
 
@@ -2966,6 +2975,14 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
                }
        }
 
+       if (data->registry_data.pcie_dpm_key_disabled) {
+               PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
+                               false, data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap),
+               "Attempt to Disable Link DPM feature Failed!", return -EINVAL);
+               data->smu_features[GNLD_DPM_LINK].enabled = false;
+               data->smu_features[GNLD_DPM_LINK].supported = false;
+       }
+
        return 0;
 }
 
@@ -4584,6 +4601,24 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
        return 0;
 }
 
+static int vega10_get_current_pcie_link_width_level(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+               PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+               >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+}
+
+static int vega10_get_current_pcie_link_speed_level(struct pp_hwmgr *hwmgr)
+{
+       struct amdgpu_device *adev = hwmgr->adev;
+
+       return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+               PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+               >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+}
+
 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, char *buf)
 {
@@ -4592,8 +4627,9 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
        struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
        struct vega10_single_dpm_table *soc_table = &(data->dpm_table.soc_table);
        struct vega10_single_dpm_table *dcef_table = &(data->dpm_table.dcef_table);
-       struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
        struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL;
+       uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
+       PPTable_t *pptable = &(data->smc_state_table.pp_table);
 
        int i, now, size = 0, count = 0;
 
@@ -4650,15 +4686,31 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        "*" : "");
                break;
        case PP_PCIE:
-               smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
-
-               for (i = 0; i < pcie_table->count; i++)
-                       size += sprintf(buf + size, "%d: %s %s\n", i,
-                                       (pcie_table->pcie_gen[i] == 0) ? "2.5GT/s, x1" :
-                                       (pcie_table->pcie_gen[i] == 1) ? "5.0GT/s, x16" :
-                                       (pcie_table->pcie_gen[i] == 2) ? "8.0GT/s, x16" : "",
-                                       (i == now) ? "*" : "");
+               current_gen_speed =
+                       vega10_get_current_pcie_link_speed_level(hwmgr);
+               current_lane_width =
+                       vega10_get_current_pcie_link_width_level(hwmgr);
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       gen_speed = pptable->PcieGenSpeed[i];
+                       lane_width = pptable->PcieLaneCount[i];
+
+                       size += sprintf(buf + size, "%d: %s %s %s\n", i,
+                                       (gen_speed == 0) ? "2.5GT/s," :
+                                       (gen_speed == 1) ? "5.0GT/s," :
+                                       (gen_speed == 2) ? "8.0GT/s," :
+                                       (gen_speed == 3) ? "16.0GT/s," : "",
+                                       (lane_width == 1) ? "x1" :
+                                       (lane_width == 2) ? "x2" :
+                                       (lane_width == 3) ? "x4" :
+                                       (lane_width == 4) ? "x8" :
+                                       (lane_width == 5) ? "x12" :
+                                       (lane_width == 6) ? "x16" : "",
+                                       (current_gen_speed == gen_speed) &&
+                                       (current_lane_width == lane_width) ?
+                                       "*" : "");
+               }
                break;
+
        case OD_SCLK:
                if (hwmgr->od_enabled) {
                        size = sprintf(buf, "%s:\n", "OD_SCLK");
index 43e01d880f7ce8bdc1313ef1c0cfb73499238fb5..4f6da11e8f10687aa5f5a5e2efeab3152dd23e94 100644 (file)
@@ -133,6 +133,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.auto_wattman_debug = 0;
        data->registry_data.auto_wattman_sample_period = 100;
        data->registry_data.auto_wattman_threshold = 50;
+       data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega12_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -539,6 +540,29 @@ static int vega12_override_pcie_parameters(struct pp_hwmgr *hwmgr)
                pp_table->PcieLaneCount[i] = pcie_width_arg;
        }
 
+       /* override to the highest if PCIe DPM is disabled via ppfeaturemask */
+       if (data->registry_data.pcie_dpm_key_disabled) {
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                               NULL);
+                       PP_ASSERT_WITH_CODE(!ret,
+                               "[OverridePcieParameters] Attempt to override pcie params failed!",
+                               return ret);
+
+                       pp_table->PcieGenSpeed[i] = pcie_gen;
+                       pp_table->PcieLaneCount[i] = pcie_width;
+               }
+               ret = vega12_enable_smc_features(hwmgr,
+                               false,
+                               data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "Attempt to Disable DPM LINK Failed!",
+                               return ret);
+               data->smu_features[GNLD_DPM_LINK].enabled = false;
+               data->smu_features[GNLD_DPM_LINK].supported = false;
+       }
        return 0;
 }
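
The message argument in the loop above appears to pack the link level, gen and lane count into one 32-bit word (bits 23:16, 15:8 and 7:0 respectively, judging from the shifts). A quick check:

    #include <stdio.h>

    int main(void)
    {
            unsigned i = 1, pcie_gen = 3, pcie_width = 16;
            unsigned smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;

            printf("smu_pcie_arg = 0x%06x\n", smu_pcie_arg); /* 0x010310 */
            return 0;
    }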
 
index f19964c69a0027aaef02c5419a98d490553aeb45..b6ee3a285c9d9198acbda2b3b51d1e554b9e3e8b 100644 (file)
@@ -171,6 +171,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
        data->registry_data.gfxoff_controlled_by_driver = 1;
        data->gfxoff_allowed = false;
        data->counter_gfxoff = 0;
+       data->registry_data.pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
 }
 
 static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
@@ -884,6 +885,30 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
                pp_table->PcieLaneCount[i] = pcie_width_arg;
        }
 
+       /* override to the highest if PCIe DPM is disabled via ppfeaturemask */
+       if (data->registry_data.pcie_dpm_key_disabled) {
+               for (i = 0; i < NUM_LINK_LEVELS; i++) {
+                       smu_pcie_arg = (i << 16) | (pcie_gen << 8) | pcie_width;
+                       ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                               PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+                               NULL);
+                       PP_ASSERT_WITH_CODE(!ret,
+                               "[OverridePcieParameters] Attempt to override pcie params failed!",
+                               return ret);
+
+                       pp_table->PcieGenSpeed[i] = pcie_gen;
+                       pp_table->PcieLaneCount[i] = pcie_width;
+               }
+               ret = vega20_enable_smc_features(hwmgr,
+                               false,
+                               data->smu_features[GNLD_DPM_LINK].smu_feature_bitmap);
+               PP_ASSERT_WITH_CODE(!ret,
+                               "Attempt to Disable DPM LINK Failed!",
+                               return ret);
+               data->smu_features[GNLD_DPM_LINK].enabled = false;
+               data->smu_features[GNLD_DPM_LINK].supported = false;
+       }
+
        return 0;
 }
 
index d143ef1b460bf6f887d0aaf49c97150f8a604445..cd905e41080e5332c8c7b638c397dbef3b928ff2 100644 (file)
@@ -1294,7 +1294,7 @@ static int smu_disable_dpms(struct smu_context *smu)
        bool use_baco = !smu->is_apu &&
                ((amdgpu_in_reset(adev) &&
                  (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
-                ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
+                ((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
 
        /*
         * For custom pptable uploading, skip the DPM features
@@ -1431,7 +1431,8 @@ static int smu_suspend(void *handle)
 
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
 
-       if (smu->is_apu)
+       /* skip CGPG when in S0ix */
+       if (smu->is_apu && !adev->in_s0ix)
                smu_set_gfx_cgpg(&adev->smu, false);
 
        return 0;
index 7ddbaecb11c2dcd83b20b81101882b7ec794c1b7..101eaa20db9bd2b53657bf26a1c621fa9c1ea577 100644 (file)
@@ -384,10 +384,15 @@ static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 
 static bool vangogh_is_dpm_running(struct smu_context *smu)
 {
+       struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        uint32_t feature_mask[2];
        uint64_t feature_enabled;
 
+       /* we need to re-init after suspend, so return false */
+       if (adev->in_suspend)
+               return false;
+
        ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
 
        if (ret)
index 6d38c5c17f23eb8522e4d8558af56b19be66da80..db69f19ab5bcad8790d4f35760e3d019dfff85e0 100644 (file)
@@ -689,7 +689,8 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
                struct page **pages = pvec + pinned;
 
                ret = pin_user_pages_fast(ptr, num_pages,
-                                         !userptr->ro ? FOLL_WRITE : 0, pages);
+                                         FOLL_WRITE | FOLL_FORCE | FOLL_LONGTERM,
+                                         pages);
                if (ret < 0) {
                        unpin_user_pages(pvec, pinned);
                        kvfree(pvec);
index 1f79bc2a881e1f3889e5161ef14fbba33d1764be..1510e4e2973ccaa09079c896a4502c3f2600cc66 100644 (file)
@@ -13,7 +13,6 @@
 #include <linux/irq.h>
 #include <linux/mfd/syscon.h>
 #include <linux/of_device.h>
-#include <linux/of_gpio.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regmap.h>
index 4683f98f7e543ab79a5699c1cbe2055693727ee9..c3f2962aa1ebc5507702e7bebafb7c7fa5095b63 100644 (file)
@@ -317,12 +317,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
        if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc)
                return 0;
 
-       new_crtc_state->enabled_planes |= BIT(plane->id);
-
        ret = plane->check_plane(new_crtc_state, new_plane_state);
        if (ret)
                return ret;
 
+       if (fb)
+               new_crtc_state->enabled_planes |= BIT(plane->id);
+
        /* FIXME pre-g4x don't work like this */
        if (new_plane_state->uapi.visible)
                new_crtc_state->active_planes |= BIT(plane->id);
index 8c12d5375607a57cd2755dade7c18b7381bfa82f..775d89b6c3fc4aa7f39ebd0194e97216e5dbfe39 100644 (file)
@@ -3619,9 +3619,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
        int ret;
 
-       intel_dp_lttpr_init(intel_dp);
-
-       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
+       if (intel_dp_init_lttpr_and_dprx_caps(intel_dp) < 0)
                return false;
 
        /*
index eaebf123310a437de066e0f6f6f66ec20d82452a..10fe17b7280dbf2d955ce234ae21266c1fef3140 100644 (file)
@@ -133,6 +133,7 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
        else
                precharge = 5;
 
+       /* Max timeout value on G4x-BDW: 1.6ms */
        if (IS_BROADWELL(dev_priv))
                timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
        else
@@ -159,6 +160,12 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
        enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
        u32 ret;
 
+       /*
+        * Max timeout values:
+        * SKL-GLK: 1.6ms
+        * CNL: 3.2ms
+        * ICL+: 4ms
+        */
        ret = DP_AUX_CH_CTL_SEND_BUSY |
              DP_AUX_CH_CTL_DONE |
              DP_AUX_CH_CTL_INTERRUPT |
index 892d7db7d94f699e996d27bc3fc552818935a1e7..be6ac0dd846e8acf046defa4f7e66d285adc0f50 100644 (file)
@@ -34,6 +34,11 @@ intel_dp_dump_link_status(const u8 link_status[DP_LINK_STATUS_SIZE])
                      link_status[3], link_status[4], link_status[5]);
 }
 
+static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
+{
+       memset(&intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps));
+}
+
 static void intel_dp_reset_lttpr_count(struct intel_dp *intel_dp)
 {
        intel_dp->lttpr_common_caps[DP_PHY_REPEATER_CNT -
@@ -81,19 +86,36 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
 
 static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
 {
-       if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
-                                         intel_dp->lttpr_common_caps) < 0) {
-               memset(intel_dp->lttpr_common_caps, 0,
-                      sizeof(intel_dp->lttpr_common_caps));
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       if (intel_dp_is_edp(intel_dp))
                return false;
-       }
+
+       /*
+        * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+        * period < 3.2ms (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+        */
+       if (INTEL_GEN(i915) < 10)
+               return false;
+
+       if (drm_dp_read_lttpr_common_caps(&intel_dp->aux,
+                                         intel_dp->lttpr_common_caps) < 0)
+               goto reset_caps;
 
        drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
                    "LTTPR common capabilities: %*ph\n",
                    (int)sizeof(intel_dp->lttpr_common_caps),
                    intel_dp->lttpr_common_caps);
 
+       /* The minimum value of LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV is 1.4 */
+       if (intel_dp->lttpr_common_caps[0] < 0x14)
+               goto reset_caps;
+
        return true;
+
+reset_caps:
+       intel_dp_reset_lttpr_common_caps(intel_dp);
+       return false;
 }
 
 static bool
@@ -106,33 +128,49 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
 }
 
 /**
- * intel_dp_lttpr_init - detect LTTPRs and init the LTTPR link training mode
+ * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
  * @intel_dp: Intel DP struct
  *
- * Read the LTTPR common capabilities, switch to non-transparent link training
- * mode if any is detected and read the PHY capabilities for all detected
- * LTTPRs. In case of an LTTPR detection error or if the number of
+ * Read the LTTPR common and DPRX capabilities and switch to non-transparent
+ * link training mode if any is detected and read the PHY capabilities for all
+ * detected LTTPRs. In case of an LTTPR detection error or if the number of
  * LTTPRs is more than is supported (8), fall back to the no-LTTPR,
  * transparent mode link training mode.
  *
  * Returns:
- *   >0  if LTTPRs were detected and the non-transparent LT mode was set
+ *   >0  if LTTPRs were detected and the non-transparent LT mode was set. The
+ *       DPRX capabilities are read out.
  *    0  if no LTTPRs or more than 8 LTTPRs were detected or in case of a
- *       detection failure and the transparent LT mode was set
+ *       detection failure and the transparent LT mode was set. The DPRX
+ *       capabilities are read out.
+ *   <0  Reading out the DPRX capabilities failed.
  */
-int intel_dp_lttpr_init(struct intel_dp *intel_dp)
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 {
        int lttpr_count;
        bool ret;
        int i;
 
-       if (intel_dp_is_edp(intel_dp))
-               return 0;
-
        ret = intel_dp_read_lttpr_common_caps(intel_dp);
+
+       /* The DPTX shall read the DPRX caps after LTTPR detection. */
+       if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd)) {
+               intel_dp_reset_lttpr_common_caps(intel_dp);
+               return -EIO;
+       }
+
        if (!ret)
                return 0;
 
+       /*
+        * The 0xF0000-0xF02FF range is only valid if the DPCD revision is
+        * at least 1.4.
+        */
+       if (intel_dp->dpcd[DP_DPCD_REV] < 0x14) {
+               intel_dp_reset_lttpr_common_caps(intel_dp);
+               return 0;
+       }
+
        lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
        /*
         * Prevent setting LTTPR transparent mode explicitly if no LTTPRs are
@@ -172,7 +210,7 @@ int intel_dp_lttpr_init(struct intel_dp *intel_dp)
 
        return lttpr_count;
 }
-EXPORT_SYMBOL(intel_dp_lttpr_init);
+EXPORT_SYMBOL(intel_dp_init_lttpr_and_dprx_caps);
 
 static u8 dp_voltage_max(u8 preemph)
 {
@@ -807,7 +845,10 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
         * TODO: Reiniting LTTPRs here won't be needed once proper connector
         * HW state readout is added.
         */
-       int lttpr_count = intel_dp_lttpr_init(intel_dp);
+       int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
+
+       if (lttpr_count < 0)
+               return;
 
        if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
                intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
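Editor's note: both revision gates above (the LTTPR caps byte and DP_DPCD_REV compared against 0x14) rely on DPCD's BCD-style encoding, where the high nibble is the major and the low nibble the minor revision. A minimal decoder, assuming only that encoding:

#include <stdio.h>
#include <stdint.h>

static void print_dpcd_rev(uint8_t rev)
{
        printf("DPCD rev byte 0x%02x -> %u.%u\n", rev, rev >> 4, rev & 0xf);
}

int main(void)
{
        print_dpcd_rev(0x14);   /* 1.4: minimum for the 0xF0000 LTTPR range */
        print_dpcd_rev(0x12);   /* 1.2: LTTPR caps must be discarded */
        return 0;
}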
index 6a1f76bd8c7586a6f40fea2e4b8b61cb30e97225..9cb7c28027f0cc5ee3005f921b2c6fcc6764df39 100644 (file)
@@ -11,7 +11,7 @@
 struct intel_crtc_state;
 struct intel_dp;
 
-int intel_dp_lttpr_init(struct intel_dp *intel_dp);
+int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
 
 void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state,
index f58cc5700784e2dd46bcbe73fa30ef49909b8bf3..a86c57d117f2b6a3568ea3718b5cecb902df6e74 100644 (file)
@@ -1014,20 +1014,14 @@ static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
 {
        enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 
-       if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
-               return DSS_CTL1;
-
-       return ICL_PIPE_DSS_CTL1(pipe);
+       return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
 }
 
 static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
 {
        enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
 
-       if (crtc_state->cpu_transcoder == TRANSCODER_EDP)
-               return DSS_CTL2;
-
-       return ICL_PIPE_DSS_CTL2(pipe);
+       return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
 }
 
 void intel_dsc_enable(struct intel_encoder *encoder,
index a357bb4318152ba61c4be3309c49e315a665aacf..67de2b1895989d54a9c1612742cfd3ef459019d3 100644 (file)
@@ -316,7 +316,18 @@ void i915_vma_revoke_fence(struct i915_vma *vma)
        WRITE_ONCE(fence->vma, NULL);
        vma->fence = NULL;
 
-       with_intel_runtime_pm_if_in_use(fence_to_uncore(fence)->rpm, wakeref)
+       /*
+        * Skip the write to HW if and only if the device is currently
+        * suspended.
+        *
+        * If the driver does not currently hold a wakeref (if_in_use == 0),
+        * the device may currently be runtime suspended, or it may be woken
+        * up before the suspend takes place. If the device is not suspended
+        * (powered down) and we skip clearing the fence register, the HW is
+        * left in an undefined state where we may end up with multiple
+        * registers overlapping.
+        */
+       with_intel_runtime_pm_if_active(fence_to_uncore(fence)->rpm, wakeref)
                fence_write(fence);
 }
 
index 153ca9e65382ebb8690b058a8263d23080f2227f..8b725efb2254c7d609bb346d34396fd12cd9e4fa 100644 (file)
@@ -412,12 +412,20 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
 }
 
 /**
- * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
  * @rpm: the intel_runtime_pm structure
+ * @ignore_usecount: get a ref even if dev->power.usage_count is 0
  *
  * This function grabs a device-level runtime pm reference if the device is
- * already in use and ensures that it is powered up. It is illegal to try
- * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
+ * already active and ensures that it is powered up. It is illegal to try
+ * and access the HW should intel_runtime_pm_get_if_active() report failure.
+ *
+ * If @ignore_usecount=true, a reference will be acquired even if there is no
+ * user requiring the device to be powered up (dev->power.usage_count == 0).
+ * If the function returns false in this case then it's guaranteed that the
+ * device's runtime suspend hook has been called already or that it will be
+ * called (and hence it's also guaranteed that the device's runtime resume
+ * hook will be called eventually).
  *
  * Any runtime pm reference obtained by this function must have a symmetric
  * call to intel_runtime_pm_put() to release the reference again.
@@ -425,7 +433,8 @@ intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
  * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
  * as True if the wakeref was acquired, or False otherwise.
  */
-intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
+                                                       bool ignore_usecount)
 {
        if (IS_ENABLED(CONFIG_PM)) {
                /*
@@ -434,7 +443,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
                 * function, since the power state is undefined. This applies
                 * atm to the late/early system suspend/resume handlers.
                 */
-               if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
+               if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
                        return 0;
        }
 
@@ -443,6 +452,16 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
        return track_intel_runtime_pm_wakeref(rpm);
 }
 
+intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
+{
+       return __intel_runtime_pm_get_if_active(rpm, false);
+}
+
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
+{
+       return __intel_runtime_pm_get_if_active(rpm, true);
+}
+
 /**
  * intel_runtime_pm_get_noresume - grab a runtime pm reference
  * @rpm: the intel_runtime_pm structure
index ae64ff14c6425f6f2bedc5bb2225e521627db7fc..1e4ddd11c12bb0008bd5c6538a23b3bdd78e4e34 100644 (file)
@@ -177,6 +177,7 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
 
 intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
+intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
 intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
 
@@ -188,6 +189,10 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);
        for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
             intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
 
+#define with_intel_runtime_pm_if_active(rpm, wf) \
+       for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
+            intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
+
 void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
 void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
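Editor's note: the difference between the two getters above is whether a zero usage count blocks the grab. A toy model of that policy in plain C (no real runtime PM involved; struct and names invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct toy_rpm {
        bool suspended; /* device powered down */
        int usage;      /* dev->power.usage_count analogue */
};

/* get_if_in_use: need an existing user; get_if_active: powered-up is enough. */
static bool toy_get_if_active(struct toy_rpm *rpm, bool ignore_usecount)
{
        if (rpm->suspended)
                return false;
        if (!ignore_usecount && rpm->usage == 0)
                return false;
        rpm->usage++;
        return true;
}

int main(void)
{
        struct toy_rpm rpm = { .suspended = false, .usage = 0 };

        printf("if_in_use: %d\n", toy_get_if_active(&rpm, false)); /* 0 */
        printf("if_active: %d\n", toy_get_if_active(&rpm, true));  /* 1 */
        return 0;
}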
index d1a9841adeedf7eaeee20f4ce3207db4042e260b..e6a88c8cbd691a55bb900594c5e90363c461fb0d 100644 (file)
@@ -215,7 +215,7 @@ static int imx_drm_bind(struct device *dev)
 
        ret = drmm_mode_config_init(drm);
        if (ret)
-               return ret;
+               goto err_kms;
 
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret)
index dbfe39e2f7f614151795f82e6b5b626ae2214b1c..ffdc492c5bc51ea2d5727bd701ef18e3c34df4f0 100644 (file)
@@ -197,6 +197,11 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
        int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        drm_panel_prepare(imx_ldb_ch->panel);
 
        if (dual) {
@@ -255,6 +260,11 @@ imx_ldb_encoder_atomic_mode_set(struct drm_encoder *encoder,
        int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
        u32 bus_format = imx_ldb_ch->bus_format;
 
+       if (mux < 0 || mux >= ARRAY_SIZE(ldb->clk_sel)) {
+               dev_warn(ldb->dev, "%s: invalid mux %d\n", __func__, mux);
+               return;
+       }
+
        if (mode->clock > 170000) {
                dev_warn(ldb->dev,
                         "%s: mode exceeds 170 MHz pixel clock\n", __func__);
@@ -583,7 +593,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
                struct imx_ldb_channel *channel = &imx_ldb->channel[i];
 
                if (!channel->ldb)
-                       break;
+                       continue;
 
                ret = imx_ldb_register(drm, channel);
                if (ret)
index 5ccc9da455a1699933ee52a4b1581f5b595893c5..c35b06b46fcc81d21c50bc6f6aaede6621fd2d47 100644 (file)
@@ -304,7 +304,7 @@ int a5xx_power_init(struct msm_gpu *gpu)
        /* Set up the limits management */
        if (adreno_is_a530(adreno_gpu))
                a530_lm_setup(gpu);
-       else
+       else if (adreno_is_a540(adreno_gpu))
                a540_lm_setup(gpu);
 
        /* Set up SP/TP power collapse */
index 71c917f909af7235fc9540f9923d65c1e9abd917..91cf46f840257f4901c07666fbe7b708d142512f 100644 (file)
@@ -339,7 +339,7 @@ void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
        else
                bit = a6xx_gmu_oob_bits[state].ack_new;
 
-       gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, bit);
+       gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << bit);
 }
 
 /* Enable CPU control of SPTP power collapse */
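Editor's note: the one-character fix above is the classic bit-index-versus-bit-mask confusion — the OOB table stores an ack bit number, while the HOST2GMU register write expects a mask. The distinction in isolation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t bit = 24;              /* e.g. an ack_new bit number */

        uint32_t wrong = bit;           /* 24 = 0b11000: sets bits 3 and 4 */
        uint32_t right = 1u << bit;     /* sets only bit 24, as intended */

        printf("wrong: 0x%08x  right: 0x%08x\n", wrong, right);
        return 0;
}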
index ba8e9d3cf0fef931fa63bcda3257408bd8f34cbe..690409ca8a18681dc78a71c9a9448480457b024a 100644 (file)
@@ -522,28 +522,73 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
-static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+/*
+ * Check that the microcode version is new enough to include several key
+ * security fixes. Return true if the ucode is safe.
+ */
+static bool a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
                struct drm_gem_object *obj)
 {
+       struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+       struct msm_gpu *gpu = &adreno_gpu->base;
        u32 *buf = msm_gem_get_vaddr(obj);
+       bool ret = false;
 
        if (IS_ERR(buf))
-               return;
+               return false;
 
        /*
-        * If the lowest nibble is 0xa that is an indication that this microcode
-        * has been patched. The actual version is in dword [3] but we only care
-        * about the patchlevel which is the lowest nibble of dword [3]
-        *
-        * Otherwise check that the firmware is greater than or equal to 1.90
-        * which was the first version that had this fix built in
+        * Targets up to a640 (a618, a630 and a640) need to check for a
+        * microcode version that is patched to support the whereami opcode or
+        * one that is new enough to include it by default.
         */
-       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
-               a6xx_gpu->has_whereami = true;
-       else if ((buf[0] & 0xfff) > 0x190)
-               a6xx_gpu->has_whereami = true;
+       if (adreno_is_a618(adreno_gpu) || adreno_is_a630(adreno_gpu) ||
+               adreno_is_a640(adreno_gpu)) {
+               /*
+                * If the lowest nibble is 0xa that is an indication that this
+                * microcode has been patched. The actual version is in dword
+                * [3] but we only care about the patchlevel which is the lowest
+                * nibble of dword [3]
+                *
+                * Otherwise check that the firmware is greater than or equal
+                * to 1.90 which was the first version that had this fix built
+                * in
+                */
+               if ((((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1) ||
+                       (buf[0] & 0xfff) >= 0x190) {
+                       a6xx_gpu->has_whereami = true;
+                       ret = true;
+                       goto out;
+               }
+
+               DRM_DEV_ERROR(&gpu->pdev->dev,
+                       "a630 SQE ucode is too old. Have version %x need at least %x\n",
+                       buf[0] & 0xfff, 0x190);
+       } else {
+               /*
+                * a650 tier targets don't need whereami but still need to be
+                * equal to or newer than 1.95 for other security fixes
+                */
+               if (adreno_is_a650(adreno_gpu)) {
+                       if ((buf[0] & 0xfff) >= 0x195) {
+                               ret = true;
+                               goto out;
+                       }
+
+                       DRM_DEV_ERROR(&gpu->pdev->dev,
+                               "a650 SQE ucode is too old. Have version %x need at least %x\n",
+                               buf[0] & 0xfff, 0x195);
+               }
 
+               /*
+                * When a660 is added those targets should return true here
+                * since those have all the critical security fixes built in
+                * from the start
+                */
+       }
+out:
        msm_gem_put_vaddr(obj);
+       return ret;
 }
 
 static int a6xx_ucode_init(struct msm_gpu *gpu)
@@ -566,7 +611,13 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
-               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
+               if (!a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo)) {
+                       msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+                       drm_gem_object_put(a6xx_gpu->sqe_bo);
+
+                       a6xx_gpu->sqe_bo = NULL;
+                       return -EPERM;
+               }
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -1350,35 +1401,20 @@ static int a6xx_set_supported_hw(struct device *dev, struct a6xx_gpu *a6xx_gpu,
                u32 revn)
 {
        struct opp_table *opp_table;
-       struct nvmem_cell *cell;
        u32 supp_hw = UINT_MAX;
-       void *buf;
-
-       cell = nvmem_cell_get(dev, "speed_bin");
-       /*
-        * -ENOENT means that the platform doesn't support speedbin which is
-        * fine
-        */
-       if (PTR_ERR(cell) == -ENOENT)
-               return 0;
-       else if (IS_ERR(cell)) {
-               DRM_DEV_ERROR(dev,
-                               "failed to read speed-bin. Some OPPs may not be supported by hardware");
-               goto done;
-       }
+       u16 speedbin;
+       int ret;
 
-       buf = nvmem_cell_read(cell, NULL);
-       if (IS_ERR(buf)) {
-               nvmem_cell_put(cell);
+       ret = nvmem_cell_read_u16(dev, "speed_bin", &speedbin);
+       if (ret) {
                DRM_DEV_ERROR(dev,
-                               "failed to read speed-bin. Some OPPs may not be supported by hardware");
+                             "failed to read speed-bin (%d). Some OPPs may not be supported by hardware",
+                             ret);
                goto done;
        }
+       speedbin = le16_to_cpu(speedbin);
 
-       supp_hw = fuse_to_supp_hw(dev, revn, *((u32 *) buf));
-
-       kfree(buf);
-       nvmem_cell_put(cell);
+       supp_hw = fuse_to_supp_hw(dev, revn, speedbin);
 
 done:
        opp_table = dev_pm_opp_set_supported_hw(dev, &supp_hw, 1);
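Editor's note: the version gates earlier in this file all key off the low 12 bits of the first SQE dword, with a vendor-patched build flagged by a 0xa low nibble. A standalone parser matching that logic — the thresholds are copied from the diff, everything else is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

/* true if a pre-a650 SQE image is new enough for the whereami opcode */
static bool sqe_has_whereami(const uint32_t *buf)
{
        if ((buf[0] & 0xf) == 0xa)              /* patched build */
                return (buf[2] & 0xf) >= 1;     /* patchlevel in dword [3] */
        return (buf[0] & 0xfff) >= 0x190;       /* stock 1.90 or newer */
}

int main(void)
{
        uint32_t patched[3] = { 0x0000001a, 0, 0x00000001 };
        uint32_t stock[3]   = { 0x00000191, 0, 0 };
        uint32_t old[3]     = { 0x00000185, 0, 0 };

        printf("%d %d %d\n", sqe_has_whereami(patched),
               sqe_has_whereami(stock), sqe_has_whereami(old)); /* 1 1 0 */
        return 0;
}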
index 5a8e3e1fc48c3c2c2df6934fd7b2fcb05f8a35b0..85f2c3564c96668e36b7c032dcbc21642cbbd2aa 100644 (file)
@@ -43,6 +43,8 @@
 #define DPU_DEBUGFS_DIR "msm_dpu"
 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
 
+#define MIN_IB_BW      400000000ULL /* Min ib vote 400 MBps */
+
 static int dpu_kms_hw_init(struct msm_kms *kms);
 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
 
@@ -931,6 +933,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
                DPU_DEBUG("REG_DMA is not defined");
        }
 
+       if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+               dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
        pm_runtime_get_sync(&dpu_kms->pdev->dev);
 
        dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
@@ -1032,9 +1037,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
        dpu_vbif_init_memtypes(dpu_kms);
 
-       if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
-               dpu_kms_parse_data_bus_icc_path(dpu_kms);
-
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        return 0;
@@ -1191,10 +1193,10 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
 
        ddev = dpu_kms->dev;
 
+       WARN_ON(!(dpu_kms->num_paths));
        /* Min vote of BW is required before turning on AXI clk */
        for (i = 0; i < dpu_kms->num_paths; i++)
-               icc_set_bw(dpu_kms->path[i], 0,
-                       dpu_kms->catalog->perf.min_dram_ib);
+               icc_set_bw(dpu_kms->path[i], 0, Bps_to_icc(MIN_IB_BW));
 
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
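Editor's note: Bps_to_icc() converts a bytes-per-second figure into the units the interconnect framework accounts in, so the 400000000ULL define above becomes a 400000 kBps ib vote. A one-line model of that conversion — the /1000 factor reflects interconnect's kBps accounting as I understand it, so treat it as an assumption:

#include <stdio.h>

#define Bps_to_icc(x)   ((x) / 1000)    /* interconnect accounts in kBps */

int main(void)
{
        unsigned long long min_ib_bw = 400000000ULL;    /* 400 MBps */

        printf("ib vote: %llu kBps\n", Bps_to_icc(min_ib_bw)); /* 400000 */
        return 0;
}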
index 1c6e1d2b947c7fd25179fbd74268f5a8057343fd..7c22bfe0fc7d79a82aef3890d855112da02dc2e1 100644 (file)
@@ -32,6 +32,8 @@ struct dp_aux_private {
        struct drm_dp_aux dp_aux;
 };
 
+#define MAX_AUX_RETRIES                        5
+
 static const char *dp_aux_get_error(u32 aux_error)
 {
        switch (aux_error) {
@@ -377,6 +379,11 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
        ret = dp_aux_cmd_fifo_tx(aux, msg);
 
        if (ret < 0) {
+               if (aux->native) {
+                       aux->retry_cnt++;
+                       if (!(aux->retry_cnt % MAX_AUX_RETRIES))
+                               dp_catalog_aux_update_cfg(aux->catalog);
+               }
                usleep_range(400, 500); /* at least 400us to next try */
                goto unlock_exit;
        }
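Editor's note: the retry bookkeeping above reconfigures the AUX PHY only on every MAX_AUX_RETRIES-th consecutive native-transfer failure, not on each one. The counting trick in isolation:

#include <stdio.h>

#define MAX_AUX_RETRIES 5

int main(void)
{
        int retry_cnt = 0, attempt;

        for (attempt = 1; attempt <= 12; attempt++) {
                retry_cnt++;            /* every attempt here "fails" */
                if (!(retry_cnt % MAX_AUX_RETRIES))
                        printf("attempt %d: recalibrate AUX cfg\n", attempt);
        }
        return 0;       /* recalibrates on attempts 5 and 10 */
}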
index a45fe95aff494938f864304fcc8688f40ea3253b..3dc65877fa10daf92dcf25fd79a489a025bd2ac9 100644 (file)
@@ -163,7 +163,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
                break;
        case MSM_DSI_PHY_7NM:
        case MSM_DSI_PHY_7NM_V4_1:
-               pll = msm_dsi_pll_7nm_init(pdev, id);
+               pll = msm_dsi_pll_7nm_init(pdev, type, id);
                break;
        default:
                pll = ERR_PTR(-ENXIO);
index 3405982a092c4653b44b083a7481b1109f2ab9a6..bbecb1de5678e60fab83899b7dbea0acd56c7cc8 100644 (file)
@@ -117,10 +117,12 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
 }
 #endif
 #ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+                                       enum msm_dsi_phy_type type, int id);
 #else
 static inline struct msm_dsi_pll *
-msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+msm_dsi_pll_7nm_init(struct platform_device *pdev,
+                                       enum msm_dsi_phy_type type, int id)
 {
        return ERR_PTR(-ENODEV);
 }
index 93bf142e4a4e6ad0c4048ce3d2c3f9674f9460c4..e29b3bfd63d129e5048255e8553f4d4dc0608c9e 100644 (file)
@@ -325,7 +325,7 @@ static void dsi_pll_commit(struct dsi_pll_7nm *pll)
        pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
        pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
        pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
-       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, reg->pll_lockdet_rate);
        pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
        pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
        pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
@@ -509,6 +509,7 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
 {
        struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
        struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct dsi_pll_config *config = &pll_7nm->pll_configuration;
        void __iomem *base = pll_7nm->mmio;
        u64 ref_clk = pll_7nm->vco_ref_clk_rate;
        u64 vco_rate = 0x0;
@@ -529,9 +530,8 @@ static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
        /*
         * TODO:
         *      1. Assumes prescaler is disabled
-        *      2. Multiplier is 2^18. it should be 2^(num_of_frac_bits)
         */
-       multiplier = 1 << 18;
+       multiplier = 1 << config->frac_bits;
        pll_freq = dec * (ref_clk * 2);
        tmp64 = (ref_clk * 2 * frac);
        pll_freq += div_u64(tmp64, multiplier);
@@ -852,7 +852,8 @@ err_base_clk_hw:
        return ret;
 }
 
-struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev,
+                                       enum msm_dsi_phy_type type, int id)
 {
        struct dsi_pll_7nm *pll_7nm;
        struct msm_dsi_pll *pll;
@@ -885,7 +886,7 @@ struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
        pll = &pll_7nm->base;
        pll->min_rate = 1000000000UL;
        pll->max_rate = 3500000000UL;
-       if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+       if (type == MSM_DSI_PHY_7NM_V4_1) {
                pll->min_rate = 600000000UL;
                pll->max_rate = (unsigned long)5000000000ULL;
                /* workaround for max rate overflowing on 32-bit builds: */
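Editor's note: the recalc fix above replaces a hard-coded 2^18 with the configured fractional width. The resulting frequency math, lifted into plain C — the formula matches the hunk, while the reference clock and divider values here are purely illustrative:

#include <stdio.h>
#include <stdint.h>

static uint64_t pll_recalc(uint64_t ref_clk, uint64_t dec, uint64_t frac,
                           unsigned int frac_bits)
{
        uint64_t multiplier = 1ull << frac_bits;
        uint64_t pll_freq = dec * ref_clk * 2;

        return pll_freq + (ref_clk * 2 * frac) / multiplier;
}

int main(void)
{
        /* 19.2 MHz reference, integer part 80, arbitrary fractional part */
        printf("%llu Hz\n", (unsigned long long)
               pll_recalc(19200000, 80, 0x1555, 18));
        return 0;
}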
index 6a326761dc4ad6e9cb47177e840d39c7ec63d150..edcaccaa27e606525d73b217563e810b642fe1d9 100644 (file)
@@ -57,10 +57,13 @@ static void vblank_put(struct msm_kms *kms, unsigned crtc_mask)
 
 static void lock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
 {
+       int crtc_index;
        struct drm_crtc *crtc;
 
-       for_each_crtc_mask(kms->dev, crtc, crtc_mask)
-               mutex_lock(&kms->commit_lock[drm_crtc_index(crtc)]);
+       for_each_crtc_mask(kms->dev, crtc, crtc_mask) {
+               crtc_index = drm_crtc_index(crtc);
+               mutex_lock_nested(&kms->commit_lock[crtc_index], crtc_index);
+       }
 }
 
 static void unlock_crtcs(struct msm_kms *kms, unsigned int crtc_mask)
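Editor's note: mutex_lock_nested() above only changes lockdep's view — each CRTC lock gets its index as the subclass so the fixed-order acquisition isn't flagged as recursive locking. The deadlock-avoidance property itself is just ascending-index ordering, as in this pthread sketch (names and mask handling are illustrative):

#include <pthread.h>
#include <stdio.h>

#define MAX_CRTCS 8

static pthread_mutex_t commit_lock[MAX_CRTCS] = {
        [0 ... MAX_CRTCS - 1] = PTHREAD_MUTEX_INITIALIZER
};

/* Take the lock for every CRTC in the mask, always in ascending order. */
static void lock_crtcs(unsigned int crtc_mask)
{
        for (int i = 0; i < MAX_CRTCS; i++)
                if (crtc_mask & (1u << i))
                        pthread_mutex_lock(&commit_lock[i]);
}

static void unlock_crtcs(unsigned int crtc_mask)
{
        for (int i = MAX_CRTCS - 1; i >= 0; i--)
                if (crtc_mask & (1u << i))
                        pthread_mutex_unlock(&commit_lock[i]);
}

int main(void)
{
        lock_crtcs(0x5);        /* CRTCs 0 and 2, taken in index order */
        puts("locked 0 and 2");
        unlock_crtcs(0x5);
        return 0;
}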
index 94525ac76d4e6f94947e7f722d84fb6630c31e4c..a5c6b8c233366d698e436bc70108e5a38cc2a621 100644 (file)
@@ -1072,6 +1072,10 @@ static int __maybe_unused msm_pm_resume(struct device *dev)
 static int __maybe_unused msm_pm_prepare(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return 0;
 
        return drm_mode_config_helper_suspend(ddev);
 }
@@ -1079,6 +1083,10 @@ static int __maybe_unused msm_pm_prepare(struct device *dev)
 static void __maybe_unused msm_pm_complete(struct device *dev)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
+       struct msm_drm_private *priv = ddev ? ddev->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return;
 
        drm_mode_config_helper_resume(ddev);
 }
@@ -1311,6 +1319,10 @@ static int msm_pdev_remove(struct platform_device *pdev)
 static void msm_pdev_shutdown(struct platform_device *pdev)
 {
        struct drm_device *drm = platform_get_drvdata(pdev);
+       struct msm_drm_private *priv = drm ? drm->dev_private : NULL;
+
+       if (!priv || !priv->kms)
+               return;
 
        drm_atomic_helper_shutdown(drm);
 }
index ad2703698b052bc6132fb597a0f7120f1988b9fc..cd59a591803851655426b1670530b4ca114b4689 100644 (file)
@@ -45,7 +45,7 @@ int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
        int ret;
 
        if (fence > fctx->last_fence) {
-               DRM_ERROR("%s: waiting on invalid fence: %u (of %u)\n",
+               DRM_ERROR_RATELIMITED("%s: waiting on invalid fence: %u (of %u)\n",
                                fctx->name, fence, fctx->last_fence);
                return -EINVAL;
        }
index 4735251a394d8be03784c7b8dc8b5636744f750d..d8151a89e1631c2176d2eb576b0dbfbdcdc66378 100644 (file)
@@ -157,7 +157,6 @@ struct msm_kms {
         * from the crtc's pending_timer close to end of the frame:
         */
        struct mutex commit_lock[MAX_CRTCS];
-       struct lock_class_key commit_lock_keys[MAX_CRTCS];
        unsigned pending_crtc_mask;
        struct msm_pending_timer pending_timers[MAX_CRTCS];
 };
@@ -167,11 +166,8 @@ static inline int msm_kms_init(struct msm_kms *kms,
 {
        unsigned i, ret;
 
-       for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++) {
-               lockdep_register_key(&kms->commit_lock_keys[i]);
-               __mutex_init(&kms->commit_lock[i], "&kms->commit_lock[i]",
-                            &kms->commit_lock_keys[i]);
-       }
+       for (i = 0; i < ARRAY_SIZE(kms->commit_lock); i++)
+               mutex_init(&kms->commit_lock[i]);
 
        kms->funcs = funcs;
 
index 196612addfd615ef5c8b324f9df94bdd595d8e87..1c9c0cdf85dbc8ccad324d6975372abf8e460926 100644 (file)
@@ -2693,9 +2693,20 @@ nv50_display_create(struct drm_device *dev)
        else
                nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
 
-       if (disp->disp->object.oclass >= GK104_DISP) {
+       /* FIXME: 256x256 cursors are supported on Kepler; however, unlike Maxwell and later
+        * generations, Kepler requires that we use small pages (4K) for cursor scanout surfaces. The
+        * proper fix for this is to teach nouveau to migrate fbs being used for the cursor plane to
+        * small page allocations in prepare_fb(). When this is implemented, we should also force
+        * large pages (128K) for ovly fbs in order to fix Kepler ovlys.
+        * But until then, just limit cursors to 128x128 - which is small enough to avoid ever using
+        * large pages.
+        */
+       if (disp->disp->object.oclass >= GM107_DISP) {
                dev->mode_config.cursor_width = 256;
                dev->mode_config.cursor_height = 256;
+       } else if (disp->disp->object.oclass >= GK104_DISP) {
+               dev->mode_config.cursor_width = 128;
+               dev->mode_config.cursor_height = 128;
        } else {
                dev->mode_config.cursor_width = 64;
                dev->mode_config.cursor_height = 64;
index ba8c6038cd63c9fac7c9738c8391a678e0415b6a..ca3761772211e60934ed40f6da887bd718afde97 100644 (file)
@@ -48,21 +48,12 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
 static const struct drm_encoder_funcs rcar_du_encoder_funcs = {
 };
 
-static void rcar_du_encoder_release(struct drm_device *dev, void *res)
-{
-       struct rcar_du_encoder *renc = res;
-
-       drm_encoder_cleanup(&renc->base);
-       kfree(renc);
-}
-
 int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                         enum rcar_du_output output,
                         struct device_node *enc_node)
 {
        struct rcar_du_encoder *renc;
        struct drm_bridge *bridge;
-       int ret;
 
        /*
         * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
@@ -101,26 +92,16 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
                        return -ENOLINK;
        }
 
-       renc = kzalloc(sizeof(*renc), GFP_KERNEL);
-       if (renc == NULL)
-               return -ENOMEM;
-
-       renc->output = output;
-
        dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
                enc_node, output);
 
-       ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs,
-                              DRM_MODE_ENCODER_NONE, NULL);
-       if (ret < 0) {
-               kfree(renc);
-               return ret;
-       }
+       renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
+                                 &rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
+                                 NULL);
+       if (!renc)
+               return -ENOMEM;
 
-       ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release,
-                                      renc);
-       if (ret)
-               return ret;
+       renc->output = output;
 
        /*
         * Attach the bridge to the encoder. The bridge will create the
index 0ae3a025efe9d6f9566ef69a05021c627c16583a..134986dc2783f77ad6ef295692d7f2a5bfed479d 100644 (file)
@@ -1688,6 +1688,11 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                        dev_err(dc->dev,
                                "failed to set clock rate to %lu Hz\n",
                                state->pclk);
+
+               err = clk_set_rate(dc->clk, state->pclk);
+               if (err < 0)
+                       dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
+                               dc->clk, state->pclk, err);
        }
 
        DRM_DEBUG_KMS("rate: %lu, div: %u\n", clk_get_rate(dc->clk),
@@ -1698,11 +1703,6 @@ static void tegra_dc_commit_state(struct tegra_dc *dc,
                value = SHIFT_CLK_DIVIDER(state->div) | PIXEL_CLK_DIVIDER_PCD1;
                tegra_dc_writel(dc, value, DC_DISP_DISP_CLOCK_CONTROL);
        }
-
-       err = clk_set_rate(dc->clk, state->pclk);
-       if (err < 0)
-               dev_err(dc->dev, "failed to set clock %pC to %lu Hz: %d\n",
-                       dc->clk, state->pclk, err);
 }
 
 static void tegra_dc_stop(struct tegra_dc *dc)
@@ -2501,22 +2501,18 @@ static int tegra_dc_couple(struct tegra_dc *dc)
         * POWER_CONTROL registers during CRTC enabling.
         */
        if (dc->soc->coupled_pm && dc->pipe == 1) {
-               u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
-               struct device_link *link;
-               struct device *partner;
+               struct device *companion;
+               struct tegra_dc *parent;
 
-               partner = driver_find_device(dc->dev->driver, NULL, NULL,
-                                            tegra_dc_match_by_pipe);
-               if (!partner)
+               companion = driver_find_device(dc->dev->driver, NULL, (const void *)0,
+                                              tegra_dc_match_by_pipe);
+               if (!companion)
                        return -EPROBE_DEFER;
 
-               link = device_link_add(dc->dev, partner, flags);
-               if (!link) {
-                       dev_err(dc->dev, "failed to link controllers\n");
-                       return -EINVAL;
-               }
+               parent = dev_get_drvdata(companion);
+               dc->client.parent = &parent->client;
 
-               dev_dbg(dc->dev, "coupled to %s\n", dev_name(partner));
+               dev_dbg(dc->dev, "coupled to %s\n", dev_name(companion));
        }
 
        return 0;
index f02a035dda4532083540cef021d0c0e90d9b96d7..7b88261f57bb698bac7e38495a8843e6ac6b1658 100644 (file)
@@ -3115,6 +3115,12 @@ static int tegra_sor_init(struct host1x_client *client)
         * kernel is possible.
         */
        if (sor->rst) {
+               err = pm_runtime_resume_and_get(sor->dev);
+               if (err < 0) {
+                       dev_err(sor->dev, "failed to get runtime PM: %d\n", err);
+                       return err;
+               }
+
                err = reset_control_acquire(sor->rst);
                if (err < 0) {
                        dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
@@ -3148,6 +3154,7 @@ static int tegra_sor_init(struct host1x_client *client)
                }
 
                reset_control_release(sor->rst);
+               pm_runtime_put(sor->dev);
        }
 
        err = clk_prepare_enable(sor->clk_safe);
index 347fb962b6c936ecb25f59807b1a673c385262ed..68a766ff0e9d2479ce3a594410098cfd9f3dccfc 100644 (file)
@@ -705,8 +705,9 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
- * host1x_client_register() - register a host1x client
+ * __host1x_client_register() - register a host1x client
  * @client: host1x client
+ * @key: lock class key for the client-specific mutex
  *
  * Registers a host1x client with each host1x controller instance. Note that
  * each client will only match their parent host1x controller and will only be
@@ -715,13 +716,14 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int host1x_client_register(struct host1x_client *client)
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key)
 {
        struct host1x *host1x;
        int err;
 
        INIT_LIST_HEAD(&client->list);
-       mutex_init(&client->lock);
+       __mutex_init(&client->lock, "host1x client lock", key);
        client->usecount = 0;
 
        mutex_lock(&devices_lock);
@@ -742,7 +744,7 @@ int host1x_client_register(struct host1x_client *client)
 
        return 0;
 }
-EXPORT_SYMBOL(host1x_client_register);
+EXPORT_SYMBOL(__host1x_client_register);
 
 /**
  * host1x_client_unregister() - unregister a host1x client
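Editor's note: renaming the export to __host1x_client_register() suggests the public name becomes a wrapper macro that mints one static lock_class_key per call site, so lockdep can distinguish otherwise-identical client mutexes. The exact macro isn't shown in this hunk; the sketch below models the usual kernel idiom in userspace (GCC statement expression, invented names):

#include <stdio.h>

struct lock_class_key { int dummy; };

static int __register_client(const char *name, struct lock_class_key *key)
{
        printf("register %s with key %p\n", name, (void *)key);
        return 0;
}

/* One static key per expansion site — the usual lockdep wrapper idiom. */
#define register_client(name)                           \
        ({                                              \
                static struct lock_class_key __key;     \
                __register_client(name, &__key);        \
        })

int main(void)
{
        register_client("vic");         /* distinct key ... */
        register_client("nvdec");       /* ... from this one */
        return 0;
}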
index 8769e7aa097f4f6c009ee14f2793e3e9330327e8..81903749d24156ae227fd12705f2bfe915afeec8 100644 (file)
@@ -3610,13 +3610,13 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
            ep->com.local_addr.ss_family == AF_INET) {
                err = cxgb4_remove_server_filter(
                        ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                       ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+                       ep->com.dev->rdev.lldi.rxq_ids[0], false);
        } else {
                struct sockaddr_in6 *sin6;
                c4iw_init_wr_wait(ep->com.wr_waitp);
                err = cxgb4_remove_server(
                                ep->com.dev->rdev.lldi.ports[0], ep->stid,
-                               ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+                               ep->com.dev->rdev.lldi.rxq_ids[0], true);
                if (err)
                        goto done;
                err = c4iw_wait_for_reply(&ep->com.dev->rdev, ep->com.wr_waitp,
index c3934abeb260959bb1f71e1228cb4e08fa9bfcc0..ce26f97b2ca26cc8373e8153d27e374c3c2fe95b 100644 (file)
@@ -1194,8 +1194,10 @@ static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
                           upper_32_bits(dma));
                roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
                           (u32)ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
-               roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
+
+               /* Make sure to write tail first and then head */
                roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
+               roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
        } else {
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
                roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
index de3c2fc6f361c1356443327754b46d3a2ae8c480..07b8350929cd62dedf5c513cd43bdde3e72b09ba 100644 (file)
@@ -1116,7 +1116,7 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
        case MLX5_CMD_OP_CREATE_MKEY:
                MLX5_SET(destroy_mkey_in, din, opcode,
                         MLX5_CMD_OP_DESTROY_MKEY);
-               MLX5_SET(destroy_mkey_in, in, mkey_index, *obj_id);
+               MLX5_SET(destroy_mkey_in, din, mkey_index, *obj_id);
                break;
        case MLX5_CMD_OP_CREATE_CQ:
                MLX5_SET(destroy_cq_in, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
index ec4b3f6a8222ff89c37e87811568ebbba12474ac..f5a52a6fae4372c07c2d87fc74b0767cd3473f75 100644 (file)
@@ -1078,7 +1078,7 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev,
 
        qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
        MLX5_SET(qpc, qpc, uar_page, uar_index);
-       MLX5_SET(qpc, qpc, ts_format, MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
 
        /* Set "fast registration enabled" for all kernel QPs */
@@ -1188,7 +1188,8 @@ static int get_rq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
                }
                return MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
-       return MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
+       return fr_supported ? MLX5_RQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                             MLX5_RQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
@@ -1206,7 +1207,8 @@ static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)
                }
                return MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING;
        }
-       return MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
+       return fr_supported ? MLX5_SQC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                             MLX5_SQC_TIMESTAMP_FORMAT_DEFAULT;
 }
 
 static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
@@ -1217,7 +1219,8 @@ static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING ||
                MLX5_CAP_ROCE(dev->mdev, qp_ts_format) ==
                        MLX5_QP_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
-       int ts_format = MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+       int ts_format = fr_supported ? MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                                      MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
 
        if (recv_cq &&
            recv_cq->create_flags & IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION)
@@ -1930,6 +1933,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        if (qp->flags & IB_QP_CREATE_MANAGED_RECV)
                MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(dev->mdev));
        MLX5_SET(qpc, qpc, rq_type, MLX5_SRQ_RQ);
        MLX5_SET(qpc, qpc, no_sq, 1);
        MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
@@ -4873,6 +4877,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
        struct mlx5_ib_dev *dev;
        int has_net_offloads;
        __be64 *rq_pas0;
+       int ts_format;
        void *in;
        void *rqc;
        void *wq;
@@ -4881,6 +4886,10 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 
        dev = to_mdev(pd->device);
 
+       ts_format = get_rq_ts_format(dev, to_mcq(init_attr->cq));
+       if (ts_format < 0)
+               return ts_format;
+
        inlen = MLX5_ST_SZ_BYTES(create_rq_in) + sizeof(u64) * rwq->rq_num_pas;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
@@ -4890,6 +4899,7 @@ static int  create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        MLX5_SET(rqc,  rqc, mem_rq_type,
                 MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
+       MLX5_SET(rqc, rqc, ts_format, ts_format);
        MLX5_SET(rqc, rqc, user_index, rwq->user_index);
        MLX5_SET(rqc,  rqc, cqn, to_mcq(init_attr->cq)->mcq.cqn);
        MLX5_SET(rqc,  rqc, state, MLX5_RQC_STATE_RST);
index 7168778fbbe1900ceda0c0a030135ffdaa520744..cb0afe8971623668b232173e38dc7aad63d887c4 100644 (file)
@@ -721,7 +721,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_manufacturer(u32 contr, u8 *buf)
+u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN])
 {
        struct capi_ctr *ctr;
        u16 ret;
@@ -787,7 +787,7 @@ u16 capi20_get_version(u32 contr, struct capi_version *verp)
  * Return value: CAPI result code
  */
 
-u16 capi20_get_serial(u32 contr, u8 *serial)
+u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
 {
        struct capi_ctr *ctr;
        u16 ret;
index ec475087fbf931171a1f69a218ed4aa1fdf64c6d..39f841b4248830b41817da9dabcf6d3b404994c1 100644 (file)
@@ -694,7 +694,7 @@ isac_release(struct isac_hw *isac)
 {
        if (isac->type & IPAC_TYPE_ISACX)
                WriteISAC(isac, ISACX_MASK, 0xff);
-       else
+       else if (isac->type != 0)
                WriteISAC(isac, ISAC_MASK, 0xff);
        if (isac->dch.timer.function != NULL) {
                del_timer(&isac->dch.timer);
index 5e306bba437514dc88d7953da912db7dc2ca1d3e..1ca65b434f1faef16ec606319e4afd5dbdc64405 100644 (file)
@@ -529,7 +529,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
         * Grab our output buffer.
         */
        nl = orig_nl = get_result_buffer(param, param_size, &len);
-       if (len < needed) {
+       if (len < needed || len < sizeof(nl->dev)) {
                param->flags |= DM_BUFFER_FULL_FLAG;
                goto out;
        }
index 95391f78b8d55e9b36e1d4ed036700c7c6b7b1ef..e5f0f1703c5dcdc0a5a3b98b7ea77a47082247fd 100644 (file)
@@ -1594,6 +1594,13 @@ static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev,
        return blk_queue_zoned_model(q) != *zoned_model;
 }
 
+/*
+ * Check the device zoned model based on the target feature flag. If the target
+ * has the DM_TARGET_ZONED_HM feature flag set, host-managed zoned devices are
+ * also accepted but all devices must have the same zoned model. If the target
+ * has the DM_TARGET_MIXED_ZONED_MODEL feature set, the devices can have any
+ * zoned model with all zoned devices having the same zone size.
+ */
 static bool dm_table_supports_zoned_model(struct dm_table *t,
                                          enum blk_zoned_model zoned_model)
 {
@@ -1603,13 +1610,15 @@ static bool dm_table_supports_zoned_model(struct dm_table *t,
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
 
-               if (zoned_model == BLK_ZONED_HM &&
-                   !dm_target_supports_zoned_hm(ti->type))
-                       return false;
-
-               if (!ti->type->iterate_devices ||
-                   ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model))
-                       return false;
+               if (dm_target_supports_zoned_hm(ti->type)) {
+                       if (!ti->type->iterate_devices ||
+                           ti->type->iterate_devices(ti, device_not_zoned_model,
+                                                     &zoned_model))
+                               return false;
+               } else if (!dm_target_supports_mixed_zoned_model(ti->type)) {
+                       if (zoned_model == BLK_ZONED_HM)
+                               return false;
+               }
        }
 
        return true;
@@ -1621,9 +1630,17 @@ static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *
        struct request_queue *q = bdev_get_queue(dev->bdev);
        unsigned int *zone_sectors = data;
 
+       if (!blk_queue_is_zoned(q))
+               return 0;
+
        return blk_queue_zone_sectors(q) != *zone_sectors;
 }
 
+/*
+ * Check consistency of zoned model and zone sectors across all targets. For
+ * zone sectors, if the destination device is a zoned block device, it shall
+ * have the specified zone_sectors.
+ */
 static int validate_hardware_zoned_model(struct dm_table *table,
                                         enum blk_zoned_model zoned_model,
                                         unsigned int zone_sectors)
@@ -1642,7 +1659,7 @@ static int validate_hardware_zoned_model(struct dm_table *table,
                return -EINVAL;
 
        if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) {
-               DMERR("%s: zone sectors is not consistent across all devices",
+               DMERR("%s: zone sectors is not consistent across all zoned devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }
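Editor's note: the reworked checks above boil down to "every zoned member of the table must report the requested zone size, while non-zoned members are exempt". A compact model of that consistency walk (types invented for illustration):

#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        bool zoned;
        unsigned int zone_sectors;      /* 0 if not zoned */
};

static bool zone_sectors_consistent(const struct toy_dev *devs, int n,
                                    unsigned int zone_sectors)
{
        for (int i = 0; i < n; i++) {
                if (!devs[i].zoned)
                        continue;       /* non-zoned members are exempt */
                if (devs[i].zone_sectors != zone_sectors)
                        return false;
        }
        return true;
}

int main(void)
{
        struct toy_dev devs[] = {
                { .zoned = true,  .zone_sectors = 524288 },
                { .zoned = false, .zone_sectors = 0 },
                { .zoned = true,  .zone_sectors = 524288 },
        };

        printf("%d\n", zone_sectors_consistent(devs, 3, 524288)); /* 1 */
        return 0;
}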
index 6b8e5bdd8526d99467182cdf8f1c8faba3183935..808a98ef624c305c53815efb82809f27f0810fb9 100644 (file)
@@ -34,7 +34,7 @@
 #define DM_VERITY_OPT_IGN_ZEROES       "ignore_zero_blocks"
 #define DM_VERITY_OPT_AT_MOST_ONCE     "check_at_most_once"
 
-#define DM_VERITY_OPTS_MAX             (2 + DM_VERITY_OPTS_FEC + \
+#define DM_VERITY_OPTS_MAX             (3 + DM_VERITY_OPTS_FEC + \
                                         DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
 
 static unsigned dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;
index 697f9de37355eecc555bc5052f13e8867571b9e0..7e88df64d197b149a2a92bd1bc42eb35375c51bd 100644 (file)
@@ -1143,7 +1143,7 @@ static int dmz_message(struct dm_target *ti, unsigned int argc, char **argv,
 static struct target_type dmz_type = {
        .name            = "zoned",
        .version         = {2, 0, 0},
-       .features        = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
+       .features        = DM_TARGET_SINGLETON | DM_TARGET_MIXED_ZONED_MODEL,
        .module          = THIS_MODULE,
        .ctr             = dmz_ctr,
        .dtr             = dmz_dtr,
index 50b693d776d603727bb4cabc1f4b452c7e262d78..3f3be9408afa74053d09bcfdc0579421fea5c18e 100644 (file)
@@ -2036,7 +2036,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        if (size != dm_get_size(md))
                memset(&md->geometry, 0, sizeof(md->geometry));
 
-       set_capacity_and_notify(md->disk, size);
+       if (!get_capacity(md->disk))
+               set_capacity(md->disk, size);
+       else
+               set_capacity_and_notify(md->disk, size);
 
        dm_table_event_callback(t, event_callback, md);
 
index fe8ca945f3672337fffed4d73b7a5488832db9bc..b67cb0a3ab053a932e36b111cd40deaedadafd27 100644 (file)
@@ -72,7 +72,8 @@ static const struct dmi_system_id dmi_platform_info[] = {
        {}
 };
 
-static const struct resource intel_quark_i2c_res[] = {
+/* This is used as a placeholder and will be modified at runtime */
+static struct resource intel_quark_i2c_res[] = {
        [INTEL_QUARK_IORES_MEM] = {
                .flags = IORESOURCE_MEM,
        },
@@ -85,7 +86,8 @@ static struct mfd_cell_acpi_match intel_quark_acpi_match_i2c = {
        .adr = MFD_ACPI_MATCH_I2C,
 };
 
-static const struct resource intel_quark_gpio_res[] = {
+/* This is used as a placeholder and will be modified at runtime */
+static struct resource intel_quark_gpio_res[] = {
        [INTEL_QUARK_IORES_MEM] = {
                .flags = IORESOURCE_MEM,
        },
index 8bdc44b7e09a188e678eaade1faf5fe035fd0434..3c8f665c15580bcb4c0fcabe2a4c4c41369a59ef 100644 (file)
@@ -127,6 +127,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
        int i, ioaddr, ret;
        struct resource *r;
 
+       ret = 0;
+
        if (pci_enable_device(pdev))
                return -EIO;
 
@@ -139,6 +141,8 @@ static int com20020pci_probe(struct pci_dev *pdev,
        priv->ci = ci;
        mm = &ci->misc_map;
 
+       pci_set_drvdata(pdev, priv);
+
        INIT_LIST_HEAD(&priv->list_dev);
 
        if (mm->size) {
@@ -161,7 +165,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                dev = alloc_arcdev(device);
                if (!dev) {
                        ret = -ENOMEM;
-                       goto out_port;
+                       break;
                }
                dev->dev_port = i;
 
@@ -178,7 +182,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
                        pr_err("IO region %xh-%xh already allocated\n",
                               ioaddr, ioaddr + cm->size - 1);
                        ret = -EBUSY;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                /* Dummy access after Reset
@@ -216,18 +220,18 @@ static int com20020pci_probe(struct pci_dev *pdev,
                if (arcnet_inb(ioaddr, COM20020_REG_R_STATUS) == 0xFF) {
                        pr_err("IO address %Xh is empty!\n", ioaddr);
                        ret = -EIO;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
                if (com20020_check(dev)) {
                        ret = -EIO;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                card = devm_kzalloc(&pdev->dev, sizeof(struct com20020_dev),
                                    GFP_KERNEL);
                if (!card) {
                        ret = -ENOMEM;
-                       goto out_port;
+                       goto err_free_arcdev;
                }
 
                card->index = i;
@@ -253,29 +257,29 @@ static int com20020pci_probe(struct pci_dev *pdev,
 
                ret = devm_led_classdev_register(&pdev->dev, &card->tx_led);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                ret = devm_led_classdev_register(&pdev->dev, &card->recon_led);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                dev_set_drvdata(&dev->dev, card);
 
                ret = com20020_found(dev, IRQF_SHARED);
                if (ret)
-                       goto out_port;
+                       goto err_free_arcdev;
 
                devm_arcnet_led_init(dev, dev->dev_id, i);
 
                list_add(&card->list, &priv->list_dev);
-       }
+               continue;
 
-       pci_set_drvdata(pdev, priv);
-
-       return 0;
-
-out_port:
-       com20020pci_remove(pdev);
+err_free_arcdev:
+               free_arcdev(dev);
+               break;
+       }
+       if (ret)
+               com20020pci_remove(pdev);
        return ret;
 }
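Note: the reworked probe above replaces the shared out_port label with a per-iteration err_free_arcdev label: a failing card frees only its own net_device and breaks out of the loop, and a single com20020pci_remove() call afterwards tears down whatever was already registered on priv->list_dev. A standalone sketch of that shape, with hypothetical helper names standing in for the driver's:

    #include <stdlib.h>

    struct card { struct card *next; };

    static struct card *registered;        /* stands in for priv->list_dev */

    static void remove_all(void)           /* stands in for com20020pci_remove() */
    {
            while (registered) {
                    struct card *c = registered;
                    registered = c->next;
                    free(c);
            }
    }

    static int probe(int ncards)
    {
            int i, ret = 0;

            for (i = 0; i < ncards; i++) {
                    struct card *c = malloc(sizeof(*c));

                    if (!c) {
                            ret = -1;      /* -ENOMEM in the driver */
                            break;         /* nothing of ours to free yet */
                    }
                    if (i == 2) {          /* simulate a late per-card failure */
                            ret = -1;
                            free(c);       /* err_free_arcdev: free this card only */
                            break;
                    }
                    c->next = registered;  /* list_add(&card->list, ...) */
                    registered = c;
            }
            if (ret)
                    remove_all();          /* undo every fully registered card */
            return ret;
    }

    int main(void)
    {
            probe(4);   /* fails on the third card, cleans up the first two */
            return 0;
    }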
 
index 456315bef3a8b7084960ab2e87a79cd09066b48b..74cbbb22470b5ec22015bf5c7563a22edda0e1d3 100644 (file)
@@ -3978,15 +3978,11 @@ static int bond_neigh_init(struct neighbour *n)
 
        rcu_read_lock();
        slave = bond_first_slave_rcu(bond);
-       if (!slave) {
-               ret = -EINVAL;
+       if (!slave)
                goto out;
-       }
        slave_ops = slave->dev->netdev_ops;
-       if (!slave_ops->ndo_neigh_setup) {
-               ret = -EINVAL;
+       if (!slave_ops->ndo_neigh_setup)
                goto out;
-       }
 
        /* TODO: find another way [1] to implement this.
         * Passing a zeroed structure is fragile,
index ef474bae47a14912f8e689bdcc9492ca32a53d14..6958830cb98330d5597d4ef4068b72f57a14b284 100644 (file)
@@ -212,18 +212,6 @@ static const struct can_bittiming_const c_can_bittiming_const = {
        .brp_inc = 1,
 };
 
-static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
-{
-       if (priv->device)
-               pm_runtime_enable(priv->device);
-}
-
-static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
-{
-       if (priv->device)
-               pm_runtime_disable(priv->device);
-}
-
 static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
 {
        if (priv->device)
@@ -1335,7 +1323,6 @@ static const struct net_device_ops c_can_netdev_ops = {
 
 int register_c_can_dev(struct net_device *dev)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
        int err;
 
        /* Deactivate pins to prevent DRA7 DCAN IP from being
@@ -1345,28 +1332,19 @@ int register_c_can_dev(struct net_device *dev)
         */
        pinctrl_pm_select_sleep_state(dev->dev.parent);
 
-       c_can_pm_runtime_enable(priv);
-
        dev->flags |= IFF_ECHO; /* we support local echo */
        dev->netdev_ops = &c_can_netdev_ops;
 
        err = register_candev(dev);
-       if (err)
-               c_can_pm_runtime_disable(priv);
-       else
+       if (!err)
                devm_can_led_init(dev);
-
        return err;
 }
 EXPORT_SYMBOL_GPL(register_c_can_dev);
 
 void unregister_c_can_dev(struct net_device *dev)
 {
-       struct c_can_priv *priv = netdev_priv(dev);
-
        unregister_candev(dev);
-
-       c_can_pm_runtime_disable(priv);
 }
 EXPORT_SYMBOL_GPL(unregister_c_can_dev);
 
index 406b4847e5dc323c35d444a7cfabda4119404c70..7efb60b50876288bc33d6e7449663fd2e821497a 100644 (file)
@@ -239,12 +239,13 @@ static void c_can_pci_remove(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct c_can_priv *priv = netdev_priv(dev);
+       void __iomem *addr = priv->base;
 
        unregister_c_can_dev(dev);
 
        free_c_can_dev(dev);
 
-       pci_iounmap(pdev, priv->base);
+       pci_iounmap(pdev, addr);
        pci_disable_msi(pdev);
        pci_clear_master(pdev);
        pci_release_regions(pdev);
index 05f425ceb53a2df4ed8eddafdaec100a753197cc..47b251b1607cef2869d150958b11a3cefae8886e 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/list.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -386,6 +387,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dev);
        SET_NETDEV_DEV(dev, &pdev->dev);
 
+       pm_runtime_enable(priv->device);
        ret = register_c_can_dev(dev);
        if (ret) {
                dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
@@ -398,6 +400,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
        return 0;
 
 exit_free_device:
+       pm_runtime_disable(priv->device);
        free_c_can_dev(dev);
 exit:
        dev_err(&pdev->dev, "probe failed\n");
@@ -408,9 +411,10 @@ exit:
 static int c_can_plat_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
+       struct c_can_priv *priv = netdev_priv(dev);
 
        unregister_c_can_dev(dev);
-
+       pm_runtime_disable(priv->device);
        free_c_can_dev(dev);
 
        return 0;
index 867f6be31230b7723ab9b9c1aa42e2a6a2d40b2b..f5d79e6e548387254f51ab58d187e04c9d28141a 100644 (file)
@@ -355,6 +355,7 @@ static void can_dellink(struct net_device *dev, struct list_head *head)
 
 struct rtnl_link_ops can_link_ops __read_mostly = {
        .kind           = "can",
+       .netns_refund   = true,
        .maxtype        = IFLA_CAN_MAX,
        .policy         = can_policy,
        .setup          = can_setup,
index 134c05757a3bb19d2eeae17abe7d9232bc28743b..57f3635ad8d7d0e7962e43d81d55e8b75a1bf6e9 100644 (file)
@@ -697,9 +697,15 @@ static int flexcan_chip_disable(struct flexcan_priv *priv)
 static int flexcan_chip_freeze(struct flexcan_priv *priv)
 {
        struct flexcan_regs __iomem *regs = priv->regs;
-       unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+       unsigned int timeout;
+       u32 bitrate = priv->can.bittiming.bitrate;
        u32 reg;
 
+       if (bitrate)
+               timeout = 1000 * 1000 * 10 / bitrate;
+       else
+               timeout = FLEXCAN_TIMEOUT_US / 10;
+
        reg = priv->read(&regs->mcr);
        reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT;
        priv->write(reg, &regs->mcr);
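Note: with the guard above, the freeze timeout is only derived from the bitrate once one has been configured; 1000 * 1000 * 10 / bitrate microseconds is ten bit times, and an unconfigured bitrate (as seen at probe time) now takes a fixed fallback instead of dividing by zero. A standalone sketch of the arithmetic; the fallback constant mirrors the driver's FLEXCAN_TIMEOUT_US, whose value is assumed here:

    #include <stdio.h>

    #define FLEXCAN_TIMEOUT_US 50          /* assumed value for this sketch */

    static unsigned int freeze_timeout_us(unsigned int bitrate)
    {
            if (bitrate)
                    return 1000 * 1000 * 10 / bitrate;  /* ten bit times, in us */
            return FLEXCAN_TIMEOUT_US / 10;             /* probe time: no bitrate yet */
    }

    int main(void)
    {
            printf("%u\n", freeze_timeout_us(500000));  /* 20 us at 500 kbit/s */
            printf("%u\n", freeze_timeout_us(0));       /* 5 us fallback */
            return 0;
    }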
index 37e05010ca914261e245dd0163f5181ee803351f..74d9899fc904c099d51f23c591ec71b2acb6c37e 100644 (file)
@@ -57,6 +57,7 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418
 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c
 #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420
+#define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424
 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428
 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430
 /* Loopback control register */
@@ -949,6 +950,9 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
                timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer,
                            0);
 
+               /* Disable bus load reporting */
+               iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG);
+
                tx_npackets = ioread32(can->reg_base +
                                       KVASER_PCIEFD_KCAN_TX_NPACKETS_REG);
                if (((tx_npackets >> KVASER_PCIEFD_KCAN_TX_NPACKETS_MAX_SHIFT) &
index 3752520a7d4b74a00bc47d1067300742fd02c58d..0c8d36bc668c8219ce134a7b7736bda771f5fcd1 100644 (file)
@@ -501,9 +501,6 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
        }
 
        while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
-               if (rxfs & RXFS_RFL)
-                       netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
-
                m_can_read_fifo(dev, rxfs);
 
                quota--;
@@ -876,7 +873,7 @@ static int m_can_rx_peripheral(struct net_device *dev)
 {
        struct m_can_classdev *cdev = netdev_priv(dev);
 
-       m_can_rx_handler(dev, 1);
+       m_can_rx_handler(dev, M_CAN_NAPI_WEIGHT);
 
        m_can_enable_all_interrupts(cdev);
 
index c1e5d5b570b65ea9e881a7fc74d15b3814a1c06e..538f4d9adb914c084ded6ba3ce69b87334c11ffc 100644 (file)
@@ -73,6 +73,7 @@ config CAN_KVASER_USB
            - Kvaser Memorator Pro 5xHS
            - Kvaser USBcan Light 4xHS
            - Kvaser USBcan Pro 2xHS v2
+           - Kvaser USBcan Pro 4xHS
            - Kvaser USBcan Pro 5xHS
            - Kvaser U100
            - Kvaser U100P
index 2b7efd296758d7cdd5ec83b7bd55ce527886289c..4e97da8434ab5bf815ad96b07c5da60eb78d0781 100644 (file)
@@ -86,8 +86,9 @@
 #define USB_U100_PRODUCT_ID                    273
 #define USB_U100P_PRODUCT_ID                   274
 #define USB_U100S_PRODUCT_ID                   275
+#define USB_USBCAN_PRO_4HS_PRODUCT_ID          276
 #define USB_HYDRA_PRODUCT_ID_END \
-       USB_U100S_PRODUCT_ID
+       USB_USBCAN_PRO_4HS_PRODUCT_ID
 
 static inline bool kvaser_is_leaf(const struct usb_device_id *id)
 {
@@ -193,6 +194,7 @@ static const struct usb_device_id kvaser_usb_table[] = {
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID) },
        { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID) },
+       { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_4HS_PRODUCT_ID) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
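Note: bumping USB_HYDRA_PRODUCT_ID_END matters because the driver classifies devices by product-ID range rather than by listing each ID, so a new highest hydra ID has to both join the device table and become the new end of the range. A standalone sketch of that range test; the lower-bound macro here is an assumption for illustration, not the driver's actual bound:

    #include <stdbool.h>
    #include <stdio.h>

    #define USB_HYDRA_PRODUCT_ID_BEGIN 256   /* assumed lower bound for the sketch */
    #define USB_HYDRA_PRODUCT_ID_END   276   /* now the 4xHS, per the hunk above */

    static bool is_hydra(unsigned int product_id)
    {
            return product_id >= USB_HYDRA_PRODUCT_ID_BEGIN &&
                   product_id <= USB_HYDRA_PRODUCT_ID_END;
    }

    int main(void)
    {
            printf("%d\n", is_hydra(276));   /* 1: USBcan Pro 4xHS is in range */
            printf("%d\n", is_hydra(277));   /* 0: beyond the current end */
            return 0;
    }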
index a162499bcafc2796a047d87c30b2baab11644dbf..eb443721c58e78cdbf73f6db15353ffc033183be 100644 (file)
@@ -1105,13 +1105,6 @@ static int b53_setup(struct dsa_switch *ds)
                        b53_disable_port(ds, port);
        }
 
-       /* Let DSA handle the case were multiple bridges span the same switch
-        * device and different VLAN awareness settings are requested, which
-        * would be breaking filtering semantics for any of the other bridge
-        * devices. (not hardware supported)
-        */
-       ds->vlan_filtering_is_global = true;
-
        return b53_setup_devlink_resources(ds);
 }
 
@@ -2664,6 +2657,13 @@ struct b53_device *b53_switch_alloc(struct device *base,
        ds->ops = &b53_switch_ops;
        ds->untag_bridge_pvid = true;
        dev->vlan_enabled = true;
+       /* Let DSA handle the case where multiple bridges span the same switch
+        * device and different VLAN awareness settings are requested, which
+        * would break filtering semantics for any of the other bridge
+        * devices (not supported by the hardware).
+        */
+       ds->vlan_filtering_is_global = true;
+
        mutex_init(&dev->reg_mutex);
        mutex_init(&dev->stats_mutex);
 
index f277df922fcded6db853d155399e8354287ea299..ba5d546d06aa3629f7ec6c71c25d65bdf2fa615f 100644 (file)
@@ -114,7 +114,10 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
                /* Force link status for IMP port */
                reg = core_readl(priv, offset);
                reg |= (MII_SW_OR | LINK_STS);
-               reg &= ~GMII_SPEED_UP_2G;
+               if (priv->type == BCM4908_DEVICE_ID)
+                       reg |= GMII_SPEED_UP_2G;
+               else
+                       reg &= ~GMII_SPEED_UP_2G;
                core_writel(priv, reg, offset);
 
                /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
@@ -585,8 +588,10 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
         * in bits 15:8 and the patch level in bits 7:0 which is exactly what
         * the REG_PHY_REVISION register layout is.
         */
-
-       return priv->hw_params.gphy_rev;
+       if (priv->int_phy_mask & BIT(port))
+               return priv->hw_params.gphy_rev;
+       else
+               return 0;
 }
 
 static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
index f06f5fa2f898c115c4999cc793c8c91634f2acd0..9871d7cff93a85fd4ba04b7463a72cb3763c3a7d 100644 (file)
@@ -436,34 +436,32 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
                             TD_DM_DRVP(8) | TD_DM_DRVN(8));
 
        /* Setup core clock for MT7530 */
-       if (!trgint) {
-               /* Disable MT7530 core clock */
-               core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-
-               /* Disable PLL, since phy_device has not yet been created
-                * provided for phy_[read,write]_mmd_indirect is called, we
-                * provide our own core_write_mmd_indirect to complete this
-                * function.
-                */
-               core_write_mmd_indirect(priv,
-                                       CORE_GSWPLL_GRP1,
-                                       MDIO_MMD_VEND2,
-                                       0);
-
-               /* Set core clock into 500Mhz */
-               core_write(priv, CORE_GSWPLL_GRP2,
-                          RG_GSWPLL_POSDIV_500M(1) |
-                          RG_GSWPLL_FBKDIV_500M(25));
+       /* Disable MT7530 core clock */
+       core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
-               /* Enable PLL */
-               core_write(priv, CORE_GSWPLL_GRP1,
-                          RG_GSWPLL_EN_PRE |
-                          RG_GSWPLL_POSDIV_200M(2) |
-                          RG_GSWPLL_FBKDIV_200M(32));
-
-               /* Enable MT7530 core clock */
-               core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
-       }
+       /* Disable the PLL. Since no phy_device has been created yet for
+        * phy_[read,write]_mmd_indirect to operate on, we provide our own
+        * core_write_mmd_indirect to complete this function.
+        */
+       core_write_mmd_indirect(priv,
+                               CORE_GSWPLL_GRP1,
+                               MDIO_MMD_VEND2,
+                               0);
+
+       /* Set the core clock to 500 MHz */
+       core_write(priv, CORE_GSWPLL_GRP2,
+                  RG_GSWPLL_POSDIV_500M(1) |
+                  RG_GSWPLL_FBKDIV_500M(25));
+
+       /* Enable PLL */
+       core_write(priv, CORE_GSWPLL_GRP1,
+                  RG_GSWPLL_EN_PRE |
+                  RG_GSWPLL_POSDIV_200M(2) |
+                  RG_GSWPLL_FBKDIV_200M(32));
+
+       /* Enable MT7530 core clock */
+       core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
 
        /* Setup the MT7530 TRGMII Tx Clock */
        core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
index f8a168b73307c03b9de71ad9fefb8bc58a68bf4c..cb88ffb8f12fa7ef4202871f5d3d328b6c486338 100644 (file)
@@ -54,7 +54,7 @@ config B44_PCI
 config BCM4908_ENET
        tristate "Broadcom BCM4908 internal mac support"
        depends on ARCH_BCM4908 || COMPILE_TEST
-       default y
+       default y if ARCH_BCM4908
        help
          This driver supports Ethernet controller integrated into Broadcom
          BCM4908 family SoCs.
index 169e10c913780659bcd6e3128815a996a3896e3f..1115b8f9ea4e393180f6379b108917030e70dce2 100644 (file)
@@ -722,7 +722,7 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
                kvfree(tx_info);
                return 0;
        }
-       tx_info->open_state = false;
+       tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
        spin_unlock(&tx_info->lock);
 
        complete(&tx_info->completion);
index 88bfe21079386193c2397eaac860e49763daa5a6..04421aec2dfd61eb2c299cb27f500abbd1a4aba1 100644 (file)
@@ -1337,6 +1337,7 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
         */
        if (unlikely(priv->need_mac_restart)) {
                ftgmac100_start_hw(priv);
+               priv->need_mac_restart = false;
 
                /* Re-enable "bad" interrupts */
                iowrite32(FTGMAC100_INT_BAD,
index 88faf05e23baf0c76ce0a3cb5b84c8dac2faa803..0b1e890dd583bf6c796df7166e20d903f3a56f95 100644 (file)
@@ -899,6 +899,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
        } else {
                data &= ~IGP02E1000_PM_D0_LPLU;
                ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+               if (ret_val)
+                       return ret_val;
                /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
                 * during Dx states where the power conservation is most
                 * important.  During driver activity we should enable
index 69a2329ea463d7bfe428b7e39539e61921680999..db79c4e6413e5e611803363b4211709ce15ebade 100644 (file)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 1999 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000E_HW_H_
+#define _E1000E_HW_H_
 
 #include "regs.h"
 #include "defines.h"
@@ -714,4 +714,4 @@ struct e1000_hw {
 #include "80003es2lan.h"
 #include "ich8lan.h"
 
-#endif
+#endif /* _E1000E_HW_H_ */
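Note: this rename, like the matching one in the igb copy of the header further down, exists because both drivers carried an e1000_hw.h guarded by the same _E1000_HW_H_ macro; any translation unit that pulled in both would silently drop the second header's body. A compile-only sketch of that failure mode, with hypothetical structs and both "headers" inlined into one file:

    /* first header's contents, guard _E1000_HW_H_ */
    #ifndef _E1000_HW_H_
    #define _E1000_HW_H_
    struct first_hw { int reg; };
    #endif

    /* second header reusing the same guard: its body is skipped, so
     * struct second_hw is never defined in this translation unit */
    #ifndef _E1000_HW_H_
    #define _E1000_HW_H_
    struct second_hw { int reg; };
    #endif

    int main(void)
    {
            struct first_hw ok = { 0 };      /* fine */
            /* struct second_hw gone = { 0 }; would not compile */
            return ok.reg;
    }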
index e9b82c209c2df60534a2eef83fe5d8e230846554..a0948002ddf8584b59956755afadbf0cc3cdf8c3 100644 (file)
@@ -5974,15 +5974,19 @@ static void e1000_reset_task(struct work_struct *work)
        struct e1000_adapter *adapter;
        adapter = container_of(work, struct e1000_adapter, reset_task);
 
+       rtnl_lock();
        /* don't run the task if already down */
-       if (test_bit(__E1000_DOWN, &adapter->state))
+       if (test_bit(__E1000_DOWN, &adapter->state)) {
+               rtnl_unlock();
                return;
+       }
 
        if (!(adapter->flags & FLAG_RESTART_NOW)) {
                e1000e_dump(adapter);
                e_err("Reset adapter unexpectedly\n");
        }
        e1000e_reinit_locked(adapter);
+       rtnl_unlock();
 }
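Note: the pattern above takes the rtnl lock before the __E1000_DOWN test so that the check and the reinit form one atomic unit with respect to a racing ifdown, which means every early return must drop the lock. A standalone sketch of the shape, with a plain mutex standing in for rtnl_lock():

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* stand-in */
    static bool adapter_down;

    static void reset_task(void)
    {
            pthread_mutex_lock(&rtnl);
            if (adapter_down) {             /* don't reset a device being torn down */
                    pthread_mutex_unlock(&rtnl);
                    return;                 /* every exit path drops the lock */
            }
            /* ... reinit the adapter while holding the lock ... */
            pthread_mutex_unlock(&rtnl);
    }

    int main(void)
    {
            reset_task();
            return 0;
    }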
 
 /**
index 353deae139f9415fb2aa8441aa47556cba2aa2d9..17f3b800640e0d3024de1d02468849dc14c8a1bc 100644 (file)
@@ -3258,6 +3258,17 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        return 0;
 }
 
+/**
+ * i40e_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
+ */
+static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
+{
+       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
+}
+
 /**
  * i40e_configure_rx_ring - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -3369,6 +3380,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        else
                set_ring_build_skb_enabled(ring);
 
+       ring->rx_offset = i40e_rx_offset(ring);
+
        /* cache tail for quicker writes, and clear the reg before use */
        ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
        writel(0, ring->tail);
index 627794b31e33398ad276e76322ad04568993733e..5747a99122fb4b49863efaeac49fcbc2d9cec021 100644 (file)
@@ -1569,17 +1569,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
        }
 }
 
-/**
- * i40e_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int i40e_rx_offset(struct i40e_ring *rx_ring)
-{
-       return ring_uses_build_skb(rx_ring) ? I40E_SKB_PAD : 0;
-}
-
 /**
  * i40e_setup_rx_descriptors - Allocate Rx descriptors
  * @rx_ring: Rx descriptor ring (for a specific queue) to setup
@@ -1608,7 +1597,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_alloc = 0;
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-       rx_ring->rx_offset = i40e_rx_offset(rx_ring);
 
        /* XDP RX-queue info only needed for RX rings exposed to XDP */
        if (rx_ring->vsi->type == I40E_VSI_MAIN) {
index 3124a3bf519a826b6f59e1f1fdd12ae599307e1b..1148d768f8ed1bf48b615a025862dd8d57f88f6d 100644 (file)
@@ -274,6 +274,22 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
        tlan_ctx->legacy_int = ICE_TX_LEGACY;
 }
 
+/**
+ * ice_rx_offset - Return expected offset into page to access data
+ * @rx_ring: Ring we are requesting offset of
+ *
+ * Returns the offset into the data buffer for this ring.
+ */
+static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+{
+       if (ice_ring_uses_build_skb(rx_ring))
+               return ICE_SKB_PAD;
+       else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
+               return XDP_PACKET_HEADROOM;
+
+       return 0;
+}
+
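Note: ice_rx_offset() concentrates the headroom policy in one place: build_skb buffers reserve ICE_SKB_PAD for the skb head, XDP-enabled rings reserve XDP_PACKET_HEADROOM so a program can prepend data, and the legacy copy path needs none. A standalone sketch with stand-in values (XDP_PACKET_HEADROOM is 256 in the kernel; the ICE_SKB_PAD value here is assumed):

    #include <stdio.h>

    enum rx_mode { RX_BUILD_SKB, RX_XDP, RX_LEGACY };

    static unsigned int rx_offset(enum rx_mode mode)
    {
            switch (mode) {
            case RX_BUILD_SKB:
                    return 64;        /* stand-in for ICE_SKB_PAD */
            case RX_XDP:
                    return 256;       /* stand-in for XDP_PACKET_HEADROOM */
            default:
                    return 0;         /* copy path: no headroom needed */
            }
    }

    int main(void)
    {
            printf("%u %u %u\n", rx_offset(RX_BUILD_SKB), rx_offset(RX_XDP),
                   rx_offset(RX_LEGACY));
            return 0;
    }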
 /**
  * ice_setup_rx_ctx - Configure a receive ring context
  * @ring: The Rx ring to configure
@@ -413,11 +429,15 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
        else
                ice_set_ring_build_skb_ena(ring);
 
+       ring->rx_offset = ice_rx_offset(ring);
+
        /* init queue specific tail register */
        ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
        writel(0, ring->tail);
 
        if (ring->xsk_pool) {
+               bool ok;
+
                if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
                        dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
                                 num_bufs, ring->q_index);
@@ -426,8 +446,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
                        return 0;
                }
 
-               err = ice_alloc_rx_bufs_zc(ring, num_bufs);
-               if (err)
+               ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+               if (!ok)
                        dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
                                 ring->q_index, pf_q);
                return 0;
index b7dc25da1202af4576a1c75ec28d768787740350..b91dcfd12727df31963a648101f1a6bef1ebcca1 100644 (file)
@@ -443,22 +443,6 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
        }
 }
 
-/**
- * ice_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
-{
-       if (ice_ring_uses_build_skb(rx_ring))
-               return ICE_SKB_PAD;
-       else if (ice_is_xdp_ena_vsi(rx_ring->vsi))
-               return XDP_PACKET_HEADROOM;
-
-       return 0;
-}
-
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
  * @rx_ring: the Rx ring to set up
@@ -493,7 +477,6 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
 
        rx_ring->next_to_use = 0;
        rx_ring->next_to_clean = 0;
-       rx_ring->rx_offset = ice_rx_offset(rx_ring);
 
        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
index 83f3c9574ed1ebde51eeb8b090584e771adc603f..9f94d9159acde05604601af0aaa6292d8af42944 100644 (file)
@@ -358,18 +358,18 @@ xsk_pool_if_up:
  * This function allocates a number of Rx buffers from the fill ring
  * or the internal recycle mechanism and places them on the Rx ring.
  *
- * Returns false if all allocations were successful, true if any fail.
+ * Returns true if all allocations were successful, false if any failed.
  */
 bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
 {
        union ice_32b_rx_flex_desc *rx_desc;
        u16 ntu = rx_ring->next_to_use;
        struct ice_rx_buf *rx_buf;
-       bool ret = false;
+       bool ok = true;
        dma_addr_t dma;
 
        if (!count)
-               return false;
+               return true;
 
        rx_desc = ICE_RX_DESC(rx_ring, ntu);
        rx_buf = &rx_ring->rx_buf[ntu];
@@ -377,7 +377,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
        do {
                rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
                if (!rx_buf->xdp) {
-                       ret = true;
+                       ok = false;
                        break;
                }
 
@@ -402,7 +402,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
                ice_release_rx_desc(rx_ring, ntu);
        }
 
-       return ret;
+       return ok;
 }
 
 /**
index 5d87957b2627c06561f978e84c76a82eb9c5cac3..44111f65afc759fb5a8b76119b2ed97a28c2d9a5 100644 (file)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright(c) 2007 - 2018 Intel Corporation. */
 
-#ifndef _E1000_HW_H_
-#define _E1000_HW_H_
+#ifndef _E1000_IGB_HW_H_
+#define _E1000_IGB_HW_H_
 
 #include <linux/types.h>
 #include <linux/delay.h>
@@ -551,4 +551,4 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 
 void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
-#endif /* _E1000_HW_H_ */
+#endif /* _E1000_IGB_HW_H_ */
index aaa954aae574487127197967897a71a5944d971d..7bda8c5edea5dcee1bb0016a118c52c92c1428e7 100644 (file)
@@ -748,8 +748,8 @@ void igb_ptp_suspend(struct igb_adapter *adapter);
 void igb_ptp_rx_hang(struct igb_adapter *adapter);
 void igb_ptp_tx_hang(struct igb_adapter *adapter);
 void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                        struct sk_buff *skb);
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+                       struct sk_buff *skb);
 int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
 void igb_set_flag_queue_pairs(struct igb_adapter *, const u32);
index 878b31d534ec436a88269b9742b4899c653cea1b..a45cd2b416c897729dae08ef969b4745894b9488 100644 (file)
@@ -8214,7 +8214,8 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
        new_buff->pagecnt_bias  = old_buff->pagecnt_bias;
 }
 
-static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
+static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
+                                 int rx_buf_pgcnt)
 {
        unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
        struct page *page = rx_buffer->page;
@@ -8225,7 +8226,7 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
 
 #if (PAGE_SIZE < 8192)
        /* if we are only owner of page we can reuse it */
-       if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
+       if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
                return false;
 #else
 #define IGB_LAST_OFFSET \
@@ -8301,9 +8302,10 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                return NULL;
 
        if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
-               xdp->data += IGB_TS_HDR_LEN;
-               size -= IGB_TS_HDR_LEN;
+               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) {
+                       xdp->data += IGB_TS_HDR_LEN;
+                       size -= IGB_TS_HDR_LEN;
+               }
        }
 
        /* Determine available headroom for copy */
@@ -8364,8 +8366,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
 
        /* pull timestamp out of packet data */
        if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-               igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
-               __skb_pull(skb, IGB_TS_HDR_LEN);
+               if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb))
+                       __skb_pull(skb, IGB_TS_HDR_LEN);
        }
 
        /* update buffer offset */
@@ -8614,11 +8616,17 @@ static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
 }
 
 static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
-                                              const unsigned int size)
+                                              const unsigned int size, int *rx_buf_pgcnt)
 {
        struct igb_rx_buffer *rx_buffer;
 
        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+       *rx_buf_pgcnt =
+#if (PAGE_SIZE < 8192)
+               page_count(rx_buffer->page);
+#else
+               0;
+#endif
        prefetchw(rx_buffer->page);
 
        /* we are reusing so sync this buffer for CPU use */
@@ -8634,9 +8642,9 @@ static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
 }
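Note: passing rx_buf_pgcnt into igb_can_reuse_rx_page() lets the reuse test compare a page count that was sampled before the buffer was handed to the stack; on small-page systems the half-page may only be recycled while the driver is the sole owner, i.e. the sampled reference count minus the driver's own pagecnt_bias is at most one. A standalone sketch of the ownership test:

    #include <stdbool.h>
    #include <stdio.h>

    /* reuse is safe only while the driver holds the sole extra reference */
    static bool can_reuse_page(int page_refcount, int pagecnt_bias)
    {
            return (page_refcount - pagecnt_bias) <= 1;
    }

    int main(void)
    {
            printf("%d\n", can_reuse_page(2, 1)); /* 1: only the driver's refs */
            printf("%d\n", can_reuse_page(3, 1)); /* 0: the stack still owns it */
            return 0;
    }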
 
 static void igb_put_rx_buffer(struct igb_ring *rx_ring,
-                             struct igb_rx_buffer *rx_buffer)
+                             struct igb_rx_buffer *rx_buffer, int rx_buf_pgcnt)
 {
-       if (igb_can_reuse_rx_page(rx_buffer)) {
+       if (igb_can_reuse_rx_page(rx_buffer, rx_buf_pgcnt)) {
                /* hand second half of page back to the ring */
                igb_reuse_rx_page(rx_ring, rx_buffer);
        } else {
@@ -8664,6 +8672,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
        unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
        u32 frame_sz = 0;
+       int rx_buf_pgcnt;
 
        /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
 #if (PAGE_SIZE < 8192)
@@ -8693,7 +8702,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                 */
                dma_rmb();
 
-               rx_buffer = igb_get_rx_buffer(rx_ring, size);
+               rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt);
 
                /* retrieve a buffer from the ring */
                if (!skb) {
@@ -8736,7 +8745,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
                        break;
                }
 
-               igb_put_rx_buffer(rx_ring, rx_buffer);
+               igb_put_rx_buffer(rx_ring, rx_buffer, rx_buf_pgcnt);
                cleaned_count++;
 
                /* fetch next buffer in frame if non-eop */
index 7cc5428c3b3d2562034ffa54da740acd3234dec9..86a576201f5ff9e4fa3bf0252ea9852ffb8ca0bd 100644 (file)
@@ -856,6 +856,9 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
        dev_kfree_skb_any(skb);
 }
 
+#define IGB_RET_PTP_DISABLED 1
+#define IGB_RET_PTP_INVALID 2
+
 /**
  * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
  * @q_vector: Pointer to interrupt specific structure
@@ -864,19 +867,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
  *
  * This function is meant to retrieve a timestamp from the first buffer of an
  * incoming frame.  The value is stored in little endian format starting on
- * byte 8.
+ * byte 8.
+ *
+ * Returns: 0 on success, non-zero on failure.
  **/
-void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
-                        struct sk_buff *skb)
+int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
+                       struct sk_buff *skb)
 {
-       __le64 *regval = (__le64 *)va;
        struct igb_adapter *adapter = q_vector->adapter;
+       __le64 *regval = (__le64 *)va;
        int adjust = 0;
 
+       if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+               return IGB_RET_PTP_DISABLED;
+
        /* The timestamp is recorded in little endian format.
         * DWORD: 0        1        2        3
         * Field: Reserved Reserved SYSTIML  SYSTIMH
         */
+
+       /* Check that the reserved dwords are zero; byte order doesn't matter for zero */
+       if (regval[0])
+               return IGB_RET_PTP_INVALID;
+
        igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
                                   le64_to_cpu(regval[1]));
 
@@ -896,6 +909,8 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
        }
        skb_hwtstamps(skb)->hwtstamp =
                ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
+       return 0;
 }
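Note: the validated path above treats the 16-byte packet prefix as two little-endian quadwords: the first is reserved and must read as zero, the second carries SYSTIML/SYSTIMH. A standalone sketch of that layout check, illustrative and outside the driver:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* returns 0 on success and fills *ts, non-zero if the prefix is invalid */
    static int parse_rx_timestamp(const uint8_t buf[16], uint64_t *ts)
    {
            uint64_t reserved, stamp;

            memcpy(&reserved, buf, 8);     /* dwords 0-1: must be zero */
            memcpy(&stamp, buf + 8, 8);    /* dwords 2-3: SYSTIML/SYSTIMH */
            if (reserved)
                    return 2;              /* cf. IGB_RET_PTP_INVALID */
            *ts = stamp;                   /* assumes a little-endian host */
            return 0;
    }

    int main(void)
    {
            uint8_t buf[16] = { [8] = 0x12, [9] = 0x34 };
            uint64_t ts;

            if (!parse_rx_timestamp(buf, &ts))
                    printf("raw timestamp: %#llx\n", (unsigned long long)ts);
            return 0;
    }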
 
 /**
@@ -906,13 +921,15 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va,
  * This function is meant to retrieve a timestamp from the internal registers
  * of the adapter and store it in the skb.
  **/
-void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
-                        struct sk_buff *skb)
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
 {
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
-       u64 regval;
        int adjust = 0;
+       u64 regval;
+
+       if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
+               return;
 
        /* If this bit is set, then the RX registers contain the time stamp. No
         * other packet will be time stamped until we read these registers, so
index 5d2809dfd06a46bee9811a5b45374ae51e8895ac..1b08a7dc7bc4a57d28003e0a3c9ef9c5ef6213fb 100644 (file)
@@ -547,7 +547,7 @@ void igc_ptp_init(struct igc_adapter *adapter);
 void igc_ptp_reset(struct igc_adapter *adapter);
 void igc_ptp_suspend(struct igc_adapter *adapter);
 void igc_ptp_stop(struct igc_adapter *adapter);
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
                         struct sk_buff *skb);
 int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
 int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
index 824a6c454bca362ab5b49e1e3a6678c496d21081..8722294ab90ce9131b17b4a519d87dc94d543a23 100644 (file)
@@ -1711,6 +1711,9 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
                                                     Autoneg);
        }
 
+       /* Set pause flow control settings */
+       ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+
        switch (hw->fc.requested_mode) {
        case igc_fc_full:
                ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
@@ -1725,9 +1728,7 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
                                                     Asym_Pause);
                break;
        default:
-               ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
-               ethtool_link_ksettings_add_link_mode(cmd, advertising,
-                                                    Asym_Pause);
+               break;
        }
 
        status = pm_runtime_suspended(&adapter->pdev->dev) ?
index 7ac9597ddb84507c32bbd9ba5aed2561fb77a734..4d989ebc971342f5329f290768dd53ee2a5d2fbb 100644 (file)
@@ -3831,10 +3831,19 @@ static void igc_reset_task(struct work_struct *work)
 
        adapter = container_of(work, struct igc_adapter, reset_task);
 
+       rtnl_lock();
+       /* If we're already down or resetting, just bail */
+       if (test_bit(__IGC_DOWN, &adapter->state) ||
+           test_bit(__IGC_RESETTING, &adapter->state)) {
+               rtnl_unlock();
+               return;
+       }
+
        igc_rings_dump(adapter);
        igc_regs_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        igc_reinit_locked(adapter);
+       rtnl_unlock();
 }
 
 /**
index ac0b9c85da7ca8b7b2260f7c9bc7c7b09bcbe008..545f4d0e67cf44469455c44a00635be88c49b0a8 100644 (file)
@@ -152,46 +152,54 @@ static void igc_ptp_systim_to_hwtstamp(struct igc_adapter *adapter,
 }
 
 /**
- * igc_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * igc_ptp_rx_pktstamp - Retrieve timestamp from Rx packet buffer
  * @q_vector: Pointer to interrupt specific structure
  * @va: Pointer to address containing Rx buffer
  * @skb: Buffer containing timestamp and packet
  *
- * This function is meant to retrieve the first timestamp from the
- * first buffer of an incoming frame. The value is stored in little
- * endian format starting on byte 0. There's a second timestamp
- * starting on byte 8.
- **/
-void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
+ * This function retrieves the timestamp saved at the beginning of the packet
+ * buffer. While two timestamps are available, one in the timer0 reference and
+ * the other in the timer1 reference, this function considers only the
+ * timestamp in the timer0 reference.
+ */
+void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, __le32 *va,
                         struct sk_buff *skb)
 {
        struct igc_adapter *adapter = q_vector->adapter;
-       __le64 *regval = (__le64 *)va;
-       int adjust = 0;
-
-       /* The timestamp is recorded in little endian format.
-        * DWORD: | 0          | 1           | 2          | 3
-        * Field: | Timer0 Low | Timer0 High | Timer1 Low | Timer1 High
+       u64 regval;
+       int adjust;
+
+       /* Timestamps are saved in little endian at the beginning of the packet
+        * buffer following the layout:
+        *
+        * DWORD: | 0              | 1              | 2              | 3              |
+        * Field: | Timer1 SYSTIML | Timer1 SYSTIMH | Timer0 SYSTIML | Timer0 SYSTIMH |
+        *
+        * SYSTIML holds the nanoseconds part while SYSTIMH holds the seconds
+        * part of the timestamp.
         */
-       igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
-                                  le64_to_cpu(regval[0]));
-
-       /* adjust timestamp for the RX latency based on link speed */
-       if (adapter->hw.mac.type == igc_i225) {
-               switch (adapter->link_speed) {
-               case SPEED_10:
-                       adjust = IGC_I225_RX_LATENCY_10;
-                       break;
-               case SPEED_100:
-                       adjust = IGC_I225_RX_LATENCY_100;
-                       break;
-               case SPEED_1000:
-                       adjust = IGC_I225_RX_LATENCY_1000;
-                       break;
-               case SPEED_2500:
-                       adjust = IGC_I225_RX_LATENCY_2500;
-                       break;
-               }
+       regval = le32_to_cpu(va[2]);
+       regval |= (u64)le32_to_cpu(va[3]) << 32;
+       igc_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+
+       /* Adjust timestamp for the RX latency based on link speed */
+       switch (adapter->link_speed) {
+       case SPEED_10:
+               adjust = IGC_I225_RX_LATENCY_10;
+               break;
+       case SPEED_100:
+               adjust = IGC_I225_RX_LATENCY_100;
+               break;
+       case SPEED_1000:
+               adjust = IGC_I225_RX_LATENCY_1000;
+               break;
+       case SPEED_2500:
+               adjust = IGC_I225_RX_LATENCY_2500;
+               break;
+       default:
+               adjust = 0;
+               netdev_warn_once(adapter->netdev, "Imprecise timestamp\n");
+               break;
        }
        skb_hwtstamps(skb)->hwtstamp =
                ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
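Note: the rewrite above reads the timer0 stamp directly as the two little-endian dwords at offsets 2 and 3 of the prefix (SYSTIML, then SYSTIMH) instead of going through a __le64 view. A standalone sketch of that assembly; the input values stand in for dwords already converted with le32_to_cpu():

    #include <stdint.h>
    #include <stdio.h>

    /* build the 64-bit timer0 stamp from the prefix dwords */
    static uint64_t timer0_stamp(const uint32_t dw[4])
    {
            return (uint64_t)dw[2] | ((uint64_t)dw[3] << 32);
    }

    int main(void)
    {
            uint32_t prefix[4] = { 0, 0, 0x89abcdefu, 0x01234567u };

            /* prints 0x123456789abcdef: SYSTIMH lands in the high half */
            printf("%#llx\n", (unsigned long long)timer0_stamp(prefix));
            return 0;
    }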
index 9f3f12e2ccf231a7e9002d89096ab86e4e74d050..03d9aad516d4e3c3a8005b40c521f3882d7694b5 100644 (file)
@@ -4118,6 +4118,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
 #endif
        }
 
+       ring->rx_offset = ixgbe_rx_offset(ring);
+
        if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
                u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
 
@@ -6578,7 +6580,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-       rx_ring->rx_offset = ixgbe_rx_offset(rx_ring);
 
        /* XDP RX-queue info */
        if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, adapter->netdev,
index 7fe15a3286f4a6e2e93df98ff0c50e5b8429d886..fe0989c0fc254be7386f3206d633fcb6d81e0d1f 100644 (file)
@@ -6,7 +6,7 @@
 config NET_VENDOR_MARVELL
        bool "Marvell devices"
        default y
-       depends on PCI || CPU_PXA168 || MV64X60 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
+       depends on PCI || CPU_PXA168 || PPC32 || PLAT_ORION || INET || COMPILE_TEST
        help
          If you have a network (Ethernet) card belonging to this class, say Y.
 
@@ -19,7 +19,7 @@ if NET_VENDOR_MARVELL
 
 config MV643XX_ETH
        tristate "Marvell Discovery (643XX) and Orion ethernet support"
-       depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+       depends on PPC32 || PLAT_ORION || COMPILE_TEST
        depends on INET
        select PHYLIB
        select MVMDIO
index 90e6111ce534dba7f16de983549ade748890124c..3bfb659b5c99f91968bfe0a12eed7a737cb95fcf 100644 (file)
@@ -2684,7 +2684,7 @@ static const struct of_device_id mv643xx_eth_shared_ids[] = {
 MODULE_DEVICE_TABLE(of, mv643xx_eth_shared_ids);
 #endif
 
-#if defined(CONFIG_OF_IRQ) && !defined(CONFIG_MV64X60)
+#ifdef CONFIG_OF_IRQ
 #define mv643xx_eth_property(_np, _name, _v)                           \
        do {                                                            \
                u32 tmp;                                                \
index b192692b4fc4b7bd3e598446c7c3a193c9fcb7d6..5c372d2c24a167ca9298b15b22b7ab94085c43b8 100644 (file)
@@ -13499,8 +13499,6 @@ static struct npc_mcam_kex npc_mkex_default = {
                        [NPC_LT_LC_IP] = {
                                /* SIP+DIP: 8 bytes, KW2[63:0] */
                                KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
-                               /* TOS: 1 byte, KW1[63:56] */
-                               KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
                        },
                        /* Layer C: IPv6 */
                        [NPC_LT_LC_IP6] = {
index d9a1a71c7ccc94f906d0d1d8627f3b19d751a8f5..ab24a5e8ee8a8d5ce95356b2048c1b18222b13ed 100644 (file)
@@ -2462,8 +2462,10 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
                    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
 
        for (irq = 0; irq < rvu->num_vec; irq++) {
-               if (rvu->irq_allocated[irq])
+               if (rvu->irq_allocated[irq]) {
                        free_irq(pci_irq_vector(rvu->pdev, irq), rvu);
+                       rvu->irq_allocated[irq] = false;
+               }
        }
 
        pci_free_irq_vectors(rvu->pdev);
@@ -2975,8 +2977,8 @@ static void rvu_remove(struct pci_dev *pdev)
        struct rvu *rvu = pci_get_drvdata(pdev);
 
        rvu_dbg_exit(rvu);
-       rvu_unregister_interrupts(rvu);
        rvu_unregister_dl(rvu);
+       rvu_unregister_interrupts(rvu);
        rvu_flr_wq_destroy(rvu);
        rvu_cgx_exit(rvu);
        rvu_fwdata_exit(rvu);
index fa6e46e36ae4df8a11b49b20788451a51dd42b4c..76f399229ddb7f10c6196e55e8522548ad070fe9 100644 (file)
@@ -678,6 +678,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
                         u8 *intf, u8 *ena);
 bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
 u32  rvu_cgx_get_fifolen(struct rvu *rvu);
+void *rvu_first_cgx_pdata(struct rvu *rvu);
 
 /* CPT APIs */
 int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
index e668e482383aaef0a908195620653f1cec261763..6e2bf4fcd29cfc0fbc9b967c9f13f435c92a05b2 100644 (file)
@@ -89,6 +89,21 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
        return rvu->cgx_idmap[cgx_id];
 }
 
+/* Return the first enabled CGX instance; if none are enabled, return NULL */
+void *rvu_first_cgx_pdata(struct rvu *rvu)
+{
+       int first_enabled_cgx = 0;
+       void *cgxd = NULL;
+
+       for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
+               cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
+               if (cgxd)
+                       break;
+       }
+
+       return cgxd;
+}
+
 /* Based on P2X connectivity find mapped NIX block for a PF */
 static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
                                  int cgx_id, int lmac_id)
@@ -711,10 +726,9 @@ int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
 u32 rvu_cgx_get_fifolen(struct rvu *rvu)
 {
        struct mac_ops *mac_ops;
-       int rvu_def_cgx_id = 0;
        u32 fifo_len;
 
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        fifo_len = mac_ops ? mac_ops->fifo_len : 0;
 
        return fifo_len;
index aa2ca8780b9c7f91047e65f29fb97bef5cca2181..de3968d2e5ce72aa9e492fe61537bbec038296d4 100644 (file)
@@ -234,12 +234,14 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                          char __user *buffer,
                                          size_t count, loff_t *ppos)
 {
-       int index, off = 0, flag = 0, go_back = 0, off_prev;
+       int index, off = 0, flag = 0, go_back = 0, len = 0;
        struct rvu *rvu = filp->private_data;
        int lf, pf, vf, pcifunc;
        struct rvu_block block;
        int bytes_not_copied;
+       int lf_str_size = 12;
        int buf_size = 2048;
+       char *lfs;
        char *buf;
 
        /* don't allow partial reads */
@@ -249,12 +251,20 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
        buf = kzalloc(buf_size, GFP_KERNEL);
        if (!buf)
                return -ENOSPC;
-       off +=  scnprintf(&buf[off], buf_size - 1 - off, "\npcifunc\t\t");
+
+       lfs = kzalloc(lf_str_size, GFP_KERNEL);
+       if (!lfs) {
+               kfree(buf);
+               return -ENOMEM;
+       }
+       off +=  scnprintf(&buf[off], buf_size - 1 - off, "%-*s", lf_str_size,
+                         "pcifunc");
        for (index = 0; index < BLK_COUNT; index++)
-               if (strlen(rvu->hw->block[index].name))
-                       off +=  scnprintf(&buf[off], buf_size - 1 - off,
-                                         "%*s\t", (index - 1) * 2,
-                                         rvu->hw->block[index].name);
+               if (strlen(rvu->hw->block[index].name)) {
+                       off += scnprintf(&buf[off], buf_size - 1 - off,
+                                        "%-*s", lf_str_size,
+                                        rvu->hw->block[index].name);
+               }
        off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
                for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
@@ -263,14 +273,15 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                continue;
 
                        if (vf) {
+                               sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
                                go_back = scnprintf(&buf[off],
                                                    buf_size - 1 - off,
-                                                   "PF%d:VF%d\t\t", pf,
-                                                   vf - 1);
+                                                   "%-*s", lf_str_size, lfs);
                        } else {
+                               sprintf(lfs, "PF%d", pf);
                                go_back = scnprintf(&buf[off],
                                                    buf_size - 1 - off,
-                                                   "PF%d\t\t", pf);
+                                                   "%-*s", lf_str_size, lfs);
                        }
 
                        off += go_back;
@@ -278,20 +289,22 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
                                block = rvu->hw->block[index];
                                if (!strlen(block.name))
                                        continue;
-                               off_prev = off;
+                               len = 0;
+                               lfs[len] = '\0';
                                for (lf = 0; lf < block.lf.max; lf++) {
                                        if (block.fn_map[lf] != pcifunc)
                                                continue;
                                        flag = 1;
-                                       off += scnprintf(&buf[off], buf_size - 1
-                                                       - off, "%3d,", lf);
+                                       len += sprintf(&lfs[len], "%d,", lf);
                                }
-                               if (flag && off_prev != off)
-                                       off--;
-                               else
-                                       go_back++;
+
+                               if (flag)
+                                       len--;
+                               lfs[len] = '\0';
                                off += scnprintf(&buf[off], buf_size - 1 - off,
-                                               "\t");
+                                                "%-*s", lf_str_size, lfs);
+                               if (!strlen(lfs))
+                                       go_back += lf_str_size;
                        }
                        if (!flag)
                                off -= go_back;
@@ -303,6 +316,7 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
        }
 
        bytes_not_copied = copy_to_user(buffer, buf, off);
+       kfree(lfs);
        kfree(buf);
 
        if (bytes_not_copied)
@@ -319,7 +333,6 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
        struct rvu *rvu = filp->private;
        struct pci_dev *pdev = NULL;
        struct mac_ops *mac_ops;
-       int rvu_def_cgx_id = 0;
        char cgx[10], lmac[10];
        struct rvu_pfvf *pfvf;
        int pf, domain, blkid;
@@ -327,7 +340,10 @@ static int rvu_dbg_rvu_pf_cgx_map_display(struct seq_file *filp, void *unused)
        u16 pcifunc;
 
        domain = 2;
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
+       /* There may be no CGX devices at all */
+       if (!mac_ops)
+               return 0;
        seq_printf(filp, "PCI dev\t\tRVU PF Func\tNIX block\t%s\tLMAC\n",
                   mac_ops->name);
        for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
@@ -1818,7 +1834,6 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
 {
        struct mac_ops *mac_ops;
        unsigned long lmac_bmap;
-       int rvu_def_cgx_id = 0;
        int i, lmac_id;
        char dname[20];
        void *cgx;
@@ -1826,7 +1841,7 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
        if (!cgx_get_cgxcnt_max())
                return;
 
-       mac_ops = get_mac_ops(rvu_cgx_pdata(rvu_def_cgx_id, rvu));
+       mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
        if (!mac_ops)
                return;
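Note: the debugfs rework above sizes every cell with the scnprintf "%-*s" specifier, left-justifying each PF/VF label and LF list into a fixed lf_str_size column instead of juggling tabs and backtracking over the buffer. A standalone sketch of the format:

    #include <stdio.h>

    int main(void)
    {
            const int lf_str_size = 12;    /* the column width the patch picks */
            char lfs[32];

            printf("%-*s%-*s%-*s\n", lf_str_size, "pcifunc",
                   lf_str_size, "NPA", lf_str_size, "NIX");
            snprintf(lfs, sizeof(lfs), "PF%d:VF%d", 1, 3);
            printf("%-*s%-*s%-*s\n", lf_str_size, lfs,
                   lf_str_size, "0,1", lf_str_size, "2");
            return 0;
    }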
 
index d3000194e2d333e32df0ac94831d3f15b1c966e0..3d068b7d46bd60d0cac9077cffd86a43f029823a 100644 (file)
@@ -2629,7 +2629,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
        struct nix_rx_flowkey_alg *field;
        struct nix_rx_flowkey_alg tmp;
        u32 key_type, valid_key;
-       int l4_key_offset;
+       int l4_key_offset = 0;
 
        if (!alg)
                return -EINVAL;
index 04bb0803a5c506fd2f9445c37d9882c6dc9f4669..0bd49c7080a62574f21d8a39c2c4a5f0b7782dd4 100644 (file)
@@ -2490,10 +2490,10 @@ int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu,
                index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
                if (index >= mcam->bmap_entries)
                        break;
+               entry = index + 1;
                if (mcam->entry2cntr_map[index] != req->cntr)
                        continue;
 
-               entry = index + 1;
                npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
                                              index, req->cntr);
        }
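Note: moving entry = index + 1 ahead of the filter matters because the continue path previously left the search cursor in place, so find_next_bit() kept returning the same non-matching entry and the loop could never terminate. A standalone sketch of the fixed shape, with a plain array scan standing in for find_next_bit():

    #include <stdio.h>

    int main(void)
    {
            int bmap[8] = { 0, 1, 1, 0, 1, 0, 0, 0 };  /* allocated entries */
            int cntr[8] = { 0, 7, 5, 0, 5, 0, 0, 0 };  /* mapped counter per entry */
            int wanted = 5, entry = 0, index;

            for (;;) {
                    for (index = entry; index < 8 && !bmap[index]; index++)
                            ;                          /* find_next_bit() stand-in */
                    if (index >= 8)
                            break;
                    entry = index + 1;                 /* advance before filtering */
                    if (cntr[index] != wanted)
                            continue;                  /* skipped, yet no livelock */
                    printf("unmap entry %d\n", index);
            }
            return 0;
    }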
index 0dbbf38e059759763cb51ba0b677de53581d64b4..dc17784209785953786974e30d574d35927a34b8 100644 (file)
@@ -257,17 +257,19 @@ int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
                       u32 *rule_locs)
 {
+       u32 rule_cnt = nfc->rule_cnt;
        u32 location = 0;
        int idx = 0;
        int err = 0;
 
        nfc->data = pfvf->flow_cfg->ntuple_max_flows;
-       while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
+       while ((!err || err == -ENOENT) && idx < rule_cnt) {
                err = otx2_get_flow(pfvf, nfc, location);
                if (!err)
                        rule_locs[idx++] = location;
                location++;
        }
+       nfc->rule_cnt = rule_cnt;
 
        return err;
 }
index 53ab1814d74b30f30ef881526e49a46cc5483083..2fd3d235d292230a6778f97ca27301d735d1222a 100644 (file)
@@ -1672,6 +1672,7 @@ int otx2_stop(struct net_device *netdev)
        struct otx2_nic *pf = netdev_priv(netdev);
        struct otx2_cq_poll *cq_poll = NULL;
        struct otx2_qset *qset = &pf->qset;
+       struct otx2_rss_info *rss;
        int qidx, vec, wrk;
 
        netif_carrier_off(netdev);
@@ -1684,6 +1685,10 @@ int otx2_stop(struct net_device *netdev)
        /* First stop packet Rx/Tx */
        otx2_rxtx_enable(pf, false);
 
+       /* Clear RSS enable flag */
+       rss = &pf->hw.rss_info;
+       rss->enable = false;
+
        /* Cleanup Queue IRQ */
        vec = pci_irq_vector(pf->pdev,
                             pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
index d1e4d42e497d8f37d8f5006861ceba13ac3e4f3a..3712e1786091f90c2f51ec9d0e5acc4fd39c924f 100644 (file)
@@ -1544,8 +1544,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
        clk_disable_unprepare(pep->clk);
        mdiobus_unregister(pep->smi_bus);
        mdiobus_free(pep->smi_bus);
-       unregister_netdev(dev);
        cancel_work_sync(&pep->tx_timeout_task);
+       unregister_netdev(dev);
        free_netdev(dev);
        return 0;
 }
index 7435fe6829b6bf3d8ae6e25b4a41373d47e40dc1..304b296fe8b989210824b62606bd934b0972c9c4 100644 (file)
@@ -92,14 +92,15 @@ struct page_pool;
                                    MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
 #define MLX5_MPWRQ_PAGES_PER_WQE               BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
 
-#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5_ALIGN_MTTS(mtts)          (ALIGN(mtts, 8))
+#define MLX5_ALIGNED_MTTS_OCTW(mtts)   ((mtts) / 2)
+#define MLX5_MTT_OCTW(mtts)            (MLX5_ALIGNED_MTTS_OCTW(MLX5_ALIGN_MTTS(mtts)))
 /* Add another page to MLX5E_REQUIRED_WQE_MTTS as a buffer between
  * WQEs, This page will absorb write overflow by the hardware, when
  * receiving packets larger than MTU. These oversize packets are
  * dropped by the driver at a later stage.
  */
-#define MLX5E_REQUIRED_WQE_MTTS                (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE + 1, 8))
-#define MLX5E_LOG_ALIGNED_MPWQE_PPW    (ilog2(MLX5E_REQUIRED_WQE_MTTS))
+#define MLX5E_REQUIRED_WQE_MTTS                (MLX5_ALIGN_MTTS(MLX5_MPWRQ_PAGES_PER_WQE + 1))
 #define MLX5E_REQUIRED_MTTS(wqes)      (wqes * MLX5E_REQUIRED_WQE_MTTS)
 #define MLX5E_MAX_RQ_NUM_MTTS  \
        ((1 << 16) * 2) /* So that MLX5_MTT_OCTW(num_mtts) fits into u16 */
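Note: the split macros make the rounding explicit: MTT counts are first aligned up to a multiple of 8 and only then halved into octwords, and MLX5_MTT_OCTW() composes the two steps. A standalone sketch of the arithmetic, using 64 pages per WQE plus the one overflow page (the 4 KiB-page case described above):

    #include <stdio.h>

    #define ALIGN_UP(x, a)          (((x) + (a) - 1) / (a) * (a))
    #define ALIGN_MTTS(mtts)        ALIGN_UP(mtts, 8)
    #define ALIGNED_MTTS_OCTW(mtts) ((mtts) / 2)
    #define MTT_OCTW(mtts)          ALIGNED_MTTS_OCTW(ALIGN_MTTS(mtts))

    int main(void)
    {
            /* 65 MTTs (64 pages + 1 overflow page) align to 72, i.e. 36 octwords */
            printf("%d\n", ALIGN_MTTS(65));  /* 72 */
            printf("%d\n", MTT_OCTW(65));    /* 36 */
            return 0;
    }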
index f3f6eb0819489410aa0039e0262016f69eea4ac3..b2cd29847a371ee3dfb4b3b74a2d1154c88ab535 100644 (file)
@@ -1181,7 +1181,8 @@ int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
 
        mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
                                        &ctstate, &ctstate_mask);
-       if (ctstate_mask)
+
+       if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
                return -EOPNOTSUPP;
 
        ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
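Note: the tightened test above distinguishes "the mask touches the tracked bit" from "the rule actually requires +trk": with key/mask matching, a flow only demands the tracked state when the bit is set in both the key and the mask, so ctstate & ctstate_mask must equal MLX5_CT_STATE_TRK_BIT for the no-track match to conflict. A standalone sketch of that key/mask logic; the bit value is assumed for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    #define CT_STATE_TRK_BIT 0x1   /* illustrative bit position */

    /* a rule demands +trk only when the bit is set in key AND mask */
    static bool requires_tracked(unsigned int key, unsigned int mask)
    {
            return (key & mask) == CT_STATE_TRK_BIT;
    }

    int main(void)
    {
            printf("%d\n", requires_tracked(0x1, 0x1)); /* 1: matches +trk */
            printf("%d\n", requires_tracked(0x0, 0x1)); /* 0: matches -trk */
            printf("%d\n", requires_tracked(0x0, 0x0)); /* 0: doesn't care */
            return 0;
    }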
index f8075a604605d15be8959bc6a148a3c994b003ce..172e0474f2e6e0bdd76519d5bb609c623607a0fa 100644 (file)
@@ -685,14 +685,14 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
        u16 vport_num;
        int err = 0;
 
-       if (flow_attr->ip_version == 4) {
+       if (flow_attr->tun_ip_version == 4) {
                /* Addresses are swapped for decap */
                attr.fl.fl4.saddr = esw_attr->rx_tun_attr->dst_ip.v4;
                attr.fl.fl4.daddr = esw_attr->rx_tun_attr->src_ip.v4;
                err = mlx5e_route_lookup_ipv4_get(priv, priv->netdev, &attr);
        }
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-       else if (flow_attr->ip_version == 6) {
+       else if (flow_attr->tun_ip_version == 6) {
                /* Addresses are swapped for decap */
                attr.fl.fl6.saddr = esw_attr->rx_tun_attr->dst_ip.v6;
                attr.fl.fl6.daddr = esw_attr->rx_tun_attr->src_ip.v6;
@@ -718,10 +718,10 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
        esw_attr->rx_tun_attr->decap_vport = vport_num;
 
 out:
-       if (flow_attr->ip_version == 4)
+       if (flow_attr->tun_ip_version == 4)
                mlx5e_route_lookup_ipv4_put(&attr);
 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
-       else if (flow_attr->ip_version == 6)
+       else if (flow_attr->tun_ip_version == 6)
                mlx5e_route_lookup_ipv6_put(&attr);
 #endif
        return err;
index 6a116335bb21c1b5aece90bbba1899a4a3e939e3..7f7b0f6dcdf954f87d5681454f9082c063fac387 100644 (file)
@@ -89,6 +89,7 @@ int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
         * required to establish routing.
         */
        flow_flag_set(flow, TUN_RX);
+       flow->attr->tun_ip_version = ip_version;
        return 0;
 }
 
@@ -1091,7 +1092,7 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
        if (err || !esw_attr->rx_tun_attr->decap_vport)
                goto out;
 
-       key.ip_version = attr->ip_version;
+       key.ip_version = attr->tun_ip_version;
        if (key.ip_version == 4)
                key.endpoint_ip.v4 = esw_attr->rx_tun_attr->dst_ip.v4;
        else
index e472ed0eacfbc72b6e9e15a7bc7539a6bbe57df7..7ed3f9f79f11ac95ce86f9b1c8630b43c689d2e7 100644 (file)
@@ -227,6 +227,10 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
        option_key = (struct geneve_opt *)&enc_opts.key->data[0];
        option_mask = (struct geneve_opt *)&enc_opts.mask->data[0];
 
+       if (option_mask->opt_class == 0 && option_mask->type == 0 &&
+           !memchr_inv(option_mask->opt_data, 0, option_mask->length * 4))
+               return 0;
+
        if (option_key->length > max_tlv_option_data_len) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Matching on GENEVE options: unsupported option len");
index abdf721bb264927f9e941f625d90182804cccfff..f5f2a8fd004695def84fde768e7268ec297fb4bd 100644 (file)
@@ -1887,6 +1887,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
+       int err;
 
        if (!MLX5_CAP_GEN(mdev, cqe_compression))
                return -EOPNOTSUPP;
@@ -1896,7 +1897,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
                return -EINVAL;
        }
 
-       mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       err = mlx5e_modify_rx_cqe_compression_locked(priv, enable);
+       if (err)
+               return err;
+
        priv->channels.params.rx_cqe_compress_def = enable;
 
        return 0;
@@ -2014,8 +2018,13 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
         */
 
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+               struct mlx5e_params old_params;
+
+               old_params = priv->channels.params;
                priv->channels.params = new_channels.params;
                err = mlx5e_num_channels_changed(priv);
+               if (err)
+                       priv->channels.params = old_params;
                goto out;
        }
 
index ec2fcb2a297755f2325e160abb1408aa6205b23e..158f947a85031ea0147e5d50f8c259813f87733a 100644 (file)
@@ -334,9 +334,9 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
                                     rq->wqe_overflow.addr);
 }
 
-static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
 {
-       return (wqe_ix << MLX5E_LOG_ALIGNED_MPWQE_PPW) << PAGE_SHIFT;
+       return MLX5E_REQUIRED_MTTS(wqe_ix) << PAGE_SHIFT;
 }
 
 static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
@@ -577,7 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
                                mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i);
                        u32 byte_count =
                                rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
-                       u64 dma_offset = mlx5e_get_mpwqe_offset(rq, i);
+                       u64 dma_offset = mlx5e_get_mpwqe_offset(i);
 
                        wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
                        wqe->data[0].byte_count = cpu_to_be32(byte_count);
@@ -2368,8 +2368,9 @@ static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
 {
        switch (params->rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               return order_base_2(MLX5E_UMR_WQEBBS) +
-                       mlx5e_get_rq_log_wq_sz(rqp->rqc);
+               return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
+                            order_base_2(MLX5E_UMR_WQEBBS) +
+                            mlx5e_get_rq_log_wq_sz(rqp->rqc));
        default: /* MLX5_WQ_TYPE_CYCLIC */
                return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
        }
@@ -2502,8 +2503,10 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
 {
        int i;
 
-       if (chs->port_ptp)
+       if (chs->port_ptp) {
                mlx5e_port_ptp_close(chs->port_ptp);
+               chs->port_ptp = NULL;
+       }
 
        for (i = 0; i < chs->num; i++)
                mlx5e_close_channel(chs->c[i]);
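
The added NULL assignment is the usual double-free guard: mlx5e_close_channels() can now run twice without passing a stale pointer to mlx5e_port_ptp_close(). A minimal userspace sketch of the pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct chans { void *port_ptp; };

static void close_chans(struct chans *c)
{
	if (c->port_ptp) {
		free(c->port_ptp);
		c->port_ptp = NULL;	/* a repeat close becomes a no-op */
	}
}

int main(void)
{
	struct chans c = { .port_ptp = malloc(16) };

	close_chans(&c);
	close_chans(&c);	/* second call: harmless, no double free */
	puts("ok");
	return 0;
}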
@@ -3810,6 +3813,15 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
                for (j = 0; j < priv->max_opened_tc; j++) {
                        struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
 
+                       s->tx_packets    += sq_stats->packets;
+                       s->tx_bytes      += sq_stats->bytes;
+                       s->tx_dropped    += sq_stats->dropped;
+               }
+       }
+       if (priv->port_ptp_opened) {
+               for (i = 0; i < priv->max_opened_tc; i++) {
+                       struct mlx5e_sq_stats *sq_stats = &priv->port_ptp_stats.sq[i];
+
                        s->tx_packets    += sq_stats->packets;
                        s->tx_bytes      += sq_stats->bytes;
                        s->tx_dropped    += sq_stats->dropped;
@@ -3834,10 +3846,17 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        }
 
        if (mlx5e_is_uplink_rep(priv)) {
+               struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
                stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
                stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
                stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
                stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
+
+               /* The vport multicast counter also counts packets that are dropped due to steering
+                * or rx out of buffer
+                */
+               stats->multicast = VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
        } else {
                mlx5e_fold_sw_stats64(priv, stats);
        }
@@ -4683,8 +4702,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
                struct mlx5e_channel *c = priv->channels.c[i];
 
                mlx5e_rq_replace_xdp_prog(&c->rq, prog);
-               if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state))
+               if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) {
+                       bpf_prog_inc(prog);
                        mlx5e_rq_replace_xdp_prog(&c->xskrq, prog);
+               }
        }
 
 unlock:
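
The added bpf_prog_inc() follows the usual per-holder refcounting rule: presumably the XSK RQ stores its own copy of the program pointer, so it must take its own reference rather than borrow the caller's. A userspace sketch of the rule, with a hypothetical prog type:

#include <stdio.h>
#include <stdlib.h>

struct prog { int refcnt; };

static struct prog *prog_get(struct prog *p)
{
	p->refcnt++;		/* one reference per stored pointer */
	return p;
}

static void prog_put(struct prog *p)
{
	if (--p->refcnt == 0) {	/* last holder frees the object */
		puts("freed");
		free(p);
	}
}

int main(void)
{
	struct prog *p = malloc(sizeof(*p));

	p->refcnt = 1;			  /* reference held by the caller */
	struct prog *rq = prog_get(p);	  /* regular RQ stores it */
	struct prog *xskrq = prog_get(p); /* XSK RQ takes its own */

	prog_put(rq);
	prog_put(xskrq);
	prog_put(p);			  /* last put frees the object */
	return 0;
}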
@@ -4958,6 +4979,11 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
                                     priv->max_nch);
        params->num_tc       = 1;
 
+       /* Set an initial non-zero value, so that mlx5e_select_queue won't
+        * divide by zero if called before channels are first activated.
+        */
+       priv->num_tc_x_num_ch = params->num_channels * params->num_tc;
+
        /* SQ */
        params->log_sq_size = is_kdump_kernel() ?
                MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
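
The comment explains the intent; a sketch of a hypothetical selector shows the failure mode the seed avoids. This is not the driver's mlx5e_select_queue(), just the shape of the arithmetic:

#include <stdio.h>

/* Hypothetical txq pick: reduce a flow hash modulo num_tc_x_num_ch.
 * With the product seeded to a non-zero value at init time, the modulo
 * is safe even before the channels are first activated.
 */
static unsigned int select_queue(unsigned int hash,
				 unsigned int num_tc_x_num_ch)
{
	return hash % num_tc_x_num_ch;	/* faults if the divisor is 0 */
}

int main(void)
{
	unsigned int num_channels = 8, num_tc = 1;
	unsigned int num_tc_x_num_ch = num_channels * num_tc;	/* the seed */

	printf("%u\n", select_queue(12345u, num_tc_x_num_ch));	/* 1 */
	return 0;
}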
@@ -5474,8 +5500,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
                    struct net_device *netdev,
                    struct mlx5_core_dev *mdev)
 {
-       memset(priv, 0, sizeof(*priv));
-
        /* priv init */
        priv->mdev        = mdev;
        priv->netdev      = netdev;
@@ -5508,12 +5532,18 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
 {
        int i;
 
+       /* bail if the profile change failed and the rollback failed too */
+       if (!priv->mdev)
+               return;
+
        destroy_workqueue(priv->wq);
        free_cpumask_var(priv->scratchpad.cpumask);
 
        for (i = 0; i < priv->htb.max_qos_sqs; i++)
                kfree(priv->htb.qos_sq_stats[i]);
        kvfree(priv->htb.qos_sq_stats);
+
+       memset(priv, 0, sizeof(*priv));
 }
 
 struct net_device *
@@ -5630,11 +5660,10 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 }
 
 static int
-mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
                            const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
-       struct net_device *netdev = priv->netdev;
-       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
 
        err = mlx5e_priv_init(priv, netdev, mdev);
@@ -5647,10 +5676,16 @@ mlx5e_netdev_attach_profile(struct mlx5e_priv *priv,
        priv->ppriv = new_ppriv;
        err = new_profile->init(priv->mdev, priv->netdev);
        if (err)
-               return err;
+               goto priv_cleanup;
        err = mlx5e_attach_netdev(priv);
        if (err)
-               new_profile->cleanup(priv);
+               goto profile_cleanup;
+       return err;
+
+profile_cleanup:
+       new_profile->cleanup(priv);
+priv_cleanup:
+       mlx5e_priv_cleanup(priv);
        return err;
 }
 
@@ -5659,13 +5694,14 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
 {
        unsigned int new_max_nch = mlx5e_calc_max_nch(priv, new_profile);
        const struct mlx5e_profile *orig_profile = priv->profile;
+       struct net_device *netdev = priv->netdev;
+       struct mlx5_core_dev *mdev = priv->mdev;
        void *orig_ppriv = priv->ppriv;
        int err, rollback_err;
 
        /* sanity */
        if (new_max_nch != priv->max_nch) {
-               netdev_warn(priv->netdev,
-                           "%s: Replacing profile with different max channels\n",
+               netdev_warn(netdev, "%s: Replacing profile with different max channels\n",
                            __func__);
                return -EINVAL;
        }
@@ -5675,22 +5711,19 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
        priv->profile->cleanup(priv);
        mlx5e_priv_cleanup(priv);
 
-       err = mlx5e_netdev_attach_profile(priv, new_profile, new_ppriv);
+       err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
        if (err) { /* roll back to original profile */
-               netdev_warn(priv->netdev, "%s: new profile init failed, %d\n",
-                           __func__, err);
+               netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
                goto rollback;
        }
 
        return 0;
 
 rollback:
-       rollback_err = mlx5e_netdev_attach_profile(priv, orig_profile, orig_ppriv);
-       if (rollback_err) {
-               netdev_err(priv->netdev,
-                          "%s: failed to rollback to orig profile, %d\n",
+       rollback_err = mlx5e_netdev_attach_profile(netdev, mdev, orig_profile, orig_ppriv);
+       if (rollback_err)
+               netdev_err(netdev, "%s: failed to rollback to orig profile, %d\n",
                           __func__, rollback_err);
-       }
        return err;
 }
 
index 1b6ad94ebb10342293d348b425269638149ed4b9..249d8905e644aaf8adbd7e3a0f1ed64f2ef9f4f5 100644 (file)
@@ -500,7 +500,6 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        struct mlx5e_icosq *sq = rq->icosq;
        struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_umr_wqe *umr_wqe;
-       u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1);
        u16 pi;
        int err;
        int i;
@@ -531,7 +530,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
        umr_wqe->ctrl.opmod_idx_opcode =
                cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
                            MLX5_OPCODE_UMR);
-       umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
+       umr_wqe->uctrl.xlt_offset =
+               cpu_to_be16(MLX5_ALIGNED_MTTS_OCTW(MLX5E_REQUIRED_MTTS(ix)));
 
        sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
                .wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
index 0da69b98f38fe4920624c67773a8e8ffe140a6c9..df2a0af854bbf6c7789585d090589c64d31bc6f6 100644 (file)
@@ -2296,6 +2296,16 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                        *match_level = MLX5_MATCH_L4;
        }
 
+       /* Currently supported only for MPLS over UDP */
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
+           !netif_is_bareudp(filter_dev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "Matching on MPLS is supported only for MPLS over UDP");
+               netdev_err(priv->netdev,
+                          "Matching on MPLS is supported only for MPLS over UDP\n");
+               return -EOPNOTSUPP;
+       }
+
        return 0;
 }
 
@@ -2899,6 +2909,37 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
        return 0;
 }
 
+static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
+                                  bool ct_flow, struct netlink_ext_ack *extack,
+                                  struct mlx5e_priv *priv,
+                                  struct mlx5_flow_spec *spec)
+{
+       if (!modify_tuple || ct_clear)
+               return true;
+
+       if (ct_flow) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload tuple modification with non-clear ct()");
+               netdev_info(priv->netdev,
+                           "can't offload tuple modification with non-clear ct()");
+               return false;
+       }
+
+       /* Add ct_state=-trk match so it will be offloaded for non ct flows
+        * (or after clear action), as otherwise, since the tuple is changed,
+        * we can't restore ct state
+        */
+       if (mlx5_tc_ct_add_no_trk_match(spec)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload tuple modification with ct matches and no ct(clear) action");
+               netdev_info(priv->netdev,
+                           "can't offload tuple modification with ct matches and no ct(clear) action");
+               return false;
+       }
+
+       return true;
+}
+
 static bool modify_header_match_supported(struct mlx5e_priv *priv,
                                          struct mlx5_flow_spec *spec,
                                          struct flow_action *flow_action,
@@ -2937,18 +2978,9 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
                        return err;
        }
 
-       /* Add ct_state=-trk match so it will be offloaded for non ct flows
-        * (or after clear action), as otherwise, since the tuple is changed,
-        *  we can't restore ct state
-        */
-       if (!ct_clear && modify_tuple &&
-           mlx5_tc_ct_add_no_trk_match(spec)) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "can't offload tuple modify header with ct matches");
-               netdev_info(priv->netdev,
-                           "can't offload tuple modify header with ct matches");
+       if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
+                                   priv, spec))
                return false;
-       }
 
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
@@ -4445,7 +4477,8 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
         */
        if (rate) {
                rate = (rate * BITS_PER_BYTE) + 500000;
-               rate_mbps = max_t(u64, do_div(rate, 1000000), 1);
+               do_div(rate, 1000000);
+               rate_mbps = max_t(u32, rate, 1);
        }
 
        err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
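
The removed expression misused do_div(): the macro divides its first argument in place and returns the remainder, so max_t(u64, do_div(rate, 1000000), 1) compared the remainder, not the quotient, against 1. A userspace analogue of the semantics:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's do_div(): divide n in place,
 * return the remainder.
 */
static uint32_t do_div_sketch(uint64_t *n, uint32_t base)
{
	uint32_t rem = (uint32_t)(*n % base);

	*n /= base;
	return rem;
}

int main(void)
{
	uint64_t rate = 2500000;	/* 2.5 Mbit/s expressed in bit/s */
	uint32_t rem = do_div_sketch(&rate, 1000000);

	/* The old code effectively used rem (500000) as the rate in
	 * Mbit/s; the fix uses the quotient left in rate (2).
	 */
	printf("quotient=%llu remainder=%u\n", (unsigned long long)rate, rem);
	return 0;
}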
index 89003ae7775a811d8e700dbf267c0537c4185ea5..25c091795bcd863148fb12f28217603e84b96136 100644 (file)
@@ -79,6 +79,7 @@ struct mlx5_flow_attr {
        u8 inner_match_level;
        u8 outer_match_level;
        u8 ip_version;
+       u8 tun_ip_version;
        u32 flags;
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
index 94cb0217b4f39202da2f91ce910842548053b276..8694b83968b4c4fae3a681b8c39200407a8b1242 100644 (file)
@@ -551,7 +551,8 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 
        if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
            MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve) &&
-           mlx5_eswitch_vport_match_metadata_enabled(esw))
+           mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+           MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
                attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;
 
        if (attr->dest_ft) {
index 80da50e1291532e99525ea5da838eedec1f64f2b..bd66ab2af5b5419c9bac13f66b7b122c043377fc 100644 (file)
@@ -575,6 +575,7 @@ static int mlx5_fpga_conn_create_qp(struct mlx5_fpga_conn *conn,
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
        MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
        MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
        MLX5_SET64(qpc, qpc, dbr_addr, conn->qp.wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
index 1eeca45cfcdf2560e66f791066bfe98550101f99..6f7cef47e04cd66d1359ab672de0d52735e68ffc 100644 (file)
@@ -233,6 +233,7 @@ int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
        }
 
        qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(priv->mdev));
        MLX5_SET(qpc, qpc, st, MLX5_QP_ST_UD);
        MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
        MLX5_SET(qpc, qpc, ulp_stateless_offload_mode,
@@ -694,6 +695,7 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 static void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
        struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5i_priv *ipriv = priv->ppriv;
        const struct mlx5e_profile *profile = priv->profile;
 
@@ -702,7 +704,7 @@ static void mlx5_rdma_netdev_free(struct net_device *netdev)
 
        if (!ipriv->sub_interface) {
                mlx5i_pkey_qpn_ht_cleanup(netdev);
-               mlx5e_destroy_mdev_resources(priv->mdev);
+               mlx5e_destroy_mdev_resources(mdev);
        }
 }
 
index b0e129d0f6d8f70c69fa7de039d1573f9f542498..1e7f26b240de028e3c28e73eb82a1a50105f7e57 100644 (file)
@@ -495,15 +495,15 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
                return -EINVAL;
 
        field_select = MLX5_MTPPS_FS_ENABLE;
+       pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
+       if (pin < 0)
+               return -EBUSY;
+
        if (on) {
                bool rt_mode = mlx5_real_time_mode(mdev);
                u32 nsec;
                s64 sec;
 
-               pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT, rq->perout.index);
-               if (pin < 0)
-                       return -EBUSY;
-
                pin_mode = MLX5_PIN_MODE_OUT;
                pattern = MLX5_OUT_PATTERN_PERIODIC;
                ts.tv_sec = rq->perout.period.sec;
index b265f27b2166d5be72854adacbda9adaff85a3a4..90b524c59f3c324eb49c385fe7ea98200bf308ac 100644 (file)
@@ -181,15 +181,13 @@ static int mlx5_sf_dev_vhca_arm_all(struct mlx5_sf_dev_table *table)
        u16 max_functions;
        u16 function_id;
        int err = 0;
-       bool ecpu;
        int i;
 
        max_functions = mlx5_sf_max_functions(dev);
        function_id = MLX5_CAP_GEN(dev, sf_base_id);
-       ecpu = mlx5_read_embedded_cpu(dev);
        /* Arm the vhca context as the vhca event notifier */
        for (i = 0; i < max_functions; i++) {
-               err = mlx5_vhca_event_arm(dev, function_id, ecpu);
+               err = mlx5_vhca_event_arm(dev, function_id);
                if (err)
                        return err;
 
index 58b6be0b03d7fda16992250ad2561297d9ff6c08..a5a0f60bef66b94f1171dae22b9e7ecd1a174048 100644 (file)
@@ -6,7 +6,7 @@
 #include "sf.h"
 #include "mlx5_ifc_vhca_event.h"
 #include "vhca_event.h"
-#include "ecpf.h"
+#include "mlx5_core.h"
 
 struct mlx5_sf_hw {
        u32 usr_sfnum;
@@ -18,7 +18,6 @@ struct mlx5_sf_hw_table {
        struct mlx5_core_dev *dev;
        struct mlx5_sf_hw *sfs;
        int max_local_functions;
-       u8 ecpu: 1;
        struct mutex table_lock; /* Serializes sf deletion and vhca state change handler. */
        struct notifier_block vhca_nb;
 };
@@ -64,7 +63,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
        }
        if (sw_id == -ENOSPC) {
                err = -ENOSPC;
-               goto err;
+               goto exist_err;
        }
 
        hw_fn_id = mlx5_sf_sw_to_hw_id(table->dev, sw_id);
@@ -72,7 +71,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 usr_sfnum)
        if (err)
                goto err;
 
-       err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, table->ecpu, usr_sfnum);
+       err = mlx5_modify_vhca_sw_id(dev, hw_fn_id, usr_sfnum);
        if (err)
                goto vhca_err;
 
@@ -118,7 +117,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u16 id)
 
        hw_fn_id = mlx5_sf_sw_to_hw_id(dev, id);
        mutex_lock(&table->table_lock);
-       err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, table->ecpu, out, sizeof(out));
+       err = mlx5_cmd_query_vhca_state(dev, hw_fn_id, out, sizeof(out));
        if (err)
                goto err;
        state = MLX5_GET(query_vhca_state_out, out, vhca_state_context.vhca_state);
@@ -164,7 +163,6 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
        table->dev = dev;
        table->sfs = sfs;
        table->max_local_functions = max_functions;
-       table->ecpu = mlx5_read_embedded_cpu(dev);
        dev->priv.sf_hw_table = table;
        mlx5_core_dbg(dev, "SF HW table: max sfs = %d\n", max_functions);
        return 0;
index 1daf5a122ba303c79a585a23ff2ef23276e64e78..4fc870140d710115d7a39588852bc2094e958340 100644 (file)
@@ -20,7 +20,7 @@ struct mlx5_ifc_vhca_state_context_bits {
 
        u8         sw_function_id[0x20];
 
-       u8         reserved_at_40[0x80];
+       u8         reserved_at_40[0x40];
 };
 
 struct mlx5_ifc_query_vhca_state_out_bits {
index af2f2dd9db25026e9e75a640e738bc6ad9eb54ea..28b14b05086f636a1b2923252e3f0b74742bf0ec 100644 (file)
@@ -19,52 +19,51 @@ struct mlx5_vhca_event_work {
        struct mlx5_vhca_state_event event;
 };
 
-int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                             bool ecpu, u32 *out, u32 outlen)
+int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id, u32 *out, u32 outlen)
 {
        u32 in[MLX5_ST_SZ_DW(query_vhca_state_in)] = {};
 
        MLX5_SET(query_vhca_state_in, in, opcode, MLX5_CMD_OP_QUERY_VHCA_STATE);
        MLX5_SET(query_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(query_vhca_state_in, in, embedded_cpu_function, 0);
 
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }
 
 static int mlx5_cmd_modify_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                                     bool ecpu, u32 *in, u32 inlen)
+                                     u32 *in, u32 inlen)
 {
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
 
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }
 
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id)
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id)
 {
        u32 out[MLX5_ST_SZ_DW(modify_vhca_state_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, opcode, MLX5_CMD_OP_MODIFY_VHCA_STATE);
        MLX5_SET(modify_vhca_state_in, in, function_id, function_id);
-       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, ecpu);
+       MLX5_SET(modify_vhca_state_in, in, embedded_cpu_function, 0);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.sw_function_id, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.sw_function_id, sw_fn_id);
 
        return mlx5_cmd_exec_inout(dev, modify_vhca_state, in, out);
 }
 
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu)
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id)
 {
        u32 in[MLX5_ST_SZ_DW(modify_vhca_state_in)] = {};
 
        MLX5_SET(modify_vhca_state_in, in, vhca_state_context.arm_change_event, 1);
        MLX5_SET(modify_vhca_state_in, in, vhca_state_field_select.arm_change_event, 1);
 
-       return mlx5_cmd_modify_vhca_state(dev, function_id, ecpu, in, sizeof(in));
+       return mlx5_cmd_modify_vhca_state(dev, function_id, in, sizeof(in));
 }
 
 static void
@@ -73,7 +72,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
        u32 out[MLX5_ST_SZ_DW(query_vhca_state_out)] = {};
        int err;
 
-       err = mlx5_cmd_query_vhca_state(dev, event->function_id, event->ecpu, out, sizeof(out));
+       err = mlx5_cmd_query_vhca_state(dev, event->function_id, out, sizeof(out));
        if (err)
                return;
 
@@ -82,7 +81,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
        event->new_vhca_state = MLX5_GET(query_vhca_state_out, out,
                                         vhca_state_context.vhca_state);
 
-       mlx5_vhca_event_arm(dev, event->function_id, event->ecpu);
+       mlx5_vhca_event_arm(dev, event->function_id);
 
        blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
 }
@@ -94,6 +93,7 @@ static void mlx5_vhca_state_work_handler(struct work_struct *_work)
        struct mlx5_core_dev *dev = notifier->dev;
 
        mlx5_vhca_event_notify(dev, &work->event);
+       kfree(work);
 }
 
 static int
@@ -110,7 +110,6 @@ mlx5_vhca_state_change_notifier(struct notifier_block *nb, unsigned long type, v
        INIT_WORK(&work->work, &mlx5_vhca_state_work_handler);
        work->notifier = notifier;
        work->event.function_id = be16_to_cpu(eqe->data.vhca_state.function_id);
-       work->event.ecpu = be16_to_cpu(eqe->data.vhca_state.ec_function);
        mlx5_events_work_enqueue(notifier->dev, &work->work);
        return NOTIFY_OK;
 }
index 1fe1ec6f4d4b01bd24b1564cae30a539a3240c16..013cdfe90616fe3b76e06b3ea0d72fa14b76320d 100644 (file)
@@ -10,7 +10,6 @@ struct mlx5_vhca_state_event {
        u16 function_id;
        u16 sw_function_id;
        u8 new_vhca_state;
-       bool ecpu;
 };
 
 static inline bool mlx5_vhca_event_supported(const struct mlx5_core_dev *dev)
@@ -25,10 +24,10 @@ void mlx5_vhca_event_start(struct mlx5_core_dev *dev);
 void mlx5_vhca_event_stop(struct mlx5_core_dev *dev);
 int mlx5_vhca_event_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
 void mlx5_vhca_event_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
-int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, bool ecpu, u32 sw_fn_id);
-int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id, bool ecpu);
+int mlx5_modify_vhca_sw_id(struct mlx5_core_dev *dev, u16 function_id, u32 sw_fn_id);
+int mlx5_vhca_event_arm(struct mlx5_core_dev *dev, u16 function_id);
 int mlx5_cmd_query_vhca_state(struct mlx5_core_dev *dev, u16 function_id,
-                             bool ecpu, u32 *out, u32 outlen);
+                             u32 *out, u32 outlen);
 #else
 
 static inline void mlx5_vhca_state_cap_handle(struct mlx5_core_dev *dev, void *set_hca_cap)
index 83c4c877d558c02673fdcb27cddee60f5cea27e0..8a6a56f9dc4ec4a7a13f09efb40cb3d852695cbd 100644 (file)
@@ -169,6 +169,7 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
        MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
        MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
        MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
        MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
        if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
                MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
index 4088d6e515082f729bcde3e2c7b2f378eb337ff3..9143ec326ebfbda60c7317002a6870f63c9f8610 100644 (file)
@@ -264,8 +264,8 @@ static void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
 static u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
 {
        u64 index =
-               (MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
-                MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32) << 26);
+               ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+                ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
 
        return index << 6;
 }
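
The casts matter because miss_address_39_32 is an 8-bit field: shifting it left by 26 in 32-bit arithmetic drops its top two bits before the value widens to u64. A small demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0xff;	/* worst-case 8-bit miss_address_39_32 */

	uint64_t wrong = hi << 26;		/* shift done in 32 bits */
	uint64_t right = (uint64_t)hi << 26;	/* shift done in 64 bits */

	printf("wrong=0x%llx right=0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}

This prints wrong=0xfc000000 and right=0x3fc000000: bits 32 and 33 of the miss address are silently lost without the cast.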
index 5defd31d481c26a6bb650301bc4cd16c977f5cb5..aa06fcb38f8b993a1f6c684d2f9c66eb98e3e429 100644 (file)
@@ -327,8 +327,14 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                goto err_free_ctx_entry;
        }
 
+       /* Do not allocate a mask-id for pre_tun_rules. These flows are used to
+        * configure the pre_tun table and are never actually sent to the
+        * firmware as an add-flow message. Allocating one here would cause
+        * the firmware's mask-id allocation to get out of sync.
+        */
        new_mask_id = 0;
-       if (!nfp_check_mask_add(app, nfp_flow->mask_data,
+       if (!nfp_flow->pre_tun_rule.dev &&
+           !nfp_check_mask_add(app, nfp_flow->mask_data,
                                nfp_flow->meta.mask_len,
                                &nfp_flow->meta.flags, &new_mask_id)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot allocate a new mask id");
@@ -359,7 +365,8 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
                        goto err_remove_mask;
                }
 
-               if (!nfp_check_mask_remove(app, nfp_flow->mask_data,
+               if (!nfp_flow->pre_tun_rule.dev &&
+                   !nfp_check_mask_remove(app, nfp_flow->mask_data,
                                           nfp_flow->meta.mask_len,
                                           NULL, &new_mask_id)) {
                        NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot release mask id");
@@ -374,8 +381,10 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
        return 0;
 
 err_remove_mask:
-       nfp_check_mask_remove(app, nfp_flow->mask_data, nfp_flow->meta.mask_len,
-                             NULL, &new_mask_id);
+       if (!nfp_flow->pre_tun_rule.dev)
+               nfp_check_mask_remove(app, nfp_flow->mask_data,
+                                     nfp_flow->meta.mask_len,
+                                     NULL, &new_mask_id);
 err_remove_rhash:
        WARN_ON_ONCE(rhashtable_remove_fast(&priv->stats_ctx_table,
                                            &ctx_entry->ht_node,
@@ -406,9 +415,10 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
 
        __nfp_modify_flow_metadata(priv, nfp_flow);
 
-       nfp_check_mask_remove(app, nfp_flow->mask_data,
-                             nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
-                             &new_mask_id);
+       if (!nfp_flow->pre_tun_rule.dev)
+               nfp_check_mask_remove(app, nfp_flow->mask_data,
+                                     nfp_flow->meta.mask_len, &nfp_flow->meta.flags,
+                                     &new_mask_id);
 
        /* Update flow payload with mask ids. */
        nfp_flow->unmasked_data[NFP_FL_MASK_ID_LOCATION] = new_mask_id;
index 1c59aff2163c7e193d626e2eda9ffefa5604d478..d72225d64a75da13a123a3fcf7d362b5310a3a4f 100644 (file)
@@ -1142,6 +1142,12 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                return -EOPNOTSUPP;
        }
 
+       if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
+           !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: match on ipv4/ipv6 eth_type must be present");
+               return -EOPNOTSUPP;
+       }
+
        /* Skip fields known to exist. */
        mask += sizeof(struct nfp_flower_meta_tci);
        ext += sizeof(struct nfp_flower_meta_tci);
@@ -1152,6 +1158,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
        mask += sizeof(struct nfp_flower_in_port);
        ext += sizeof(struct nfp_flower_in_port);
 
+       /* Ensure destination MAC address matches pre_tun_dev. */
+       mac = (struct nfp_flower_mac_mpls *)ext;
+       if (memcmp(&mac->mac_dst[0], flow->pre_tun_rule.dev->dev_addr, 6)) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: dest MAC must match output dev MAC");
+               return -EOPNOTSUPP;
+       }
+
        /* Ensure destination MAC address is fully matched. */
        mac = (struct nfp_flower_mac_mpls *)mask;
        if (!is_broadcast_ether_addr(&mac->mac_dst[0])) {
@@ -1159,6 +1172,11 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
                return -EOPNOTSUPP;
        }
 
+       if (mac->mpls_lse) {
+               NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: MPLS not supported");
+               return -EOPNOTSUPP;
+       }
+
        mask += sizeof(struct nfp_flower_mac_mpls);
        ext += sizeof(struct nfp_flower_mac_mpls);
        if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
index 7248d248f60416f18b7263622b2866da8627e410..d19c02e991145bed556f3500c6007e253057881e 100644 (file)
@@ -16,8 +16,9 @@
 #define NFP_FL_MAX_ROUTES               32
 
 #define NFP_TUN_PRE_TUN_RULE_LIMIT     32
-#define NFP_TUN_PRE_TUN_RULE_DEL       0x1
-#define NFP_TUN_PRE_TUN_IDX_BIT                0x8
+#define NFP_TUN_PRE_TUN_RULE_DEL       BIT(0)
+#define NFP_TUN_PRE_TUN_IDX_BIT                BIT(3)
+#define NFP_TUN_PRE_TUN_IPV6_BIT       BIT(7)
 
 /**
  * struct nfp_tun_pre_tun_rule - rule matched before decap
@@ -1268,6 +1269,7 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
 {
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_tun_offloaded_mac *mac_entry;
+       struct nfp_flower_meta_tci *key_meta;
        struct nfp_tun_pre_tun_rule payload;
        struct net_device *internal_dev;
        int err;
@@ -1290,6 +1292,15 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app,
        if (!mac_entry)
                return -ENOENT;
 
+       /* Set/clear the IPv6 bit. The cpu_to_be16() swap will lead to the
+        * MSB of port_idx being set/cleared.
+        */
+       key_meta = (struct nfp_flower_meta_tci *)flow->unmasked_data;
+       if (key_meta->nfp_flow_key_layer & NFP_FLOWER_LAYER_IPV6)
+               mac_entry->index |= NFP_TUN_PRE_TUN_IPV6_BIT;
+       else
+               mac_entry->index &= ~NFP_TUN_PRE_TUN_IPV6_BIT;
+
        payload.port_idx = cpu_to_be16(mac_entry->index);
 
        /* Copy mac id and vlan to flow - dev may not exist at delete time. */
index 162a1ff1e9d241e75770dd33301ed7fa18ef98f0..4087311f70821d559d1ba1588bd6a2aa15bd43c9 100644 (file)
@@ -1079,15 +1079,17 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 {
        int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
        struct ionic_tx_stats *stats = q_to_tx_stats(q);
+       int ndescs;
        int err;
 
-       /* If TSO, need roundup(skb->len/mss) descs */
+       /* Each desc covers at most mss bytes, so use one descriptor per gso_seg */
        if (skb_is_gso(skb))
-               return (skb->len / skb_shinfo(skb)->gso_size) + 1;
+               ndescs = skb_shinfo(skb)->gso_segs;
+       else
+               ndescs = 1;
 
-       /* If non-TSO, just need 1 desc and nr_frags sg elems */
        if (skb_shinfo(skb)->nr_frags <= sg_elems)
-               return 1;
+               return ndescs;
 
        /* Too many frags, so linearize */
        err = skb_linearize(skb);
@@ -1096,8 +1098,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
 
        stats->linearize++;
 
-       /* Need 1 desc and zero sg elems */
-       return 1;
+       return ndescs;
 }
 
 static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
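
The distinction matters because skb->len includes protocol headers, so the old skb->len / gso_size + 1 estimate drifts from the number of segments the hardware actually emits. A sketch with hypothetical numbers (the real gso_segs is computed by the network stack):

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1448, payload = 4344, hdrs = 66;
	unsigned int len = hdrs + payload;			/* skb->len */

	unsigned int old_estimate = len / mss + 1;		/* 4 */
	unsigned int gso_segs = (payload + mss - 1) / mss;	/* 3 */

	printf("old=%u gso_segs=%u\n", old_estimate, gso_segs);
	return 0;
}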
index 7760a3394e93c7e2e91a1487b08b14756eee2c9e..7ecb3dfe30bd22a6d6ac535bee27a0ff197e7c33 100644 (file)
@@ -1425,6 +1425,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
 
        if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
                vfree(fw_dump->tmpl_hdr);
+               fw_dump->tmpl_hdr = NULL;
 
                if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
                        extended = !qlcnic_83xx_extend_md_capab(adapter);
@@ -1443,6 +1444,8 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
                        struct qlcnic_83xx_dump_template_hdr *hdr;
 
                        hdr = fw_dump->tmpl_hdr;
+                       if (!hdr)
+                               return;
                        hdr->drv_cap_mask = 0x1f;
                        fw_dump->cap_mask = 0x1f;
                        dev_info(&pdev->dev,
index 7aad0ba53372cb5cd5765d5819d5f61935a2a2da..581a92fc32924f1cc7953142d85a7bfaf7733e56 100644 (file)
@@ -4646,6 +4646,9 @@ static void rtl8169_down(struct rtl8169_private *tp)
 
        rtl8169_update_counters(tp);
 
+       pci_clear_master(tp->pci_dev);
+       rtl_pci_commit(tp);
+
        rtl8169_cleanup(tp, true);
 
        rtl_prepare_power_down(tp);
@@ -4653,6 +4656,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
 
 static void rtl8169_up(struct rtl8169_private *tp)
 {
+       pci_set_master(tp->pci_dev);
        phy_resume(tp->phydev);
        rtl8169_init_phy(tp);
        napi_enable(&tp->napi);
@@ -5307,8 +5311,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rtl_hw_reset(tp);
 
-       pci_set_master(pdev);
-
        rc = rtl_alloc_irq(tp);
        if (rc < 0) {
                dev_err(&pdev->dev, "Can't allocate interrupt\n");
index 3c53051bdacfa52ce1b6cde3c1a64527fbea9905..200785e703c8fc8a6dec0ab41799c1c138df424f 100644 (file)
@@ -1715,14 +1715,17 @@ static int netsec_netdev_init(struct net_device *ndev)
                goto err1;
 
        /* set phy power down */
-       data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR) |
-               BMCR_PDOWN;
-       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+       data = netsec_phy_read(priv->mii_bus, priv->phy_addr, MII_BMCR);
+       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR,
+                        data | BMCR_PDOWN);
 
        ret = netsec_reset_hardware(priv, true);
        if (ret)
                goto err2;
 
+       /* Restore phy power state */
+       netsec_phy_write(priv->mii_bus, priv->phy_addr, MII_BMCR, data);
+
        spin_lock_init(&priv->desc_ring[NETSEC_RING_TX].lock);
        spin_lock_init(&priv->desc_ring[NETSEC_RING_RX].lock);
 
index 6b75cf2603ffc8ff85330519dab7018ac8dc95c8..e62efd166ec8f3a13be5f23bbe80c5c6ac6abc10 100644 (file)
@@ -1214,6 +1214,8 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
        plat_dat->init = sun8i_dwmac_init;
        plat_dat->exit = sun8i_dwmac_exit;
        plat_dat->setup = sun8i_dwmac_setup;
+       plat_dat->tx_fifo_size = 4096;
+       plat_dat->rx_fifo_size = 16384;
 
        ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
        if (ret)
index 3a8775e0ca5525e45b4c28b33dc1b9176967def1..5d677db0aee5dd0c071a387a9a914d63e7c3cac1 100644 (file)
@@ -1880,7 +1880,7 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->regs)) {
                dev_err(&pdev->dev, "could not map Axi Ethernet regs.\n");
                ret = PTR_ERR(lp->regs);
-               goto free_netdev;
+               goto cleanup_clk;
        }
        lp->regs_start = ethres->start;
 
@@ -1958,18 +1958,18 @@ static int axienet_probe(struct platform_device *pdev)
                        break;
                default:
                        ret = -EINVAL;
-                       goto free_netdev;
+                       goto cleanup_clk;
                }
        } else {
                ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
                if (ret)
-                       goto free_netdev;
+                       goto cleanup_clk;
        }
        if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
            lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
                dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
                ret = -EINVAL;
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
@@ -1982,7 +1982,7 @@ static int axienet_probe(struct platform_device *pdev)
                        dev_err(&pdev->dev,
                                "unable to get DMA resource\n");
                        of_node_put(np);
-                       goto free_netdev;
+                       goto cleanup_clk;
                }
                lp->dma_regs = devm_ioremap_resource(&pdev->dev,
                                                     &dmares);
@@ -2002,12 +2002,12 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
                ret = PTR_ERR(lp->dma_regs);
-               goto free_netdev;
+               goto cleanup_clk;
        }
        if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
                dev_err(&pdev->dev, "could not determine irqs\n");
                ret = -ENOMEM;
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Autodetect the need for 64-bit DMA pointers.
@@ -2037,7 +2037,7 @@ static int axienet_probe(struct platform_device *pdev)
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
        if (ret) {
                dev_err(&pdev->dev, "No suitable DMA available\n");
-               goto free_netdev;
+               goto cleanup_clk;
        }
 
        /* Check for Ethernet core IRQ (optional) */
@@ -2068,12 +2068,12 @@ static int axienet_probe(struct platform_device *pdev)
                if (!lp->phy_node) {
                        dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
                        ret = -EINVAL;
-                       goto free_netdev;
+                       goto cleanup_mdio;
                }
                lp->pcs_phy = of_mdio_find_device(lp->phy_node);
                if (!lp->pcs_phy) {
                        ret = -EPROBE_DEFER;
-                       goto free_netdev;
+                       goto cleanup_mdio;
                }
                lp->phylink_config.pcs_poll = true;
        }
@@ -2087,17 +2087,30 @@ static int axienet_probe(struct platform_device *pdev)
        if (IS_ERR(lp->phylink)) {
                ret = PTR_ERR(lp->phylink);
                dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
-               goto free_netdev;
+               goto cleanup_mdio;
        }
 
        ret = register_netdev(lp->ndev);
        if (ret) {
                dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
-               goto free_netdev;
+               goto cleanup_phylink;
        }
 
        return 0;
 
+cleanup_phylink:
+       phylink_destroy(lp->phylink);
+
+cleanup_mdio:
+       if (lp->pcs_phy)
+               put_device(&lp->pcs_phy->dev);
+       if (lp->mii_bus)
+               axienet_mdio_teardown(lp);
+       of_node_put(lp->phy_node);
+
+cleanup_clk:
+       clk_disable_unprepare(lp->clk);
+
 free_netdev:
        free_netdev(ndev);
 
index 35e35852c25c50422a9bdb164a4585eb8d5ffeba..d73b03a80ef89e2186a3ef31743b87388ea84272 100644 (file)
@@ -175,21 +175,23 @@ bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem,
                            : field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
-               dev_err(dev, "IPv%c %s%s table region offset too large "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipv6 ? '6' : '4', hashed ? "hashed " : "",
-                             route ? "route" : "filter",
-                             ipa->mem_offset, mem->offset, offset_max);
+               dev_err(dev, "IPv%c %s%s table region offset too large\n",
+                       ipv6 ? '6' : '4', hashed ? "hashed " : "",
+                       route ? "route" : "filter");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       ipa->mem_offset, mem->offset, offset_max);
+
                return false;
        }
 
        if (mem->offset > ipa->mem_size ||
            mem->size > ipa->mem_size - mem->offset) {
-               dev_err(dev, "IPv%c %s%s table region out of range "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipv6 ? '6' : '4', hashed ? "hashed " : "",
-                             route ? "route" : "filter",
-                             mem->offset, mem->size, ipa->mem_size);
+               dev_err(dev, "IPv%c %s%s table region out of range\n",
+                       ipv6 ? '6' : '4', hashed ? "hashed " : "",
+                       route ? "route" : "filter");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       mem->offset, mem->size, ipa->mem_size);
+
                return false;
        }
 
@@ -205,22 +207,36 @@ static bool ipa_cmd_header_valid(struct ipa *ipa)
        u32 size_max;
        u32 size;
 
+       /* In ipa_cmd_hdr_init_local_add() we record the offset and size
+        * of the header table memory area.  Make sure the offset and size
+        * fit in the fields that need to hold them, and that the entire
+        * range is within the overall IPA memory range.
+        */
        offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
-               dev_err(dev, "header table region offset too large "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             ipa->mem_offset + mem->offset, offset_max);
+               dev_err(dev, "header table region offset too large\n");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       ipa->mem_offset, mem->offset, offset_max);
+
                return false;
        }
 
        size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
        size = ipa->mem[IPA_MEM_MODEM_HEADER].size;
        size += ipa->mem[IPA_MEM_AP_HEADER].size;
-       if (mem->offset > ipa->mem_size || size > ipa->mem_size - mem->offset) {
-               dev_err(dev, "header table region out of range "
-                             "(0x%04x + 0x%04x > 0x%04x)\n",
-                             mem->offset, size, ipa->mem_size);
+
+       if (size > size_max) {
+               dev_err(dev, "header table region size too large\n");
+               dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);
+
+               return false;
+       }
+       if (size > ipa->mem_size || mem->offset > ipa->mem_size - size) {
+               dev_err(dev, "header table region out of range\n");
+               dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
+                       mem->offset, size, ipa->mem_size);
+
                return false;
        }
 
index 2fc64483f27538ba7b24dc50188739e00f555f37..e594bf3b600f01d6270fb23949bbef9435c58d67 100644 (file)
@@ -249,6 +249,7 @@ static const struct qmi_msg_handler ipa_server_msg_handlers[] = {
                .decoded_size   = IPA_QMI_DRIVER_INIT_COMPLETE_REQ_SZ,
                .fn             = ipa_server_driver_init_complete,
        },
+       { },
 };
 
 /* Handle an INIT_DRIVER response message from the modem. */
@@ -269,6 +270,7 @@ static const struct qmi_msg_handler ipa_client_msg_handlers[] = {
                .decoded_size   = IPA_QMI_INIT_DRIVER_RSP_SZ,
                .fn             = ipa_client_init_driver,
        },
+       { },
 };
 
 /* Return a pointer to an init modem driver request structure, which contains
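
Both handler arrays gain an empty entry at the end, presumably because the QMI core iterates the table until it reaches a zeroed terminator. A sketch of the sentinel-terminated table convention, with a hypothetical handler type:

#include <stdio.h>

struct handler {
	int msg_id;
	void (*fn)(void);
};

static void on_init(void) { puts("init"); }

static const struct handler handlers[] = {
	{ .msg_id = 0x20, .fn = on_init },
	{ },	/* terminator: iteration stops at the empty entry */
};

int main(void)
{
	for (const struct handler *h = handlers; h->fn; h++)
		h->fn();	/* visits only the real entries */
	return 0;
}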
index fa0be591ae79b6b88dc22ca93ea985506953c194..82fe5f43f0e913c0ebd172c2ff76482759d92fda 100644 (file)
@@ -342,6 +342,10 @@ static int bcm54xx_config_init(struct phy_device *phydev)
        bcm54xx_adjust_rxrefclk(phydev);
 
        switch (BRCM_PHY_MODEL(phydev)) {
+       case PHY_ID_BCM50610:
+       case PHY_ID_BCM50610M:
+               err = bcm54xx_config_clock_delay(phydev);
+               break;
        case PHY_ID_BCM54210E:
                err = bcm54210e_config_init(phydev);
                break;
@@ -399,6 +403,11 @@ static int bcm54xx_resume(struct phy_device *phydev)
        if (ret < 0)
                return ret;
 
+       /* Upon exiting power down, the PHY remains in an internal reset state
+        * for 40us
+        */
+       fsleep(40);
+
        return bcm54xx_config_init(phydev);
 }
 
index 053c92e02cd81a60008ecedb64bc85d3c1de3b57..dc2800beacc3296066da911ab85d27809f12cf86 100644 (file)
@@ -476,7 +476,7 @@ static void phylink_major_config(struct phylink *pl, bool restart,
                err = pl->mac_ops->mac_finish(pl->config, pl->cur_link_an_mode,
                                              state->interface);
                if (err < 0)
-                       phylink_err(pl, "mac_prepare failed: %pe\n",
+                       phylink_err(pl, "mac_finish failed: %pe\n",
                                    ERR_PTR(err));
        }
 }
index 02e6bbb17b15d24d8f14136882b83cc8f08986b1..8d1f69dad6031f9f9b1d9ff40f9e84daa2dc5ef3 100644 (file)
@@ -387,6 +387,8 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
 
        err = register_netdev(dev);
        if (err) {
+               /* Set disconnected flag so that disconnect() returns early. */
+               pnd->disconnected = 1;
                usb_driver_release_interface(&usbpn_driver, data_intf);
                goto out;
        }
index 90f1c020004257871d8a21ab88cc40dc37caf974..20fb5638ac65350d7085d5ce98e1fb6a7fc8b4bf 100644 (file)
@@ -6553,7 +6553,10 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->in_nway            = rtl8153_in_nway;
                ops->hw_phy_cfg         = r8153_hw_phy_cfg;
                ops->autosuspend_en     = rtl8153_runtime_enable;
-               tp->rx_buf_sz           = 32 * 1024;
+               if (tp->udev->speed < USB_SPEED_SUPER)
+                       tp->rx_buf_sz   = 16 * 1024;
+               else
+                       tp->rx_buf_sz   = 32 * 1024;
                tp->eee_en              = true;
                tp->eee_adv             = MDIO_EEE_1000T | MDIO_EEE_100TX;
                break;
index aa1a66ad2ce51e2be80c27d6e4366d85a2c890cf..34e49c75db42019c54513da9988cd2a15b92b819 100644 (file)
@@ -302,8 +302,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
        if (rxq < rcv->real_num_rx_queues) {
                rq = &rcv_priv->rq[rxq];
                rcv_xdp = rcu_access_pointer(rq->xdp_prog);
-               if (rcv_xdp)
-                       skb_record_rx_queue(skb, rxq);
+               skb_record_rx_queue(skb, rxq);
        }
 
        skb_tx_timestamp(skb);
index 4aaa6388b9ee01f4da67f17206a2a37d7c255424..5a6a945f6c814f9e1153a1df9fbd69926539fbee 100644 (file)
@@ -23,6 +23,8 @@
 
 struct x25_state {
        x25_hdlc_proto settings;
+       bool up;
+       spinlock_t up_lock; /* Protects "up" */
 };
 
 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
@@ -104,6 +106,8 @@ static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
 
 static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
        int result;
 
        /* There should be a pseudo header of 1 byte added by upper layers.
@@ -114,11 +118,19 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       spin_lock_bh(&x25st->up_lock);
+       if (!x25st->up) {
+               spin_unlock_bh(&x25st->up_lock);
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
+       }
+
        switch (skb->data[0]) {
        case X25_IFACE_DATA:    /* Data to be transmitted */
                skb_pull(skb, 1);
                if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
                        dev_kfree_skb(skb);
+               spin_unlock_bh(&x25st->up_lock);
                return NETDEV_TX_OK;
 
        case X25_IFACE_CONNECT:
@@ -147,6 +159,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
                break;
        }
 
+       spin_unlock_bh(&x25st->up_lock);
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
@@ -164,6 +177,7 @@ static int x25_open(struct net_device *dev)
                .data_transmit = x25_data_transmit,
        };
        hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
        struct lapb_parms_struct params;
        int result;
 
@@ -190,6 +204,10 @@ static int x25_open(struct net_device *dev)
        if (result != LAPB_OK)
                return -EINVAL;
 
+       spin_lock_bh(&x25st->up_lock);
+       x25st->up = true;
+       spin_unlock_bh(&x25st->up_lock);
+
        return 0;
 }
 
@@ -197,6 +215,13 @@ static int x25_open(struct net_device *dev)
 
 static void x25_close(struct net_device *dev)
 {
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
+
+       spin_lock_bh(&x25st->up_lock);
+       x25st->up = false;
+       spin_unlock_bh(&x25st->up_lock);
+
        lapb_unregister(dev);
 }
 
@@ -205,15 +230,28 @@ static void x25_close(struct net_device *dev)
 static int x25_rx(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
+       hdlc_device *hdlc = dev_to_hdlc(dev);
+       struct x25_state *x25st = state(hdlc);
 
        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
                dev->stats.rx_dropped++;
                return NET_RX_DROP;
        }
 
-       if (lapb_data_received(dev, skb) == LAPB_OK)
+       spin_lock_bh(&x25st->up_lock);
+       if (!x25st->up) {
+               spin_unlock_bh(&x25st->up_lock);
+               kfree_skb(skb);
+               dev->stats.rx_dropped++;
+               return NET_RX_DROP;
+       }
+
+       if (lapb_data_received(dev, skb) == LAPB_OK) {
+               spin_unlock_bh(&x25st->up_lock);
                return NET_RX_SUCCESS;
+       }
 
+       spin_unlock_bh(&x25st->up_lock);
        dev->stats.rx_errors++;
        dev_kfree_skb_any(skb);
        return NET_RX_DROP;
@@ -298,6 +336,8 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
                        return result;
 
                memcpy(&state(hdlc)->settings, &new_settings, size);
+               state(hdlc)->up = false;
+               spin_lock_init(&state(hdlc)->up_lock);
 
                /* There's no header_ops so hard_header_len should be 0. */
                dev->hard_header_len = 0;
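
Taken together, these hunks gate every datapath entry point on an up flag guarded by a BH spinlock: x25_xmit() and x25_rx() drop packets once x25_close() has cleared the flag, so nothing reaches LAPB after unregistration begins. A userspace sketch of the same gating pattern, with a pthread mutex standing in for the spinlock and hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct gate {
	bool up;
	pthread_mutex_t lock;	/* protects "up", like up_lock above */
};

static int gate_rx(struct gate *g)
{
	int ret;

	pthread_mutex_lock(&g->lock);
	ret = g->up ? 0 : -1;	/* deliver only while the device is up */
	pthread_mutex_unlock(&g->lock);
	return ret;
}

int main(void)
{
	struct gate g = { .up = false, .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("before open: %d\n", gate_rx(&g));	/* -1: dropped */

	pthread_mutex_lock(&g.lock);
	g.up = true;					/* like x25_open() */
	pthread_mutex_unlock(&g.lock);

	printf("after open:  %d\n", gate_rx(&g));	/* 0: delivered */
	return 0;
}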
index 8085782cd8f91d0bd048a9475f897966e0e78c57..9f3361c13ded81a7dd591337865f33b5640ee2c6 100644 (file)
@@ -1357,6 +1357,7 @@ static int intel_pinctrl_add_padgroups_by_gpps(struct intel_pinctrl *pctrl,
                                gpps[i].gpio_base = 0;
                                break;
                        case INTEL_GPIO_BASE_NOMAP:
+                               break;
                        default:
                                break;
                }
@@ -1393,6 +1394,7 @@ static int intel_pinctrl_add_padgroups_by_size(struct intel_pinctrl *pctrl,
                gpps[i].size = min(gpp_size, npins);
                npins -= gpps[i].size;
 
+               gpps[i].gpio_base = gpps[i].base;
                gpps[i].padown_num = padown_num;
 
                /*
@@ -1491,8 +1493,13 @@ static int intel_pinctrl_probe(struct platform_device *pdev,
                if (IS_ERR(regs))
                        return PTR_ERR(regs);
 
-               /* Determine community features based on the revision */
+               /*
+                * Determine community features based on the revision.
+                * A value of all ones means the device is not present.
+                */
                value = readl(regs + REVID);
+               if (value == ~0u)
+                       return -ENODEV;
                if (((value & REVID_MASK) >> REVID_SHIFT) >= 0x94) {
                        community->features |= PINCTRL_FEATURE_DEBOUNCE;
                        community->features |= PINCTRL_FEATURE_1K_PD;
index f35edb0eac40503ccf9d5d0b02c8b6bea576aef9..c12fa57ebd12c3efb479edfde69e6332159436ce 100644 (file)
@@ -572,7 +572,7 @@ static void microchip_sgpio_irq_settype(struct irq_data *data,
        /* Type value spread over 2 registers sets: low, high bit */
        sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, addr.bit,
                         BIT(addr.port), (!!(type & 0x1)) << addr.port);
-       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER + SGPIO_MAX_BITS, addr.bit,
+       sgpio_clrsetbits(bank->priv, REG_INT_TRIGGER, SGPIO_MAX_BITS + addr.bit,
                         BIT(addr.port), (!!(type & 0x2)) << addr.port);
 
        if (type == SGPIO_INT_TRG_LEVEL)
index aa1a1c850d057425a02d1806184e235505f670f9..53a0badc6b035e4afd24e2d285f6e23755b65be4 100644 (file)
@@ -3727,12 +3727,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
 static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
 {
        struct rockchip_pinctrl *info = dev_get_drvdata(dev);
-       int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
-                              rk3288_grf_gpio6c_iomux |
-                              GPIO6C6_SEL_WRITE_ENABLE);
+       int ret;
 
-       if (ret)
-               return ret;
+       if (info->ctrl->type == RK3288) {
+               ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
+                                  rk3288_grf_gpio6c_iomux |
+                                  GPIO6C6_SEL_WRITE_ENABLE);
+               if (ret)
+                       return ret;
+       }
 
        return pinctrl_force_default(info->pctl_dev);
 }
index 369ee20a7ea950c24d28bf71c0e29076ea5fd136..2f19ab4db720834cdc29f15f4421ec776c9a6159 100644 (file)
@@ -392,7 +392,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                          unsigned long *configs, unsigned int nconfs)
 {
        struct lpi_pinctrl *pctrl = dev_get_drvdata(pctldev->dev);
-       unsigned int param, arg, pullup, strength;
+       unsigned int param, arg, pullup = LPI_GPIO_BIAS_DISABLE, strength = 2;
        bool value, output_enabled = false;
        const struct lpi_pingroup *g;
        unsigned long sval;
index 8daccd530285400fac736ccf0735b45fa6881491..9d41abfca37ea2bf7d8c3ffe87b48d638c8494a6 100644 (file)
@@ -1439,14 +1439,14 @@ static const struct msm_pingroup sc7280_groups[] = {
        [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
        [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
        [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
-       [175] = UFS_RESET(ufs_reset, 0x1be000),
-       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x1b3000, 15, 0),
-       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0x1b3000, 13, 6),
-       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x1b3000, 11, 3),
-       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0x1b3000, 9, 0),
-       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0x1b4000, 14, 6),
-       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0x1b4000, 11, 3),
-       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0x1b4000, 9, 0),
+       [175] = UFS_RESET(ufs_reset, 0xbe000),
+       [176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
+       [177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
+       [178] = SDC_QDSD_PINGROUP(sdc1_cmd, 0xb3000, 11, 3),
+       [179] = SDC_QDSD_PINGROUP(sdc1_data, 0xb3000, 9, 0),
+       [180] = SDC_QDSD_PINGROUP(sdc2_clk, 0xb4000, 14, 6),
+       [181] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xb4000, 11, 3),
+       [182] = SDC_QDSD_PINGROUP(sdc2_data, 0xb4000, 9, 0),
 };
 
 static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
index 2b5b0e2b03adde8fda52ec753a1118c18dd25ed6..5aaf57b40407f9da80c35ab82de0c084d847b1c0 100644 (file)
@@ -423,7 +423,7 @@ static const char * const gpio_groups[] = {
 
 static const char * const qdss_stm_groups[] = {
        "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
-       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19" "gpio20", "gpio21", "gpio22",
+       "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21", "gpio22",
        "gpio23", "gpio44", "gpio45", "gpio52", "gpio53", "gpio56", "gpio57", "gpio61", "gpio62",
        "gpio63", "gpio64", "gpio65", "gpio66",
 };
index ad4e630e73e26b8cc689a5f36628370e72cfd0e5..461ec61530ebf2a12883899f4734d85da62c42a8 100644 (file)
@@ -1173,15 +1173,20 @@ config INTEL_PMC_CORE
        depends on PCI
        help
          The Intel Platform Controller Hub for Intel Core SoCs provides access
-         to Power Management Controller registers via a PCI interface. This
+         to Power Management Controller registers via various interfaces. This
          driver can utilize debugging capabilities and supported features as
-         exposed by the Power Management Controller.
+         exposed by the Power Management Controller. It may also perform some
+         tasks in the PMC to enable transitions into the SLPS0 state.
+         It should be selected on all Intel platforms supported by the driver.
 
          Supported features:
                - SLP_S0_RESIDENCY counter
                - PCH IP Power Gating status
-               - LTR Ignore
+               - LTR Ignore / LTR Show
                - MPHY/PLL gating status (Sunrisepoint PCH only)
+               - SLPS0 Debug registers (Cannonlake/Icelake PCH)
+               - Low Power Mode registers (Tigerlake and beyond)
+               - PMC quirks as needed to enable SLPS0/S0ix
 
 config INTEL_PMT_CLASS
        tristate
index 80f4b7785c6c9ad9de93f62a79388008a461ba12..091e48c217ed8dff471d97d478b5b5f7f1b46e36 100644 (file)
@@ -185,5 +185,8 @@ void exit_enum_attributes(void)
                        sysfs_remove_group(wmi_priv.enumeration_data[instance_id].attr_name_kobj,
                                                                &enumeration_attr_group);
        }
+       wmi_priv.enumeration_instances_count = 0;
+
        kfree(wmi_priv.enumeration_data);
+       wmi_priv.enumeration_data = NULL;
 }
index 75aedbb733be2ccaab6f13de34c1a7a1c8a1399c..8a49ba6e44f9a0755a6f187e10b4cfef9e49723e 100644 (file)
@@ -175,5 +175,8 @@ void exit_int_attributes(void)
                        sysfs_remove_group(wmi_priv.integer_data[instance_id].attr_name_kobj,
                                                                &integer_attr_group);
        }
+       wmi_priv.integer_instances_count = 0;
+
        kfree(wmi_priv.integer_data);
+       wmi_priv.integer_data = NULL;
 }
index 3abcd95477c0789d67aabc1e01b8bc5435e192eb..834b3e82ad9f93ef725a7a8b7214847a6918e91f 100644 (file)
@@ -183,5 +183,8 @@ void exit_po_attributes(void)
                        sysfs_remove_group(wmi_priv.po_data[instance_id].attr_name_kobj,
                                                                &po_attr_group);
        }
+       wmi_priv.po_instances_count = 0;
+
        kfree(wmi_priv.po_data);
+       wmi_priv.po_data = NULL;
 }
index ac75dce88a4c458f219ecc01c6f1891561a93cc0..552537852459a997de715a8fd58130f3fe7394bd 100644 (file)
@@ -155,5 +155,8 @@ void exit_str_attributes(void)
                        sysfs_remove_group(wmi_priv.str_data[instance_id].attr_name_kobj,
                                                                &str_attr_group);
        }
+       wmi_priv.str_instances_count = 0;
+
        kfree(wmi_priv.str_data);
+       wmi_priv.str_data = NULL;
 }
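All four exit helpers above get the same treatment: the instance count is zeroed and the pointer NULLed after kfree(), so a second invocation (for instance from both an init error path and module exit) degrades to a harmless no-op instead of a double free. The shape of the idiom, names illustrative:

	/* Idempotent teardown: safe to call again after this point */
	kfree(priv.data);
	priv.data = NULL;
	priv.count = 0;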
index cb81010ba1a21b108e76fd248ed8bb3ac79c8cb2..7410ccae650c2ccfd9040f5179ea1970fb4cd7a8 100644 (file)
@@ -210,25 +210,17 @@ static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
  */
 static int create_attributes_level_sysfs_files(void)
 {
-       int ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+       int ret;
 
-       if (ret) {
-               pr_debug("could not create reset_bios file\n");
+       ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+       if (ret)
                return ret;
-       }
 
        ret = sysfs_create_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
-       if (ret) {
-               pr_debug("could not create changing_pending_reboot file\n");
-               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
-       }
-       return ret;
-}
+       if (ret)
+               return ret;
 
-static void release_reset_bios_data(void)
-{
-       sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
-       sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
+       return 0;
 }
 
 static ssize_t wmi_sysman_attr_show(struct kobject *kobj, struct attribute *attr,
@@ -373,8 +365,6 @@ static void destroy_attribute_objs(struct kset *kset)
  */
 static void release_attributes_data(void)
 {
-       release_reset_bios_data();
-
        mutex_lock(&wmi_priv.mutex);
        exit_enum_attributes();
        exit_int_attributes();
@@ -386,11 +376,13 @@ static void release_attributes_data(void)
                wmi_priv.authentication_dir_kset = NULL;
        }
        if (wmi_priv.main_dir_kset) {
+               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &reset_bios.attr);
+               sysfs_remove_file(&wmi_priv.main_dir_kset->kobj, &pending_reboot.attr);
                destroy_attribute_objs(wmi_priv.main_dir_kset);
                kset_unregister(wmi_priv.main_dir_kset);
+               wmi_priv.main_dir_kset = NULL;
        }
        mutex_unlock(&wmi_priv.mutex);
-
 }
 
 /**
@@ -497,7 +489,6 @@ nextobj:
 
 err_attr_init:
        mutex_unlock(&wmi_priv.mutex);
-       release_attributes_data();
        kfree(obj);
        return retval;
 }
@@ -513,102 +504,91 @@ static int __init sysman_init(void)
        }
 
        ret = init_bios_attr_set_interface();
-       if (ret || !wmi_priv.bios_attr_wdev) {
-               pr_debug("failed to initialize set interface\n");
-               goto fail_set_interface;
-       }
+       if (ret)
+               return ret;
 
        ret = init_bios_attr_pass_interface();
-       if (ret || !wmi_priv.password_attr_wdev) {
-               pr_debug("failed to initialize pass interface\n");
-               goto fail_pass_interface;
+       if (ret)
+               goto err_exit_bios_attr_set_interface;
+
+       if (!wmi_priv.bios_attr_wdev || !wmi_priv.password_attr_wdev) {
+               pr_debug("failed to find set or pass interface\n");
+               ret = -ENODEV;
+               goto err_exit_bios_attr_pass_interface;
        }
 
        ret = class_register(&firmware_attributes_class);
        if (ret)
-               goto fail_class;
+               goto err_exit_bios_attr_pass_interface;
 
        wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
                                  NULL, "%s", DRIVER_NAME);
        if (IS_ERR(wmi_priv.class_dev)) {
                ret = PTR_ERR(wmi_priv.class_dev);
-               goto fail_classdev;
+               goto err_unregister_class;
        }
 
        wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL,
                                                     &wmi_priv.class_dev->kobj);
        if (!wmi_priv.main_dir_kset) {
                ret = -ENOMEM;
-               goto fail_main_kset;
+               goto err_destroy_classdev;
        }
 
        wmi_priv.authentication_dir_kset = kset_create_and_add("authentication", NULL,
                                                                &wmi_priv.class_dev->kobj);
        if (!wmi_priv.authentication_dir_kset) {
                ret = -ENOMEM;
-               goto fail_authentication_kset;
+               goto err_release_attributes_data;
        }
 
        ret = create_attributes_level_sysfs_files();
        if (ret) {
                pr_debug("could not create reset BIOS attribute\n");
-               goto fail_reset_bios;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(ENUM, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate enumeration type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(INT, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate integer type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(STR, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate string type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        ret = init_bios_attributes(PO, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID);
        if (ret) {
                pr_debug("failed to populate pass object type attributes\n");
-               goto fail_create_group;
+               goto err_release_attributes_data;
        }
 
        return 0;
 
-fail_create_group:
+err_release_attributes_data:
        release_attributes_data();
 
-fail_reset_bios:
-       if (wmi_priv.authentication_dir_kset) {
-               kset_unregister(wmi_priv.authentication_dir_kset);
-               wmi_priv.authentication_dir_kset = NULL;
-       }
-
-fail_authentication_kset:
-       if (wmi_priv.main_dir_kset) {
-               kset_unregister(wmi_priv.main_dir_kset);
-               wmi_priv.main_dir_kset = NULL;
-       }
-
-fail_main_kset:
+err_destroy_classdev:
        device_destroy(&firmware_attributes_class, MKDEV(0, 0));
 
-fail_classdev:
+err_unregister_class:
        class_unregister(&firmware_attributes_class);
 
-fail_class:
+err_exit_bios_attr_pass_interface:
        exit_bios_attr_pass_interface();
 
-fail_pass_interface:
+err_exit_bios_attr_set_interface:
        exit_bios_attr_set_interface();
 
-fail_set_interface:
        return ret;
 }
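The reworked sysman_init() is a textbook kernel unwind ladder: each err_* label undoes exactly the steps that had succeeded before the failure, in reverse order, so every error exit releases the same set of resources that module unload would. Reduced to its generic shape (function names illustrative):

	static int init_all(void)
	{
		int ret;

		ret = init_a();
		if (ret)
			return ret;	/* nothing to unwind yet */

		ret = init_b();
		if (ret)
			goto err_exit_a;

		ret = init_c();
		if (ret)
			goto err_exit_b;

		return 0;

	err_exit_b:
		exit_b();
	err_exit_a:
		exit_a();
		return ret;
	}

Naming the labels after the cleanup they perform (err_exit_bios_attr_set_interface) rather than after the step that failed (fail_set_interface) is what makes the ladder auditable: each label's correctness can be checked locally, without replaying the whole function.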
 
index 2f5b8d09143e306ed9750875a518898d488c6edc..57cc92891a57083b227146718ef7e2726f0d4a21 100644 (file)
@@ -90,6 +90,13 @@ static const struct dmi_system_id button_array_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x2 Detachable"),
                },
        },
+       {
+               .ident = "Lenovo ThinkPad X1 Tablet Gen 2",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_FAMILY, "ThinkPad X1 Tablet Gen 2"),
+               },
+       },
        { }
 };
 
index 8a8017f9ca9136691c5689022b98b5b7ef4b14a2..3fdf4cbec9ad2d7803ea85fdce689d482f602168 100644 (file)
@@ -48,8 +48,16 @@ static const struct key_entry intel_vbtn_keymap[] = {
 };
 
 static const struct key_entry intel_vbtn_switchmap[] = {
-       { KE_SW,     0xCA, { .sw = { SW_DOCK, 1 } } },          /* Docked */
-       { KE_SW,     0xCB, { .sw = { SW_DOCK, 0 } } },          /* Undocked */
+       /*
+        * SW_DOCK should only be reported for docking stations, but DSDTs using the
+        * intel-vbtn code, always seem to use this for 2-in-1s / convertibles and set
+        * SW_DOCK=1 when in laptop-mode (in tandem with setting SW_TABLET_MODE=0).
+        * This causes userspace to think the laptop is docked to a port-replicator
+        * and to disable suspend-on-lid-close, which is undesirable.
+        * Map the dock events to KE_IGNORE to avoid this broken SW_DOCK reporting.
+        */
+       { KE_IGNORE, 0xCA, { .sw = { SW_DOCK, 1 } } },          /* Docked */
+       { KE_IGNORE, 0xCB, { .sw = { SW_DOCK, 0 } } },          /* Undocked */
        { KE_SW,     0xCC, { .sw = { SW_TABLET_MODE, 1 } } },   /* Tablet */
        { KE_SW,     0xCD, { .sw = { SW_TABLET_MODE, 0 } } },   /* Laptop */
        { KE_END }
index ee2f757515b0a04b9fe0ba58df118e7ec5296500..b5888aeb4bcff9a79fc7cd913d7f033b183f3257 100644 (file)
@@ -863,34 +863,45 @@ out_unlock:
 }
 DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);
 
-static ssize_t pmc_core_ltr_ignore_write(struct file *file,
-                                        const char __user *userbuf,
-                                        size_t count, loff_t *ppos)
+static int pmc_core_send_ltr_ignore(u32 value)
 {
        struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
-       u32 val, buf_size, fd;
-       int err;
-
-       buf_size = count < 64 ? count : 64;
-
-       err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
-       if (err)
-               return err;
+       u32 reg;
+       int err = 0;
 
        mutex_lock(&pmcdev->lock);
 
-       if (val > map->ltr_ignore_max) {
+       if (value > map->ltr_ignore_max) {
                err = -EINVAL;
                goto out_unlock;
        }
 
-       fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
-       fd |= (1U << val);
-       pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);
+       reg = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
+       reg |= BIT(value);
+       pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, reg);
 
 out_unlock:
        mutex_unlock(&pmcdev->lock);
+
+       return err;
+}
+
+static ssize_t pmc_core_ltr_ignore_write(struct file *file,
+                                        const char __user *userbuf,
+                                        size_t count, loff_t *ppos)
+{
+       u32 buf_size, value;
+       int err;
+
+       buf_size = min_t(u32, count, 64);
+
+       err = kstrtou32_from_user(userbuf, buf_size, 10, &value);
+       if (err)
+               return err;
+
+       err = pmc_core_send_ltr_ignore(value);
+
        return err == 0 ? count : err;
 }
 
@@ -1244,6 +1255,15 @@ static int pmc_core_probe(struct platform_device *pdev)
        pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
        dmi_check_system(pmc_core_dmi_table);
 
+       /*
+        * On TGL, due to a hardware limitation, the GBE LTR blocks PC10 when
+        * a cable is attached. Tell the PMC to ignore it.
+        */
+       if (pmcdev->map == &tgl_reg_map) {
+               dev_dbg(&pdev->dev, "ignoring GBE LTR\n");
+               pmc_core_send_ltr_ignore(3);
+       }
+
        pmc_core_dbgfs_register(pmcdev);
 
        device_initialized = true;
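Splitting pmc_core_send_ltr_ignore() out of the debugfs write handler is what lets probe apply the Tiger Lake GBE quirk above without going through the file interface; userspace can still set additional ignore bits the old way, e.g. echo 3 > /sys/kernel/debug/pmc_core/ltr_ignore (path as created by this driver's debugfs registration; assumes debugfs is mounted). The written value is the bit index of the IP block whose LTR should be masked, bounds-checked against map->ltr_ignore_max under pmcdev->lock.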
index c8939fba45090788519b71afebd2a7b7d2a1d11b..ee2b3bbeb83da51ab7330dbf790c77663c39b47c 100644 (file)
@@ -173,7 +173,7 @@ static int intel_pmt_dev_register(struct intel_pmt_entry *entry,
                                  struct intel_pmt_namespace *ns,
                                  struct device *parent)
 {
-       struct resource res;
+       struct resource res = {0};
        struct device *dev;
        int ret;
 
index 97dd749c8290dbceaa57a22cb027a709567cf8a6..92d315a16cfd3cd5985fe663d3a155c1e1107d3e 100644 (file)
 #define CRASH_TYPE_OOBMSM      1
 
 /* Control Flags */
-#define CRASHLOG_FLAG_DISABLE          BIT(27)
+#define CRASHLOG_FLAG_DISABLE          BIT(28)
 
 /*
- * Bits 28 and 29 control the state of bit 31.
+ * Bits 29 and 30 control the state of bit 31.
  *
- * Bit 28 will clear bit 31, if set, allowing a new crashlog to be captured.
- * Bit 29 will immediately trigger a crashlog to be generated, setting bit 31.
- * Bit 30 is read-only and reserved as 0.
+ * Bit 29 will clear bit 31, if set, allowing a new crashlog to be captured.
+ * Bit 30 will immediately trigger a crashlog to be generated, setting bit 31.
  * Bit 31 is the read-only status with a 1 indicating log is complete.
  */
-#define CRASHLOG_FLAG_TRIGGER_CLEAR    BIT(28)
-#define CRASHLOG_FLAG_TRIGGER_EXECUTE  BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_CLEAR    BIT(29)
+#define CRASHLOG_FLAG_TRIGGER_EXECUTE  BIT(30)
 #define CRASHLOG_FLAG_TRIGGER_COMPLETE BIT(31)
 #define CRASHLOG_FLAG_TRIGGER_MASK     GENMASK(31, 28)
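With the corrected positions, a re-arm/trigger sequence against this control word would look roughly like the sketch below (the register accessors and CRASHLOG_CTRL offset are illustrative, not the driver's actual plumbing):

	u32 ctrl = readl(base + CRASHLOG_CTRL);		/* CRASHLOG_CTRL is hypothetical */

	if (ctrl & CRASHLOG_FLAG_TRIGGER_COMPLETE) {	/* bit 31: a log is present */
		/* bit 29: clear the completed log so a new one can be captured */
		writel(ctrl | CRASHLOG_FLAG_TRIGGER_CLEAR, base + CRASHLOG_CTRL);
		ctrl = readl(base + CRASHLOG_CTRL);
	}
	/* bit 30: force a capture now; hardware sets bit 31 when done */
	writel(ctrl | CRASHLOG_FLAG_TRIGGER_EXECUTE, base + CRASHLOG_CTRL);

Note that CRASHLOG_FLAG_TRIGGER_MASK still spans GENMASK(31, 28), because the relocated disable bit (28) remains part of the same control field.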
 
index b881044b31b0b3984f02b1565fab34fb216936f9..0d9e2ddbf904d2c93499dc0da3086d7b4c1a0455 100644 (file)
@@ -4081,13 +4081,19 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 
        case TP_HKEY_EV_KEY_NUMLOCK:
        case TP_HKEY_EV_KEY_FN:
-       case TP_HKEY_EV_KEY_FN_ESC:
                /* key press events, we just ignore them as long as the EC
                 * is still reporting them in the normal keyboard stream */
                *send_acpi_ev = false;
                *ignore_acpi_ev = true;
                return true;
 
+       case TP_HKEY_EV_KEY_FN_ESC:
+               /* Get the media key status to force the status LED to update */
+               acpi_evalf(hkey_handle, NULL, "GMKS", "v");
+               *send_acpi_ev = false;
+               *ignore_acpi_ev = true;
+               return true;
+
        case TP_HKEY_EV_TABLET_CHANGED:
                tpacpi_input_send_tabletsw();
                hotkey_tablet_mode_notify_change();
@@ -9845,6 +9851,11 @@ static struct ibm_struct lcdshadow_driver_data = {
  * Thinkpad sensor interfaces
  */
 
+#define DYTC_CMD_QUERY        0 /* To get DYTC status - enable/revision */
+#define DYTC_QUERY_ENABLE_BIT 8  /* Bit        8 - 0 = disabled, 1 = enabled */
+#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
+#define DYTC_QUERY_REV_BIT    28 /* Bits 28 - 31 - revision */
+
 #define DYTC_CMD_GET          2 /* To get current IC function and mode */
 #define DYTC_GET_LAPMODE_BIT 17 /* Set when in lapmode */
 
@@ -9855,6 +9866,7 @@ static bool has_palmsensor;
 static bool has_lapsensor;
 static bool palm_state;
 static bool lap_state;
+static int dytc_version;
 
 static int dytc_command(int command, int *output)
 {
@@ -9869,6 +9881,33 @@ static int dytc_command(int command, int *output)
        return 0;
 }
 
+static int dytc_get_version(void)
+{
+       int err, output;
+
+       /* Check if we've been called before - and just return cached value */
+       if (dytc_version)
+               return dytc_version;
+
+       /* Otherwise query DYTC and extract version information */
+       err = dytc_command(DYTC_CMD_QUERY, &output);
+       /*
+        * If support isn't available (ENODEV) then don't return an error
+        * and don't create the sysfs group
+        */
+       if (err == -ENODEV)
+               return 0;
+       /* For all other errors we can flag the failure */
+       if (err)
+               return err;
+
+       /* Check DYTC is enabled and supports mode setting */
+       if (output & BIT(DYTC_QUERY_ENABLE_BIT))
+               dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+
+       return 0;
+}
+
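dytc_get_version() caches the firmware revision so the two users below (the proximity-sensor init and the platform-profile init) agree on one value. Decoding follows the layout in the relocated macros: bit 8 is the enable flag, bits 16-27 the sub revision, bits 28-31 the revision. As a worked example with a hypothetical query output of 0x50000100: bit 8 is set, and (0x50000100 >> 28) & 0xF == 5, so dytc_version becomes 5 and both the lap-sensor sysfs file and the thermal-mode profile are allowed to register.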
 static int lapsensor_get(bool *present, bool *state)
 {
        int output, err;
@@ -9974,7 +10013,18 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm)
                if (err)
                        return err;
        }
-       if (has_lapsensor) {
+
+       /* Check if we know the DYTC version; if we don't, get it */
+       if (!dytc_version) {
+               err = dytc_get_version();
+               if (err)
+                       return err;
+       }
+       /*
+        * Platforms before DYTC version 5 claim to have a lap sensor, but it doesn't work, so we
+        * ignore them
+        */
+       if (has_lapsensor && (dytc_version >= 5)) {
                err = sysfs_create_file(&tpacpi_pdev->dev.kobj, &dev_attr_dytc_lapmode.attr);
                if (err)
                        return err;
@@ -9999,14 +10049,9 @@ static struct ibm_struct proxsensor_driver_data = {
  * DYTC Platform Profile interface
  */
 
-#define DYTC_CMD_QUERY        0 /* To get DYTC status - enable/revision */
 #define DYTC_CMD_SET          1 /* To enable/disable IC function mode */
 #define DYTC_CMD_RESET    0x1ff /* To reset back to default */
 
-#define DYTC_QUERY_ENABLE_BIT 8  /* Bit        8 - 0 = disabled, 1 = enabled */
-#define DYTC_QUERY_SUBREV_BIT 16 /* Bits 16 - 27 - sub revision */
-#define DYTC_QUERY_REV_BIT    28 /* Bits 28 - 31 - revision */
-
 #define DYTC_GET_FUNCTION_BIT 8  /* Bits  8-11 - function setting */
 #define DYTC_GET_MODE_BIT     12 /* Bits 12-15 - mode setting */
 
@@ -10142,8 +10187,13 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                return err;
 
        if (profile == PLATFORM_PROFILE_BALANCED) {
-               /* To get back to balanced mode we just issue a reset command */
-               err = dytc_command(DYTC_CMD_RESET, &output);
+               /*
+                * To get back to balanced mode we need to issue a reset command.
+                * Note we still need to disable CQL mode before hand and re-enable
+                * it afterwards, otherwise dytc_lapmode gets reset to 0 and stays
+                * stuck at 0 for approx. 30 minutes.
+                */
+               err = dytc_cql_command(DYTC_CMD_RESET, &output);
                if (err)
                        goto unlock;
        } else {
@@ -10211,28 +10261,28 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
        if (err)
                return err;
 
+       /* Check if we know the DYTC version; if we don't, get it */
+       if (!dytc_version) {
+               err = dytc_get_version();
+               if (err)
+                       return err;
+       }
        /* Check DYTC is enabled and supports mode setting */
-       if (output & BIT(DYTC_QUERY_ENABLE_BIT)) {
-               /* Only DYTC v5.0 and later has this feature. */
-               int dytc_version;
-
-               dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
-               if (dytc_version >= 5) {
-                       dbg_printk(TPACPI_DBG_INIT,
-                                  "DYTC version %d: thermal mode available\n", dytc_version);
-                       /* Create platform_profile structure and register */
-                       err = platform_profile_register(&dytc_profile);
-                       /*
-                        * If for some reason platform_profiles aren't enabled
-                        * don't quit terminally.
-                        */
-                       if (err)
-                               return 0;
+       if (dytc_version >= 5) {
+               dbg_printk(TPACPI_DBG_INIT,
+                               "DYTC version %d: thermal mode available\n", dytc_version);
+               /* Create platform_profile structure and register */
+               err = platform_profile_register(&dytc_profile);
+               /*
+                * If for some reason platform_profiles aren't enabled
+                * don't quit terminally.
+                */
+               if (err)
+                       return 0;
 
-                       dytc_profile_available = true;
-                       /* Ensure initial values are correct */
-                       dytc_profile_refresh();
-               }
+               dytc_profile_available = true;
+               /* Ensure initial values are correct */
+               dytc_profile_refresh();
        }
        return 0;
 }
index beb5f74944cdf95f4d083e57bab7699f7c20014d..08f4cf0ad9e3cacb8ac0e55f6f6003c74d53453a 100644 (file)
@@ -189,15 +189,16 @@ int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
        tmr_add = ptp_qoriq->tmr_add;
        adj = tmr_add;
 
-       /* calculate diff as adj*(scaled_ppm/65536)/1000000
-        * and round() to the nearest integer
+       /*
+        * Calculate diff and round() to the nearest integer
+        *
+        * diff = adj * (ppb / 1000000000)
+        *      = adj * scaled_ppm / 65536000000
         */
-       adj *= scaled_ppm;
-       diff = div_u64(adj, 8000000);
-       diff = (diff >> 13) + ((diff >> 12) & 1);
+       diff = mul_u64_u64_div_u64(adj, scaled_ppm, 32768000000);
+       diff = DIV64_U64_ROUND_UP(diff, 2);
 
        tmr_add = neg_adj ? tmr_add - diff : tmr_add + diff;
-
        ptp_qoriq->write(&regs->ctrl_regs->tmr_add, tmr_add);
 
        return 0;
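The replaced shift-and-mask arithmetic and the new helper compute the same quantity; the derivation is worth spelling out. scaled_ppm is parts-per-million in 16.16 fixed point, so the exact correction is

	diff = tmr_add * scaled_ppm / (1000000 * 65536) = tmr_add * scaled_ppm / 65536000000

rounded to the nearest integer. mul_u64_u64_div_u64() divides by half that denominator (32768000000), yielding twice the true quotient, and DIV64_U64_ROUND_UP(diff, 2) halves it rounding up: a floored result of 2q (fractional part below one half) gives q, while 2q+1 (one half or more) gives q+1, i.e. round-half-up. For instance, a true quotient of 7.4 produces floor(14.8) = 14 -> (14 + 1) / 2 = 7, while 7.6 produces 15 -> 8. The gain over the old code is the full 64x64 multiply inside the helper: adj * scaled_ppm no longer overflows a u64 for large adjustments.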
index f140eafc0d6a2df1fa3f6e5c0f1cc42be1ce7f1a..61831f2fdb309800703f881ba98706b2ec405b64 100644 (file)
@@ -2371,6 +2371,24 @@ static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
        return 0;
 }
 
+/**
+ * ibmvfc_event_is_free - Check if event is free or not
+ * @evt:       ibmvfc event struct
+ *
+ * Returns:
+ *     true / false
+ **/
+static bool ibmvfc_event_is_free(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_event *loop_evt;
+
+       list_for_each_entry(loop_evt, &evt->queue->free, queue_list)
+               if (loop_evt == evt)
+                       return true;
+
+       return false;
+}
+
 /**
  * ibmvfc_wait_for_ops - Wait for ops to complete
  * @vhost:     ibmvfc host struct
@@ -2385,35 +2403,58 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
 {
        struct ibmvfc_event *evt;
        DECLARE_COMPLETION_ONSTACK(comp);
-       int wait;
+       int wait, i, q_index, q_size;
        unsigned long flags;
        signed long timeout = IBMVFC_ABORT_WAIT_TIMEOUT * HZ;
+       struct ibmvfc_queue *queues;
 
        ENTER;
+       if (vhost->mq_enabled && vhost->using_channels) {
+               queues = vhost->scsi_scrqs.scrqs;
+               q_size = vhost->scsi_scrqs.active_queues;
+       } else {
+               queues = &vhost->crq;
+               q_size = 1;
+       }
+
        do {
                wait = 0;
-               spin_lock_irqsave(&vhost->crq.l_lock, flags);
-               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-                       if (match(evt, device)) {
-                               evt->eh_comp = &comp;
-                               wait++;
+               spin_lock_irqsave(vhost->host->host_lock, flags);
+               for (q_index = 0; q_index < q_size; q_index++) {
+                       spin_lock(&queues[q_index].l_lock);
+                       for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+                               evt = &queues[q_index].evt_pool.events[i];
+                               if (!ibmvfc_event_is_free(evt)) {
+                                       if (match(evt, device)) {
+                                               evt->eh_comp = &comp;
+                                               wait++;
+                                       }
+                               }
                        }
+                       spin_unlock(&queues[q_index].l_lock);
                }
-               spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+               spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
                if (wait) {
                        timeout = wait_for_completion_timeout(&comp, timeout);
 
                        if (!timeout) {
                                wait = 0;
-                               spin_lock_irqsave(&vhost->crq.l_lock, flags);
-                               list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
-                                       if (match(evt, device)) {
-                                               evt->eh_comp = NULL;
-                                               wait++;
+                               spin_lock_irqsave(vhost->host->host_lock, flags);
+                               for (q_index = 0; q_index < q_size; q_index++) {
+                                       spin_lock(&queues[q_index].l_lock);
+                                       for (i = 0; i < queues[q_index].evt_pool.size; i++) {
+                                               evt = &queues[q_index].evt_pool.events[i];
+                                               if (!ibmvfc_event_is_free(evt)) {
+                                                       if (match(evt, device)) {
+                                                               evt->eh_comp = NULL;
+                                                               wait++;
+                                                       }
+                                               }
                                        }
+                                       spin_unlock(&queues[q_index].l_lock);
                                }
-                               spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
                                if (wait)
                                        dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
                                LEAVE;
index ac066f86bb141b3708c9655d1e830f2c2ac6cff4..ac0eef975f17941f1b57c200365ff7ae61818ae8 100644 (file)
@@ -7806,14 +7806,18 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
                ioc->pend_os_device_add_sz++;
        ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
            GFP_KERNEL);
-       if (!ioc->pend_os_device_add)
+       if (!ioc->pend_os_device_add) {
+               r = -ENOMEM;
                goto out_free_resources;
+       }
 
        ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
        ioc->device_remove_in_progress =
                kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
-       if (!ioc->device_remove_in_progress)
+       if (!ioc->device_remove_in_progress) {
+               r = -ENOMEM;
                goto out_free_resources;
+       }
 
        ioc->fwfault_debug = mpt3sas_fwfault_debug;
 
index 47ad64b06623696ca510da02ffafa0d0149256ce..69c5b5ee2169bffedba10eb35d7aadd6cbf32bee 100644 (file)
@@ -1675,6 +1675,7 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi)
                if (!qedi->global_queues[i]) {
                        QEDI_ERR(&qedi->dbg_ctx,
                                 "Unable to allocation global queue %d.\n", i);
+                       status = -ENOMEM;
                        goto mem_alloc_failure;
                }
 
index c48daf52725d9c654014ff89f3f9e8d736a14620..480e7d2dcf3e60f34f964d50dc09054f7553421d 100644 (file)
@@ -3222,8 +3222,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
            (cmd->sess && cmd->sess->deleted)) {
                cmd->state = QLA_TGT_STATE_PROCESSED;
-               res = 0;
-               goto free;
+               return 0;
        }
 
        ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018,
@@ -3234,8 +3233,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
-       if (unlikely(res != 0))
-               goto free;
+       if (unlikely(res != 0)) {
+               return res;
+       }
 
        spin_lock_irqsave(qpair->qp_lock_ptr, flags);
 
@@ -3255,8 +3255,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        vha->flags.online, qla2x00_reset_active(vha),
                        cmd->reset_count, qpair->chip_reset);
                spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
-               res = 0;
-               goto free;
+               return 0;
        }
 
        /* Does F/W have an IOCBs for this request */
@@ -3359,8 +3358,6 @@ out_unmap_unlock:
        qlt_unmap_sg(vha, cmd);
        spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
 
-free:
-       vha->hw->tgt.tgt_ops->free_cmd(cmd);
        return res;
 }
 EXPORT_SYMBOL(qlt_xmit_response);
index b55fc768a2a7a08e00615cde02a22c980ea3f4f6..8b4890cdd4ca1044a181c02a541131125c6adff8 100644 (file)
@@ -644,7 +644,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
 
        if (cmd->aborted) {
                /* Cmd can loop during Q-full.  tcm_qla2xxx_aborted_task
@@ -657,7 +656,6 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
                        cmd->se_cmd.transport_state,
                        cmd->se_cmd.t_state,
                        cmd->se_cmd.se_cmd_flags);
-               vha->hw->tgt.tgt_ops->free_cmd(cmd);
                return 0;
        }
 
@@ -685,7 +683,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
 {
        struct qla_tgt_cmd *cmd = container_of(se_cmd,
                                struct qla_tgt_cmd, se_cmd);
-       struct scsi_qla_host *vha = cmd->vha;
        int xmit_type = QLA_TGT_XMIT_STATUS;
 
        if (cmd->aborted) {
@@ -699,7 +696,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
                    cmd, kref_read(&cmd->se_cmd.cmd_kref),
                    cmd->se_cmd.transport_state, cmd->se_cmd.t_state,
                    cmd->se_cmd.se_cmd_flags);
-               vha->hw->tgt.tgt_ops->free_cmd(cmd);
                return 0;
        }
        cmd->bufflen = se_cmd->data_length;
index 91074fd97f6444b4fceeea2730a40acb2bd2e46d..f4bf62b007a08ba818e36ac2633a7a13fb3cff19 100644 (file)
@@ -2475,6 +2475,7 @@ static void iscsi_if_stop_conn(struct iscsi_cls_conn *conn, int flag)
         */
        mutex_lock(&conn_mutex);
        conn->transport->stop_conn(conn, flag);
+       conn->state = ISCSI_CONN_DOWN;
        mutex_unlock(&conn_mutex);
 
 }
@@ -2901,6 +2902,13 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        default:
                err = transport->set_param(conn, ev->u.set_param.param,
                                           data, ev->u.set_param.len);
+               if ((conn->state == ISCSI_CONN_BOUND) ||
+                       (conn->state == ISCSI_CONN_UP)) {
+                       err = transport->set_param(conn, ev->u.set_param.param,
+                                       data, ev->u.set_param.len);
+               } else {
+                       return -ENOTCONN;
+               }
        }
 
        return err;
@@ -2960,6 +2968,7 @@ static int iscsi_if_ep_disconnect(struct iscsi_transport *transport,
                mutex_lock(&conn->ep_mutex);
                conn->ep = NULL;
                mutex_unlock(&conn->ep_mutex);
+               conn->state = ISCSI_CONN_DOWN;
        }
 
        transport->ep_disconnect(ep);
@@ -3727,6 +3736,8 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
                ev->r.retcode = transport->bind_conn(session, conn,
                                                ev->u.b_conn.transport_eph,
                                                ev->u.b_conn.is_leading);
+               if (!ev->r.retcode)
+                       conn->state = ISCSI_CONN_BOUND;
                mutex_unlock(&conn_mutex);
 
                if (ev->r.retcode || !transport->ep_connect)
@@ -3966,7 +3977,8 @@ iscsi_conn_attr(local_ipaddr, ISCSI_PARAM_LOCAL_IPADDR);
 static const char *const connection_state_names[] = {
        [ISCSI_CONN_UP] = "up",
        [ISCSI_CONN_DOWN] = "down",
-       [ISCSI_CONN_FAILED] = "failed"
+       [ISCSI_CONN_FAILED] = "failed",
+       [ISCSI_CONN_BOUND] = "bound"
 };
 
 static ssize_t show_conn_state(struct device *dev,
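Taken together, the hunks in this file sketch a small connection state machine; a summary of what is shown here (the BOUND-to-UP transition happens elsewhere in the login path):

	/* DOWN --bind_conn() success--> BOUND --login (outside these hunks)--> UP */
	/* UP/BOUND --stop_conn() or ep_disconnect()--> DOWN                       */
	/* set_param(): allowed only in BOUND or UP, otherwise -ENOTCONN           */

This closes a window where userspace could poke a connection whose resources had already been torn down.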
index bf1468e5bccbaa50c44d3ce81b59e37ab7b057e2..51143a68a8896db434658878f6176dd5de1d0826 100644 (file)
@@ -332,7 +332,7 @@ static const struct omap_prm_data dra7_prm_data[] = {
        {
                .name = "l3init", .base = 0x4ae07300,
                .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
-               .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+               .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
                .clkdm_name = "pcie"
        },
        {
@@ -830,8 +830,12 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
                       reset->prm->data->name, id);
 
 exit:
-       if (reset->clkdm)
+       if (reset->clkdm) {
+               /* At least dra7 iva needs a delay before clkdm idle */
+               if (has_rstst)
+                       udelay(1);
                pdata->clkdm_allow_idle(reset->clkdm);
+       }
 
        return ret;
 }
index 3cbc074992bc86af1606acf481c2ac60f49bba58..9ee797b8cb7eae116922be7e6c5421cce4adea53 100644 (file)
@@ -882,7 +882,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                        if (!bio) {
 new_bio:
                                nr_vecs = bio_max_segs(nr_pages);
-                               nr_pages -= nr_vecs;
                                /*
                                 * Calls bio_kmalloc() and sets bio->bi_end_io()
                                 */
@@ -939,6 +938,14 @@ new_bio:
 
        return 0;
 fail:
+       if (bio)
+               bio_put(bio);
+       while (req->bio) {
+               bio = req->bio;
+               req->bio = bio->bi_next;
+               bio_put(bio);
+       }
+       req->biotail = NULL;
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
index ac3c1dd3edeff119e6108b15259da040d760a3a9..4abddbebd4b2364e86624d5c51b7bde775e1ee7a 100644 (file)
@@ -42,6 +42,6 @@ config VFIO_PCI_IGD
 
 config VFIO_PCI_NVLINK2
        def_bool y
-       depends on VFIO_PCI && PPC_POWERNV
+       depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
        help
          VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
index be444407664af74e66d746a52a199513b501efb1..45cbfd4879a55310d7d17c6194dd1ba9a93943e6 100644 (file)
@@ -739,6 +739,12 @@ out:
        ret = vfio_lock_acct(dma, lock_acct, false);
 
 unpin_out:
+       if (batch->size == 1 && !batch->offset) {
+               /* May be a VM_PFNMAP pfn, which the batch can't remember. */
+               put_pfn(pfn, dma->prot);
+               batch->size = 0;
+       }
+
        if (ret < 0) {
                if (pinned && !rsvd) {
                        for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
index 41645fe6ad48a2902e5dbb1827b859e0e3e39ecc..ea0efd290c3722c34a7f788b724bbbc77493b3fb 100644 (file)
@@ -50,11 +50,11 @@ config XEN_BALLOON_MEMORY_HOTPLUG
 
          SUBSYSTEM=="memory", ACTION=="add", RUN+="/bin/sh -c '[ -f /sys$devpath/state ] && echo online > /sys$devpath/state'"
 
-config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+config XEN_MEMORY_HOTPLUG_LIMIT
        int "Hotplugged memory limit (in GiB) for a PV guest"
        default 512
        depends on XEN_HAVE_PVMMU
-       depends on XEN_BALLOON_MEMORY_HOTPLUG
+       depends on MEMORY_HOTPLUG
        help
          Maximum amount of memory (in GiB) that a PV guest can be
          expanded to when using memory hotplug.
index c9195fc67fd8fc41dbe5614b04e3cd9a5f0e42dd..eb737ed63afb6041f26207171b95960e5ec6e7a9 100644 (file)
@@ -851,8 +851,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
        fscache_wait_on_page_write(vnode->cache, vmf->page);
 #endif
 
-       if (PageWriteback(vmf->page) &&
-           wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
+       if (wait_on_page_writeback_killable(vmf->page))
                return VM_FAULT_RETRY;
 
        if (lock_page_killable(vmf->page) < 0)
index 92ed7d5df67744c90012d7c829b9beaf0fc037f7..09d6f7229db9d96c973d99cd822df1bbd6935328 100644 (file)
@@ -275,6 +275,8 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
                bio.bi_opf = dio_bio_write_op(iocb);
                task_io_account_write(ret);
        }
+       if (iocb->ki_flags & IOCB_NOWAIT)
+               bio.bi_opf |= REQ_NOWAIT;
        if (iocb->ki_flags & IOCB_HIPRI)
                bio_set_polled(&bio, iocb);
 
@@ -428,6 +430,8 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                        bio->bi_opf = dio_bio_write_op(iocb);
                        task_io_account_write(bio->bi_iter.bi_size);
                }
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       bio->bi_opf |= REQ_NOWAIT;
 
                dio->size += bio->bi_iter.bi_size;
                pos += bio->bi_iter.bi_size;
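Both direct I/O paths (the single-bio fast path above and this multi-bio loop) now propagate IOCB_NOWAIT into REQ_NOWAIT on the bio, so request allocation fails fast with -EAGAIN rather than sleeping. From userspace the flag is reached through RWF_NOWAIT; a minimal probe, assuming a block device path on the command line:

	/* Minimal RWF_NOWAIT probe (userspace sketch, error handling trimmed) */
	#define _GNU_SOURCE
	#include <sys/uio.h>
	#include <fcntl.h>
	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		struct iovec iov;
		void *buf;
		int fd;

		if (argc < 2 || (fd = open(argv[1], O_RDONLY | O_DIRECT)) < 0)
			return 1;
		if (posix_memalign(&buf, 4096, 4096))	/* O_DIRECT wants aligned buffers */
			return 1;
		iov.iov_base = buf;
		iov.iov_len = 4096;
		if (preadv2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
			printf("read would block; returned early as requested\n");
		free(buf);
		return 0;
	}

Before this change the O_DIRECT path did not tag its bios, so a nowait request could still block in the block layer.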
@@ -1240,13 +1244,13 @@ int bdev_disk_changed(struct block_device *bdev, bool invalidate)
 
        lockdep_assert_held(&bdev->bd_mutex);
 
-       clear_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
-
 rescan:
        ret = blk_drop_partitions(bdev);
        if (ret)
                return ret;
 
+       clear_bit(GD_NEED_PART_SCAN, &disk->state);
+
        /*
         * Historically we only set the capacity to zero for devices that
         * support partitions (independ of actually having partitions created).
index b634c42115ead9a256f7c261ef76b0fa4e1e7c59..b4fb997eda16257bf1bb126b51e0383aa36fa64f 100644 (file)
@@ -7,10 +7,12 @@ subdir-ccflags-y += -Wmissing-format-attribute
 subdir-ccflags-y += -Wmissing-prototypes
 subdir-ccflags-y += -Wold-style-definition
 subdir-ccflags-y += -Wmissing-include-dirs
-subdir-ccflags-y += $(call cc-option, -Wunused-but-set-variable)
-subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
-subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
-subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
+condflags := \
+       $(call cc-option, -Wunused-but-set-variable)            \
+       $(call cc-option, -Wunused-const-variable)              \
+       $(call cc-option, -Wpacked-not-aligned)                 \
+       $(call cc-option, -Wstringop-truncation)
+subdir-ccflags-y += $(condflags)
 # The following turn off the warnings enabled by -Wextra
 subdir-ccflags-y += -Wno-missing-field-initializers
 subdir-ccflags-y += -Wno-sign-compare
index 3a9c1e046ebe52d332cc3c5edfc2098b11d601e5..d05f73530af7a2ca709f66ad69e832d8587bce2e 100644 (file)
@@ -81,6 +81,9 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info)
        struct btrfs_dev_replace_item *ptr;
        u64 src_devid;
 
+       if (!dev_root)
+               return 0;
+
        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
index 41b718cfea406fc3f302b5a784a04ca1e2a5447e..289f1f09481d7923872466e520cd08647d1c65c2 100644 (file)
@@ -2387,8 +2387,9 @@ static int btrfs_read_roots(struct btrfs_fs_info *fs_info)
        } else {
                set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state);
                fs_info->dev_root = root;
-               btrfs_init_devices_late(fs_info);
        }
+       /* Initialize fs_info for all devices in any case */
+       btrfs_init_devices_late(fs_info);
 
        /* If IGNOREDATACSUMS is set don't bother reading the csum root. */
        if (!btrfs_test_opt(fs_info, IGNOREDATACSUMS)) {
@@ -3009,6 +3010,21 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
+       /*
+        * btrfs_find_orphan_roots() is responsible for finding all the dead
+        * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load
+        * them into the fs_info->fs_roots_radix tree. This must be done before
+        * calling btrfs_orphan_cleanup() on the tree root. If we don't do it
+        * first, then btrfs_orphan_cleanup() will delete a dead root's orphan
+        * item before the root's tree is deleted - this means that if we unmount
+        * or crash before the deletion completes, on the next mount we will not
+        * delete what remains of the tree because the orphan item does not
+        * exists anymore, which is what tells us we have a pending deletion.
+        */
+       ret = btrfs_find_orphan_roots(fs_info);
+       if (ret)
+               goto out;
+
        ret = btrfs_cleanup_fs_roots(fs_info);
        if (ret)
                goto out;
@@ -3068,7 +3084,6 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
-       ret = btrfs_find_orphan_roots(fs_info);
 out:
        return ret;
 }
index 7cdf65be3707cd4c1602de86698488b84e97bf3f..a520775949a0715d0e433d0545ecc5c4c201fbb8 100644 (file)
@@ -3099,11 +3099,13 @@ void btrfs_writepage_endio_finish_ordered(struct page *page, u64 start,
  * @bio_offset:        offset to the beginning of the bio (in bytes)
  * @page:      page where is the data to be verified
  * @pgoff:     offset inside the page
+ * @start:     logical offset in the file
  *
  * The length of such check is always one sector size.
  */
 static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
-                          u32 bio_offset, struct page *page, u32 pgoff)
+                          u32 bio_offset, struct page *page, u32 pgoff,
+                          u64 start)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
@@ -3130,8 +3132,8 @@ static int check_data_csum(struct inode *inode, struct btrfs_io_bio *io_bio,
        kunmap_atomic(kaddr);
        return 0;
 zeroit:
-       btrfs_print_data_csum_error(BTRFS_I(inode), page_offset(page) + pgoff,
-                                   csum, csum_expected, io_bio->mirror_num);
+       btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
+                                   io_bio->mirror_num);
        if (io_bio->device)
                btrfs_dev_stat_inc_and_print(io_bio->device,
                                             BTRFS_DEV_STAT_CORRUPTION_ERRS);
@@ -3184,7 +3186,8 @@ int btrfs_verify_data_csum(struct btrfs_io_bio *io_bio, u32 bio_offset,
             pg_off += sectorsize, bio_offset += sectorsize) {
                int ret;
 
-               ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off);
+               ret = check_data_csum(inode, io_bio, bio_offset, page, pg_off,
+                                     page_offset(page) + pg_off);
                if (ret < 0)
                        return -EIO;
        }
@@ -7910,7 +7913,8 @@ static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
                        ASSERT(pgoff < PAGE_SIZE);
                        if (uptodate &&
                            (!csum || !check_data_csum(inode, io_bio,
-                                       bio_offset, bvec.bv_page, pgoff))) {
+                                                      bio_offset, bvec.bv_page,
+                                                      pgoff, start))) {
                                clean_io_failure(fs_info, failure_tree, io_tree,
                                                 start, bvec.bv_page,
                                                 btrfs_ino(BTRFS_I(inode)),
@@ -8169,10 +8173,6 @@ static blk_qc_t btrfs_submit_direct(struct inode *inode, struct iomap *iomap,
                bio->bi_end_io = btrfs_end_dio_bio;
                btrfs_io_bio(bio)->logical = file_offset;
 
-               WARN_ON_ONCE(write && btrfs_is_zoned(fs_info) &&
-                            fs_info->max_zone_append_size &&
-                            bio_op(bio) != REQ_OP_ZONE_APPEND);
-
                if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
                        status = extract_ordered_extent(BTRFS_I(inode), bio,
                                                        file_offset);
index 14ff388fd3bda083d9d08a87a38de2799b1a5887..f0b9ef13153adbbd42323e7ad7ebdaee971bec19 100644 (file)
@@ -226,7 +226,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_qgroup_list *list;
 
-       btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
        list_del(&qgroup->dirty);
        while (!list_empty(&qgroup->groups)) {
                list = list_first_entry(&qgroup->groups,
@@ -243,7 +242,6 @@ static void __del_qgroup_rb(struct btrfs_fs_info *fs_info,
                list_del(&list->next_member);
                kfree(list);
        }
-       kfree(qgroup);
 }
 
 /* must be called with qgroup_lock held */
@@ -569,6 +567,8 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
                qgroup = rb_entry(n, struct btrfs_qgroup, node);
                rb_erase(n, &fs_info->qgroup_tree);
                __del_qgroup_rb(fs_info, qgroup);
+               btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+               kfree(qgroup);
        }
        /*
         * We call btrfs_free_qgroup_config() when unmounting
@@ -1578,6 +1578,14 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid)
        spin_lock(&fs_info->qgroup_lock);
        del_qgroup_rb(fs_info, qgroupid);
        spin_unlock(&fs_info->qgroup_lock);
+
+       /*
+        * Remove the qgroup from sysfs now without holding the qgroup_lock
+        * spinlock, since the sysfs_remove_group() function needs to take
+        * the mutex kernfs_mutex through kernfs_remove_by_name_ns().
+        */
+       btrfs_sysfs_del_one_qgroup(fs_info, qgroup);
+       kfree(qgroup);
 out:
        mutex_unlock(&fs_info->qgroup_ioctl_lock);
        return ret;
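The qgroup hunks all serve one lock-ordering rule: sysfs_remove_group() ends up taking kernfs_mutex and may sleep, so it cannot run inside the fs_info->qgroup_lock spinlock; the object is therefore unlinked under the spinlock and torn down afterwards. The general shape, with hypothetical types:

	/* Sketch: unlink under the spinlock, do sleeping teardown after */
	spin_lock(&tree_lock);
	rb_erase(&obj->node, &tree);		/* lookups can no longer find obj */
	spin_unlock(&tree_lock);

	sysfs_remove_group(obj->kobj, &obj_attr_group);	/* may sleep: kernfs_mutex */
	kfree(obj);

Moving the kfree() out of __del_qgroup_rb() is what makes the split possible: the rb-tree helper now only detaches, and each caller frees once it is out of atomic context.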
index bc3b33efddc5694df8afef1e481c8969829b11ff..1c6810bbaf8b5445c74913af2f5cd23387be0e5c 100644 (file)
@@ -7448,6 +7448,9 @@ static int btrfs_device_init_dev_stats(struct btrfs_device *device,
        int item_size;
        int i, ret, slot;
 
+       if (!device->fs_info->dev_root)
+               return 0;
+
        key.objectid = BTRFS_DEV_STATS_OBJECTID;
        key.type = BTRFS_PERSISTENT_ITEM_KEY;
        key.offset = device->devid;
index dfb14dbddf51d9d16f3361bd16b8ada627bffb20..38bb7764b454092ad65d1793b54b71b0db570c52 100644 (file)
@@ -118,6 +118,12 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
        cache->mnt = path.mnt;
        root = path.dentry;
 
+       ret = -EINVAL;
+       if (mnt_user_ns(path.mnt) != &init_user_ns) {
+               pr_warn("File cache on idmapped mounts not supported");
+               goto error_unsupported;
+       }
+
        /* check parameters */
        ret = -EOPNOTSUPP;
        if (d_is_negative(root) ||
index e027c718ca01adea87dbd12bd5fa7b2945fb9c0c..8ffc40e84a59442312295536a3ef44733b356f66 100644 (file)
@@ -24,17 +24,16 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct fscache_retrieval *op = monitor->op;
-       struct wait_bit_key *key = _key;
+       struct wait_page_key *key = _key;
        struct page *page = wait->private;
 
        ASSERT(key);
 
        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
-              key->flags, key->bit_nr);
+              key->page, key->bit_nr);
 
-       if (key->flags != &page->flags ||
-           key->bit_nr != PG_locked)
+       if (key->page != page || key->bit_nr != PG_locked)
                return 0;
 
        _debug("--- monitor %p %lx ---", page, page->flags);
index 2be22a5c690f88f7ff975071e99944988540fe13..d178cf85e926d479d9169c9bba772a4054d11001 100644 (file)
@@ -1130,8 +1130,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
                }
 
                /* If it's any one of the ACE we're replacing, skip! */
-               if (!mode_from_sid &&
-                               ((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
+               if (((compare_sids(&pntace->sid, &sid_unix_NFS_mode) == 0) ||
                                (compare_sids(&pntace->sid, pownersid) == 0) ||
                                (compare_sids(&pntace->sid, pgrpsid) == 0) ||
                                (compare_sids(&pntace->sid, &sid_everyone) == 0) ||
index 31fc8695abd6d76c76b15fb67c772677946049e5..67c056a9a519ce76033e2e40f14be129ed22d6a5 100644 (file)
@@ -919,8 +919,8 @@ struct cifs_ses {
        bool binding:1; /* are we binding the session? */
        __u16 session_flags;
        __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
-       __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
-       __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+       __u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
+       __u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
        __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
 
        __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
index 64fe5a47b5e8746d14671612c61ec0947cd7565e..9adc74bd9f8fafb57f8c0390c541da25ed030b48 100644 (file)
  */
 #define SMB3_SIGN_KEY_SIZE (16)
 
+/*
+ * Size of the smb3 encryption/decryption keys
+ */
+#define SMB3_ENC_DEC_KEY_SIZE (32)
+
 #define CIFS_CLIENT_CHALLENGE_SIZE (8)
 #define CIFS_SERVER_CHALLENGE_SIZE (8)
 #define CIFS_HMAC_MD5_HASH_SIZE (16)
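The new constant exists because signing and encryption key lengths diverge once the AES-256 ciphers are negotiated: signing keys stay at 16 bytes, but AES-256-GCM/CCM need 32-byte encryption/decryption keys. The session key buffers (previous hunk) are therefore sized by SMB3_ENC_DEC_KEY_SIZE, and crypt_message() below passes 32 or 16 bytes to crypto_aead_setkey() depending on the negotiated cipher type.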
index 26de4329d16158363b054e38464a13b19cb4d6d0..042e24aad41098ac29c92c7d837deb5b6c806e19 100644 (file)
@@ -165,6 +165,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
                        goto posix_open_ret;
                }
        } else {
+               cifs_revalidate_mapping(*pinode);
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
index 99a1951a01ec27e78a91f4f330bb14da0b7a2b9c..d9a990c9912137a9b7cfb05d7bff1d6563652e4f 100644 (file)
@@ -58,6 +58,7 @@
 #define SMB2_HMACSHA256_SIZE (32)
 #define SMB2_CMACAES_SIZE (16)
 #define SMB3_SIGNKEY_SIZE (16)
+#define SMB3_GCM128_CRYPTKEY_SIZE (16)
 #define SMB3_GCM256_CRYPTKEY_SIZE (32)
 
 /* Maximum buffer size value we can send with 1 credit */
index b50164e2c88dc65df34cfca7e5b2411fdf3d818e..aac384f69f74b684900144f2dab1e2d5f7d6156f 100644 (file)
@@ -754,8 +754,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
                }
        }
        spin_unlock(&cifs_tcp_ses_lock);
-       cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
-       return false;
+       cifs_dbg(FYI, "No file id matched, oplock break ignored\n");
+       return true;
 }
 
 void
index 9bae7e8deb09e07bc28332f8c41767a239858ccd..f703204fb185dc584023b2674fdea826d4a6f679 100644 (file)
@@ -2038,6 +2038,7 @@ smb2_duplicate_extents(const unsigned int xid,
 {
        int rc;
        unsigned int ret_data_len;
+       struct inode *inode;
        struct duplicate_extents_to_file dup_ext_buf;
        struct cifs_tcon *tcon = tlink_tcon(trgtfile->tlink);
 
@@ -2054,10 +2055,21 @@ smb2_duplicate_extents(const unsigned int xid,
        cifs_dbg(FYI, "Duplicate extents: src off %lld dst off %lld len %lld\n",
                src_off, dest_off, len);
 
-       rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
-       if (rc)
-               goto duplicate_extents_out;
+       inode = d_inode(trgtfile->dentry);
+       if (inode->i_size < dest_off + len) {
+               rc = smb2_set_file_size(xid, tcon, trgtfile, dest_off + len, false);
+               if (rc)
+                       goto duplicate_extents_out;
 
+               /*
+                * Although we could also set a plausible allocation size
+                * (i_blocks) here in addition to the file size, the target
+                * file is likely sparse in the reflink case. Its allocation
+                * size will be queried on the next revalidate, but it is
+                * important to update the file's cached size immediately.
+                */
+               cifs_setsize(inode, dest_off + len);
+       }
        rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
                        trgtfile->fid.volatile_fid,
                        FSCTL_DUPLICATE_EXTENTS_TO_FILE,
@@ -4158,7 +4170,7 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
                        if (ses->Suid == ses_id) {
                                ses_enc_key = enc ? ses->smb3encryptionkey :
                                        ses->smb3decryptionkey;
-                               memcpy(key, ses_enc_key, SMB3_SIGN_KEY_SIZE);
+                               memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
                                spin_unlock(&cifs_tcp_ses_lock);
                                return 0;
                        }
@@ -4185,7 +4197,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
        int rc = 0;
        struct scatterlist *sg;
        u8 sign[SMB2_SIGNATURE_SIZE] = {};
-       u8 key[SMB3_SIGN_KEY_SIZE];
+       u8 key[SMB3_ENC_DEC_KEY_SIZE];
        struct aead_request *req;
        char *iv;
        unsigned int iv_len;
@@ -4209,10 +4221,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
        tfm = enc ? server->secmech.ccmaesencrypt :
                                                server->secmech.ccmaesdecrypt;
 
-       if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM))
                rc = crypto_aead_setkey(tfm, key, SMB3_GCM256_CRYPTKEY_SIZE);
        else
-               rc = crypto_aead_setkey(tfm, key, SMB3_SIGN_KEY_SIZE);
+               rc = crypto_aead_setkey(tfm, key, SMB3_GCM128_CRYPTKEY_SIZE);
 
        if (rc) {
                cifs_server_dbg(VFS, "%s: Failed to set aead key %d\n", __func__, rc);
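The two hunks above size the AEAD key to the negotiated cipher. A minimal sketch of the selection rule, using the SMB3_GCM*_CRYPTKEY_SIZE defines added earlier in this series (the helper name is illustrative, not part of cifs.ko):

        static size_t smb3_crypt_key_len(__le16 cipher_type)
        {
                /* the AES-256 variants take 32-byte keys ... */
                if (cipher_type == SMB2_ENCRYPTION_AES256_CCM ||
                    cipher_type == SMB2_ENCRYPTION_AES256_GCM)
                        return SMB3_GCM256_CRYPTKEY_SIZE;
                /* ... everything else (the AES-128 modes) takes 16 bytes */
                return SMB3_GCM128_CRYPTKEY_SIZE;
        }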
index ebccd71cc60a3887f398829897cfc4c1956bfa91..e6fa76ab70be7f6aa8bb710e10a544de916347d3 100644 (file)
@@ -298,7 +298,8 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
 {
        unsigned char zero = 0x0;
        __u8 i[4] = {0, 0, 0, 1};
-       __u8 L[4] = {0, 0, 0, 128};
+       __u8 L128[4] = {0, 0, 0, 128};
+       __u8 L256[4] = {0, 0, 1, 0};
        int rc = 0;
        unsigned char prfhash[SMB2_HMACSHA256_SIZE];
        unsigned char *hashptr = prfhash;
@@ -354,8 +355,14 @@ static int generate_key(struct cifs_ses *ses, struct kvec label,
                goto smb3signkey_ret;
        }
 
-       rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
-                               L, 4);
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+               rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+                               L256, 4);
+       } else {
+               rc = crypto_shash_update(&server->secmech.sdeschmacsha256->shash,
+                               L128, 4);
+       }
        if (rc) {
                cifs_server_dbg(VFS, "%s: Could not update with L\n", __func__);
                goto smb3signkey_ret;
@@ -390,6 +397,9 @@ generate_smb3signingkey(struct cifs_ses *ses,
                        const struct derivation_triplet *ptriplet)
 {
        int rc;
+#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
+       struct TCP_Server_Info *server = ses->server;
+#endif
 
        /*
         * All channels use the same encryption/decryption keys but
@@ -422,11 +432,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
                rc = generate_key(ses, ptriplet->encryption.label,
                                  ptriplet->encryption.context,
                                  ses->smb3encryptionkey,
-                                 SMB3_SIGN_KEY_SIZE);
+                                 SMB3_ENC_DEC_KEY_SIZE);
                rc = generate_key(ses, ptriplet->decryption.label,
                                  ptriplet->decryption.context,
                                  ses->smb3decryptionkey,
-                                 SMB3_SIGN_KEY_SIZE);
+                                 SMB3_ENC_DEC_KEY_SIZE);
                if (rc)
                        return rc;
        }
@@ -442,14 +452,23 @@ generate_smb3signingkey(struct cifs_ses *ses,
         */
        cifs_dbg(VFS, "Session Id    %*ph\n", (int)sizeof(ses->Suid),
                        &ses->Suid);
+       cifs_dbg(VFS, "Cipher type   %d\n", server->cipher_type);
        cifs_dbg(VFS, "Session Key   %*ph\n",
                 SMB2_NTLMV2_SESSKEY_SIZE, ses->auth_key.response);
        cifs_dbg(VFS, "Signing Key   %*ph\n",
                 SMB3_SIGN_KEY_SIZE, ses->smb3signingkey);
-       cifs_dbg(VFS, "ServerIn Key  %*ph\n",
-                SMB3_SIGN_KEY_SIZE, ses->smb3encryptionkey);
-       cifs_dbg(VFS, "ServerOut Key %*ph\n",
-                SMB3_SIGN_KEY_SIZE, ses->smb3decryptionkey);
+       if ((server->cipher_type == SMB2_ENCRYPTION_AES256_CCM) ||
+               (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)) {
+               cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+                               SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+               cifs_dbg(VFS, "ServerOut Key %*ph\n",
+                               SMB3_GCM256_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+       } else {
+               cifs_dbg(VFS, "ServerIn Key  %*ph\n",
+                               SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3encryptionkey);
+               cifs_dbg(VFS, "ServerOut Key %*ph\n",
+                               SMB3_GCM128_CRYPTKEY_SIZE, ses->smb3decryptionkey);
+       }
 #endif
        return rc;
 }
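The L128/L256 arrays above are the big-endian bit-length field of the counter-mode KDF used for SMB3 key derivation: 128 bits = 0x00000080 and 256 bits = 0x00000100. A hedged sketch of how both encodings could be produced (the helper is ours, not cifs.ko's):

        static void kdf_put_len_bits(u8 out[4], u32 key_bytes)
        {
                u32 bits = key_bytes * 8;       /* 16 -> 128, 32 -> 256 */

                out[0] = bits >> 24;
                out[1] = bits >> 16;
                out[2] = bits >> 8;             /* 256 sets this byte to 1 */
                out[3] = bits;                  /* 128 sets this byte to 128 */
        }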
index 3dc10bfd8c3baa4a7130ab9caf6cdbf90cf5bd84..433c4d3c3c1c8c76b93ee0f848a08b3debe4d0bd 100644 (file)
@@ -16,7 +16,6 @@
 #include <linux/rculist_nulls.h>
 #include <linux/cpu.h>
 #include <linux/tracehook.h>
-#include <linux/freezer.h>
 
 #include "../kernel/sched/sched.h"
 #include "io-wq.h"
@@ -388,11 +387,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 
 static bool io_flush_signals(void)
 {
-       if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
+       if (unlikely(test_thread_flag(TIF_NOTIFY_SIGNAL))) {
                __set_current_state(TASK_RUNNING);
-               if (current->task_works)
-                       task_work_run();
-               clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
+               tracehook_notify_signal();
                return true;
        }
        return false;
@@ -487,7 +484,7 @@ static int io_wqe_worker(void *data)
        worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
        io_wqe_inc_running(worker);
 
-       sprintf(buf, "iou-wrk-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-wrk-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
@@ -505,10 +502,15 @@ loop:
                if (io_flush_signals())
                        continue;
                ret = schedule_timeout(WORKER_IDLE_TIMEOUT);
-               if (try_to_freeze() || ret)
-                       continue;
-               if (fatal_signal_pending(current))
+               if (signal_pending(current)) {
+                       struct ksignal ksig;
+
+                       if (!get_signal(&ksig))
+                               continue;
                        break;
+               }
+               if (ret)
+                       continue;
                /* timed out, exit unless we're the fixed worker */
                if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
                    !(worker->flags & IO_WORKER_F_FIXED))
@@ -709,16 +711,20 @@ static int io_wq_manager(void *data)
        char buf[TASK_COMM_LEN];
        int node;
 
-       sprintf(buf, "iou-mgr-%d", wq->task_pid);
+       snprintf(buf, sizeof(buf), "iou-mgr-%d", wq->task_pid);
        set_task_comm(current, buf);
 
        do {
                set_current_state(TASK_INTERRUPTIBLE);
                io_wq_check_workers(wq);
                schedule_timeout(HZ);
-               try_to_freeze();
-               if (fatal_signal_pending(current))
+               if (signal_pending(current)) {
+                       struct ksignal ksig;
+
+                       if (!get_signal(&ksig))
+                               continue;
                        set_bit(IO_WQ_BIT_EXIT, &wq->state);
+               }
        } while (!test_bit(IO_WQ_BIT_EXIT, &wq->state));
 
        io_wq_check_workers(wq);
@@ -1065,7 +1071,11 @@ static void io_wq_destroy(struct io_wq *wq)
 
        for_each_node(node) {
                struct io_wqe *wqe = wq->wqes[node];
-               WARN_ON_ONCE(!wq_list_empty(&wqe->work_list));
+               struct io_cb_cancel_data match = {
+                       .fn             = io_wq_work_match_all,
+                       .cancel_all     = true,
+               };
+               io_wqe_cancel_pending_work(wqe, &match);
                kfree(wqe);
        }
        io_wq_put_hash(wq->hash);
index 543551d70327fa95f58e3d95718f3d917c50ff65..8be5420506485ac2cde65e2ab57806f3cbd44e19 100644 (file)
@@ -78,7 +78,6 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
-#include <linux/freezer.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -698,6 +697,7 @@ enum {
        REQ_F_NO_FILE_TABLE_BIT,
        REQ_F_LTIMEOUT_ACTIVE_BIT,
        REQ_F_COMPLETE_INLINE_BIT,
+       REQ_F_REISSUE_BIT,
 
        /* not a real bit, just to check we're not overflowing the space */
        __REQ_F_LAST_BIT,
@@ -741,6 +741,8 @@ enum {
        REQ_F_LTIMEOUT_ACTIVE   = BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
        /* completion is deferred through io_comp_state */
        REQ_F_COMPLETE_INLINE   = BIT(REQ_F_COMPLETE_INLINE_BIT),
+       /* caller should reissue async */
+       REQ_F_REISSUE           = BIT(REQ_F_REISSUE_BIT),
 };
 
 struct async_poll {
@@ -1095,8 +1097,6 @@ static bool io_match_task(struct io_kiocb *head,
        io_for_each_link(req, head) {
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
-               if (req->task->files == files)
-                       return true;
        }
        return false;
 }
@@ -1216,7 +1216,7 @@ static void io_prep_async_work(struct io_kiocb *req)
        if (req->flags & REQ_F_ISREG) {
                if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
                        io_wq_hash_work(&req->work, file_inode(req->file));
-       } else {
+       } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
                if (def->unbound_nonreg_file)
                        req->work.flags |= IO_WQ_WORK_UNBOUND;
        }
@@ -1239,16 +1239,16 @@ static void io_queue_async_work(struct io_kiocb *req)
        BUG_ON(!tctx);
        BUG_ON(!tctx->io_wq);
 
-       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
-                                       &req->work, req->flags);
        /* init ->work of the whole link before punting */
        io_prep_async_link(req);
+       trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
+                                       &req->work, req->flags);
        io_wq_enqueue(tctx->io_wq, &req->work);
        if (link)
                io_queue_linked_timeout(link);
 }
 
-static void io_kill_timeout(struct io_kiocb *req)
+static void io_kill_timeout(struct io_kiocb *req, int status)
 {
        struct io_timeout_data *io = req->async_data;
        int ret;
@@ -1258,31 +1258,11 @@ static void io_kill_timeout(struct io_kiocb *req)
                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&req->timeout.list);
-               io_cqring_fill_event(req, 0);
+               io_cqring_fill_event(req, status);
                io_put_req_deferred(req, 1);
        }
 }
 
-/*
- * Returns true if we found and killed one or more timeouts
- */
-static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
-                            struct files_struct *files)
-{
-       struct io_kiocb *req, *tmp;
-       int canceled = 0;
-
-       spin_lock_irq(&ctx->completion_lock);
-       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
-               if (io_match_task(req, tsk, files)) {
-                       io_kill_timeout(req);
-                       canceled++;
-               }
-       }
-       spin_unlock_irq(&ctx->completion_lock);
-       return canceled != 0;
-}
-
 static void __io_queue_deferred(struct io_ring_ctx *ctx)
 {
        do {
@@ -1327,7 +1307,7 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx)
                        break;
 
                list_del_init(&req->timeout.list);
-               io_kill_timeout(req);
+               io_kill_timeout(req, 0);
        } while (!list_empty(&ctx->timeout_list));
 
        ctx->cq_last_tm_flush = seq;
@@ -2524,13 +2504,14 @@ static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
 {
        int cflags = 0;
 
-       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_reissue(req))
+       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
+       if ((res == -EAGAIN || res == -EOPNOTSUPP) && io_rw_should_reissue(req)) {
+               req->flags |= REQ_F_REISSUE;
                return;
+       }
        if (res != req->result)
                req_set_fail_links(req);
-
-       if (req->rw.kiocb.ki_flags & IOCB_WRITE)
-               kiocb_end_write(req);
        if (req->flags & REQ_F_BUFFER_SELECTED)
                cflags = io_put_rw_kbuf(req);
        __io_req_complete(req, issue_flags, res, cflags);
@@ -3307,11 +3288,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
        ret = io_iter_do_read(req, iter);
 
-       if (ret == -EIOCBQUEUED) {
-               if (req->async_data)
-                       iov_iter_revert(iter, io_size - iov_iter_count(iter));
-               goto out_free;
-       } else if (ret == -EAGAIN) {
+       if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
                /* IOPOLL retry should happen for io-wq threads */
                if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
                        goto done;
@@ -3321,6 +3298,8 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
                /* some cases will consume bytes even on error returns */
                iov_iter_revert(iter, io_size - iov_iter_count(iter));
                ret = 0;
+       } else if (ret == -EIOCBQUEUED) {
+               goto out_free;
        } else if (ret <= 0 || ret == io_size || !force_nonblock ||
                   (req->flags & REQ_F_NOWAIT) || !(req->flags & REQ_F_ISREG)) {
                /* read all, failed, already did sync or don't want to retry */
@@ -3433,6 +3412,9 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        else
                ret2 = -EINVAL;
 
+       if (req->flags & REQ_F_REISSUE)
+               ret2 = -EAGAIN;
+
        /*
         * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
         * retry them without IOCB_NOWAIT.
@@ -3442,8 +3424,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
        /* no retry on NONBLOCK nor RWF_NOWAIT */
        if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
                goto done;
-       if (ret2 == -EIOCBQUEUED && req->async_data)
-               iov_iter_revert(iter, io_size - iov_iter_count(iter));
        if (!force_nonblock || ret2 != -EAGAIN) {
                /* IOPOLL retry should happen for io-wq threads */
                if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
@@ -3978,6 +3958,7 @@ static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
 static int io_provide_buffers_prep(struct io_kiocb *req,
                                   const struct io_uring_sqe *sqe)
 {
+       unsigned long size;
        struct io_provide_buf *p = &req->pbuf;
        u64 tmp;
 
@@ -3991,7 +3972,8 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
        p->addr = READ_ONCE(sqe->addr);
        p->len = READ_ONCE(sqe->len);
 
-       if (!access_ok(u64_to_user_ptr(p->addr), (p->len * p->nbufs)))
+       size = (unsigned long)p->len * p->nbufs;
+       if (!access_ok(u64_to_user_ptr(p->addr), size))
                return -EFAULT;
 
        p->bgid = READ_ONCE(sqe->buf_group);
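The widening cast above matters because p->len and p->nbufs are 32-bit, so their product can wrap before access_ok() sees it. A standalone illustration (values are ours; on 64-bit kernels unsigned long is wide enough to hold the full product):

        u32 len = 0x10000, nbufs = 0x10000;

        /* 32-bit multiply wraps to 0 and would pass a range check */
        u32 bad = len * nbufs;
        /* widened multiply keeps the real size, 1UL << 32 */
        unsigned long good = (unsigned long)len * nbufs;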
@@ -4820,7 +4802,6 @@ static int io_connect(struct io_kiocb *req, unsigned int issue_flags)
                        ret = -ENOMEM;
                        goto out;
                }
-               io = req->async_data;
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
@@ -5583,7 +5564,8 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
-       io_req_track_inflight(req);
+       if (is_timeout_link)
+               io_req_track_inflight(req);
        return 0;
 }
 
@@ -6186,6 +6168,7 @@ static void io_wq_submit_work(struct io_wq_work *work)
                ret = -ECANCELED;
 
        if (!ret) {
+               req->flags &= ~REQ_F_REISSUE;
                do {
                        ret = io_issue_sqe(req, 0);
                        /*
@@ -6479,8 +6462,6 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
        ret = io_init_req(ctx, req, sqe);
        if (unlikely(ret)) {
 fail_req:
-               io_put_req(req);
-               io_req_complete(req, ret);
                if (link->head) {
                        /* fail even hard links since we don't submit */
                        link->head->flags |= REQ_F_FAIL_LINK;
@@ -6488,6 +6469,8 @@ fail_req:
                        io_req_complete(link->head, -ECANCELED);
                        link->head = NULL;
                }
+               io_put_req(req);
+               io_req_complete(req, ret);
                return ret;
        }
        ret = io_req_prep(req, sqe);
@@ -6740,7 +6723,7 @@ static int io_sq_thread(void *data)
        char buf[TASK_COMM_LEN];
        DEFINE_WAIT(wait);
 
-       sprintf(buf, "iou-sqp-%d", sqd->task_pid);
+       snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
        set_task_comm(current, buf);
        current->pf_io_worker = NULL;
 
@@ -6755,17 +6738,25 @@ static int io_sq_thread(void *data)
                int ret;
                bool cap_entries, sqt_spin, needs_sched;
 
-               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state)) {
+               if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
+                   signal_pending(current)) {
+                       bool did_sig = false;
+
                        mutex_unlock(&sqd->lock);
+                       if (signal_pending(current)) {
+                               struct ksignal ksig;
+
+                               did_sig = get_signal(&ksig);
+                       }
                        cond_resched();
                        mutex_lock(&sqd->lock);
+                       if (did_sig)
+                               break;
                        io_run_task_work();
                        io_run_task_work_head(&sqd->park_task_work);
                        timeout = jiffies + sqd->sq_thread_idle;
                        continue;
                }
-               if (fatal_signal_pending(current))
-                       break;
                sqt_spin = false;
                cap_entries = !list_is_singular(&sqd->ctx_list);
                list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
@@ -6808,7 +6799,6 @@ static int io_sq_thread(void *data)
 
                        mutex_unlock(&sqd->lock);
                        schedule();
-                       try_to_freeze();
                        mutex_lock(&sqd->lock);
                        list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
                                io_ring_clear_wakeup_flag(ctx);
@@ -6873,7 +6863,7 @@ static int io_run_task_work_sig(void)
                return 1;
        if (!signal_pending(current))
                return 0;
-       if (test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))
+       if (test_thread_flag(TIF_NOTIFY_SIGNAL))
                return -ERESTARTSYS;
        return -EINTR;
 }
@@ -8563,6 +8553,14 @@ static void io_ring_exit_work(struct work_struct *work)
        struct io_tctx_node *node;
        int ret;
 
+       /* prevent SQPOLL from submitting new requests */
+       if (ctx->sq_data) {
+               io_sq_thread_park(ctx->sq_data);
+               list_del_init(&ctx->sqd_list);
+               io_sqd_update_thread_idle(ctx->sq_data);
+               io_sq_thread_unpark(ctx->sq_data);
+       }
+
        /*
         * If we're doing polled IO and end up having requests being
         * submitted async (out-of-line), then completions can come in while
@@ -8599,6 +8597,28 @@ static void io_ring_exit_work(struct work_struct *work)
        io_ring_ctx_free(ctx);
 }
 
+/* Returns true if we found and killed one or more timeouts */
+static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
+                            struct files_struct *files)
+{
+       struct io_kiocb *req, *tmp;
+       int canceled = 0;
+
+       spin_lock_irq(&ctx->completion_lock);
+       list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
+               if (io_match_task(req, tsk, files)) {
+                       io_kill_timeout(req, -ECANCELED);
+                       canceled++;
+               }
+       }
+       if (canceled != 0)
+               io_commit_cqring(ctx);
+       spin_unlock_irq(&ctx->completion_lock);
+       if (canceled != 0)
+               io_cqring_ev_posted(ctx);
+       return canceled != 0;
+}
+
 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
 {
        unsigned long index;
@@ -8614,14 +8634,6 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
                io_unregister_personality(ctx, index);
        mutex_unlock(&ctx->uring_lock);
 
-       /* prevent SQPOLL from submitting new requests */
-       if (ctx->sq_data) {
-               io_sq_thread_park(ctx->sq_data);
-               list_del_init(&ctx->sqd_list);
-               io_sqd_update_thread_idle(ctx->sq_data);
-               io_sq_thread_unpark(ctx->sq_data);
-       }
-
        io_kill_timeouts(ctx, NULL, NULL);
        io_poll_remove_all(ctx, NULL, NULL);
 
@@ -8998,6 +9010,8 @@ void __io_uring_task_cancel(void)
 
        /* make sure overflow events are dropped */
        atomic_inc(&tctx->in_idle);
+       __io_uring_files_cancel(NULL);
+
        do {
                /* read completions before cancelations */
                inflight = tctx_inflight(tctx);
index 9b3b06da568c8cb470514016dd58a78d37290565..e47fde1182de0efe35e27571edc877ff7ed41449 100644 (file)
@@ -44,7 +44,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
 
 static inline int reiserfs_xattrs_initialized(struct super_block *sb)
 {
-       return REISERFS_SB(sb)->priv_root != NULL;
+       return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
 }
 
 #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
index eb02072d28dd6dc6c375facc3da051bddafac30a..723763746238d8b260b0b5c7dbca75495f65035f 100644 (file)
@@ -152,14 +152,18 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end
+                   || (end - start) >
+                   (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= lookup_table_start ||
+           (lookup_table_start - start) >
+           (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 11581bf31af41dcfd99293ef4a261d8e8440a331..ea5387679723fd46e3ee46b216e2fe2f4e0954a0 100644 (file)
@@ -97,14 +97,16 @@ __le64 *squashfs_read_id_index_table(struct super_block *sb,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end || (end - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= id_table_start || (id_table_start - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 8d64edb80ebf0cbf6fd9dc32e68d92c486751587..b3fdc8212c5f52d940391ec0345d6f1ab805efcf 100644 (file)
@@ -17,6 +17,7 @@
 
 /* size of metadata (inode and directory) blocks */
 #define SQUASHFS_METADATA_SIZE         8192
+#define SQUASHFS_BLOCK_OFFSET          2
 
 /* default size of block device I/O */
 #ifdef CONFIG_SQUASHFS_4K_DEVBLK_SIZE
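The three lookup-table checks above all enforce the same bound; SQUASHFS_BLOCK_OFFSET accounts for the 2-byte length header that precedes each metadata block on disk. A minimal restatement of the predicate (the function name is illustrative):

        static bool squashfs_meta_range_ok(u64 start, u64 end)
        {
                /* 2-byte header plus at most one metadata block of payload */
                return start < end &&
                       (end - start) <= SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET;
        }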
index ead66670b41a593a1e24607bfc7bb0dc8ebcef75..087cab8c78f4e86ea7c97e3d3b4266896159f29d 100644 (file)
@@ -109,14 +109,16 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
                start = le64_to_cpu(table[n]);
                end = le64_to_cpu(table[n + 1]);
 
-               if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+               if (start >= end || (end - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                        kfree(table);
                        return ERR_PTR(-EINVAL);
                }
        }
 
        start = le64_to_cpu(table[indexes - 1]);
-       if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+       if (start >= table_start || (table_start - start) >
+                               (SQUASHFS_METADATA_SIZE + SQUASHFS_BLOCK_OFFSET)) {
                kfree(table);
                return ERR_PTR(-EINVAL);
        }
index 02a716a0af5d4950f5f43d5a2e39a9bc36b351f4..f28b097c658f474c89c071d80de7391809afdc67 100644 (file)
@@ -233,6 +233,7 @@ struct acpi_pnp_type {
 
 struct acpi_device_pnp {
        acpi_bus_id bus_id;             /* Object name */
+       int instance_no;                /* Instance number of this object */
        struct acpi_pnp_type type;      /* ID type */
        acpi_bus_address bus_address;   /* _ADR */
        char *unique_id;                /* _UID */
index fcdaab72391672f077de07d3ad49d1f3dc7d6eeb..3bdcfc4401b7b07980e646f5c8fcd7a37b1d9949 100644 (file)
@@ -222,10 +222,14 @@ void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
 void __acpi_unmap_table(void __iomem *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
+void acpi_boot_table_prepare (void);
 void acpi_boot_table_init (void);
 int acpi_mps_check (void);
 int acpi_numa_init (void);
 
+int acpi_locate_initial_tables (void);
+void acpi_reserve_initial_tables (void);
+void acpi_table_init_complete (void);
 int acpi_table_init (void);
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init acpi_table_parse_entries(char *id, unsigned long table_size,
@@ -814,9 +818,12 @@ static inline int acpi_boot_init(void)
        return 0;
 }
 
+static inline void acpi_boot_table_prepare(void)
+{
+}
+
 static inline void acpi_boot_table_init(void)
 {
-       return;
 }
 
 static inline int acpi_mps_check(void)
index bc6bc8383b434ef501146a7bd5028408b94c065a..158aefae1030db487e64891c26150616b391b4d4 100644 (file)
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_ELVPRIV            ((__force req_flags_t)(1 << 12))
 /* account into disk and partition IO statistics */
 #define RQF_IO_STAT            ((__force req_flags_t)(1 << 13))
-/* request came from our alloc pool */
-#define RQF_ALLOCED            ((__force req_flags_t)(1 << 14))
 /* runtime pm request */
 #define RQF_PM                 ((__force req_flags_t)(1 << 15))
 /* on IO scheduler merge hash */
index cccaef1088eaede592ff5d5cf3904009ddafda93..3625f019767dfd5f3f2dd40b23f170ccdb9b8768 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/capability.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <linux/percpu-refcount.h>
 
 struct bpf_verifier_env;
 struct bpf_verifier_log;
@@ -556,7 +557,8 @@ struct bpf_tramp_progs {
  *      fentry = a set of program to run before calling original function
  *      fexit = a set of program to run after original function
  */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+struct bpf_tramp_image;
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                                const struct btf_func_model *m, u32 flags,
                                struct bpf_tramp_progs *tprogs,
                                void *orig_call);
@@ -565,6 +567,8 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog);
 void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
 u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog);
 void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start);
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
 
 struct bpf_ksym {
        unsigned long            start;
@@ -583,6 +587,18 @@ enum bpf_tramp_prog_type {
        BPF_TRAMP_REPLACE, /* more than MAX */
 };
 
+struct bpf_tramp_image {
+       void *image;
+       struct bpf_ksym ksym;
+       struct percpu_ref pcref;
+       void *ip_after_call;
+       void *ip_epilogue;
+       union {
+               struct rcu_head rcu;
+               struct work_struct work;
+       };
+};
+
 struct bpf_trampoline {
        /* hlist for trampoline_table */
        struct hlist_node hlist;
@@ -605,9 +621,8 @@ struct bpf_trampoline {
        /* Number of attached programs. A counter per kind. */
        int progs_cnt[BPF_TRAMP_MAX];
        /* Executable image of trampoline */
-       void *image;
+       struct bpf_tramp_image *cur_image;
        u64 selector;
-       struct bpf_ksym ksym;
 };
 
 struct bpf_attach_target_info {
@@ -691,6 +706,8 @@ void bpf_image_ksym_add(void *data, struct bpf_ksym *ksym);
 void bpf_image_ksym_del(struct bpf_ksym *ksym);
 void bpf_ksym_add(struct bpf_ksym *ksym);
 void bpf_ksym_del(struct bpf_ksym *ksym);
+int bpf_jit_charge_modmem(u32 pages);
+void bpf_jit_uncharge_modmem(u32 pages);
 #else
 static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
                                           struct bpf_trampoline *tr)
@@ -787,7 +804,6 @@ struct bpf_prog_aux {
        bool func_proto_unreliable;
        bool sleepable;
        bool tail_call_reachable;
-       enum bpf_tramp_prog_type trampoline_prog_type;
        struct hlist_node tramp_hlist;
        /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
        const struct btf_type *attach_func_proto;
@@ -1093,7 +1109,7 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                _ret;                                                   \
         })
 
-#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
+#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \
        ({                                              \
                struct bpf_prog_array_item *_item;      \
                struct bpf_prog *_prog;                 \
@@ -1106,7 +1122,8 @@ int bpf_prog_array_copy(struct bpf_prog_array *old_array,
                        goto _out;                      \
                _item = &_array->items[0];              \
                while ((_prog = READ_ONCE(_item->prog))) {              \
-                       bpf_cgroup_storage_set(_item->cgroup_storage);  \
+                       if (set_cg_storage)             \
+                               bpf_cgroup_storage_set(_item->cgroup_storage);  \
                        _ret &= func(_prog, ctx);       \
                        _item++;                        \
                }                                       \
@@ -1153,10 +1170,10 @@ _out:                                                   \
        })
 
 #define BPF_PROG_RUN_ARRAY(array, ctx, func)           \
-       __BPF_PROG_RUN_ARRAY(array, ctx, func, false)
+       __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true)
 
 #define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func)     \
-       __BPF_PROG_RUN_ARRAY(array, ctx, func, true)
+       __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false)
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
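The bpf_tramp_image introduced above pairs a percpu_ref with an RCU/workqueue union, which suggests the lifetime scheme: tasks executing in the trampoline pin the image, and teardown frees it only once the ref drains. A sketch consistent with the __bpf_tramp_enter/__bpf_tramp_exit declarations (the real bodies live in kernel/bpf/trampoline.c):

        void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
        {
                percpu_ref_get(&tr->pcref);     /* pin image for this task */
        }

        void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
        {
                percpu_ref_put(&tr->pcref);     /* may trigger deferred free */
        }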
index 7f4ac87c0b323ad162b4d8dda2729b110e702be5..5c641f930cafb8fdcdb708fdfeb0d36c8a3b0bc5 100644 (file)
@@ -253,7 +253,11 @@ struct target_type {
 #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
 
 /*
- * Indicates that a target supports host-managed zoned block devices.
+ * Indicates support for zoned block devices:
+ * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
+ *   block devices but does not support combining different zoned models.
+ * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
+ *   devices with different zoned models.
  */
 #ifdef CONFIG_BLK_DEV_ZONED
 #define DM_TARGET_ZONED_HM             0x00000040
@@ -275,6 +279,15 @@ struct target_type {
 #define DM_TARGET_PASSES_CRYPTO                0x00000100
 #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)
 
+#ifdef CONFIG_BLK_DEV_ZONED
+#define DM_TARGET_MIXED_ZONED_MODEL    0x00000200
+#define dm_target_supports_mixed_zoned_model(type) \
+       ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
+#else
+#define DM_TARGET_MIXED_ZONED_MODEL    0x00000000
+#define dm_target_supports_mixed_zoned_model(type) (false)
+#endif
+
 struct dm_target {
        struct dm_table *table;
        struct target_type *type;
index ce59a6a6a0087e6ab71de2200076d2189b64a2d0..9eb77c87a83b0a23d453267a1873af249a22ddcd 100644 (file)
@@ -320,7 +320,14 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int host1x_client_register(struct host1x_client *client);
+int __host1x_client_register(struct host1x_client *client,
+                            struct lock_class_key *key);
+#define host1x_client_register(class) \
+       ({ \
+               static struct lock_class_key __key; \
+               __host1x_client_register(class, &__key); \
+       })
+
 int host1x_client_unregister(struct host1x_client *client);
 
 int host1x_client_suspend(struct host1x_client *client);
index 2ad6e92f124adc121a7692ec66ebaedea0d2c4ca..0bff345c4bc68460381aa7af563f02b61cc7e998 100644 (file)
@@ -113,6 +113,11 @@ static inline bool hugetlb_cgroup_disabled(void)
        return !cgroup_subsys_enabled(hugetlb_cgrp_subsys);
 }
 
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+       css_put(&h_cg->css);
+}
+
 extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                        struct hugetlb_cgroup **ptr);
 extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,
@@ -138,7 +143,8 @@ extern void hugetlb_cgroup_uncharge_counter(struct resv_map *resv,
 
 extern void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                                struct file_region *rg,
-                                               unsigned long nr_pages);
+                                               unsigned long nr_pages,
+                                               bool region_del);
 
 extern void hugetlb_cgroup_file_init(void) __init;
 extern void hugetlb_cgroup_migrate(struct page *oldhpage,
@@ -147,7 +153,8 @@ extern void hugetlb_cgroup_migrate(struct page *oldhpage,
 #else
 static inline void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                                       struct file_region *rg,
-                                                      unsigned long nr_pages)
+                                                      unsigned long nr_pages,
+                                                      bool region_del)
 {
 }
 
@@ -185,6 +192,10 @@ static inline bool hugetlb_cgroup_disabled(void)
        return true;
 }
 
+static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg)
+{
+}
+
 static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
                                               struct hugetlb_cgroup **ptr)
 {
index 96556c64c95daeff81775132ea93f9758e8d6f27..10c94a3936ca76c782a7e4080046088e83969c1a 100644 (file)
@@ -43,13 +43,14 @@ static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
        if (likely(success)) {
                struct vlan_pcpu_stats *pcpu_stats;
 
-               pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+               pcpu_stats = get_cpu_ptr(vlan->pcpu_stats);
                u64_stats_update_begin(&pcpu_stats->syncp);
                pcpu_stats->rx_packets++;
                pcpu_stats->rx_bytes += len;
                if (multicast)
                        pcpu_stats->rx_multicast++;
                u64_stats_update_end(&pcpu_stats->syncp);
+               put_cpu_ptr(vlan->pcpu_stats);
        } else {
                this_cpu_inc(vlan->pcpu_stats->rx_errors);
        }
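get_cpu_ptr()/put_cpu_ptr() differ from this_cpu_ptr() in that they disable preemption across the dereference, so the writer above cannot migrate CPUs in the middle of a u64_stats sequence-counted update. The shape of the pattern, with ptr standing in for any per-cpu pointer:

        stats = get_cpu_ptr(ptr);       /* preempt_disable() + per-cpu deref */
        /* ... update syncp-protected counters on a stable CPU ... */
        put_cpu_ptr(ptr);               /* preempt_enable() */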
index d13e3cd938b4660583dba1edd3e07c2fa5c44c2b..5984fff3f1758aa1281b301b77c539b3c5759730 100644 (file)
@@ -460,7 +460,7 @@ static inline void memblock_free_late(phys_addr_t base, phys_addr_t size)
 /*
  * Set the allocation direction to bottom-up or top-down.
  */
-static inline __init void memblock_set_bottom_up(bool enable)
+static inline __init_memblock void memblock_set_bottom_up(bool enable)
 {
        memblock.bottom_up = enable;
 }
@@ -470,7 +470,7 @@ static inline __init void memblock_set_bottom_up(bool enable)
  * if this is true, that said, memblock will allocate memory
  * in bottom-up direction.
  */
-static inline __init bool memblock_bottom_up(void)
+static inline __init_memblock bool memblock_bottom_up(void)
 {
        return memblock.bottom_up;
 }
index d75ef8aa8fac02840e917fd3057eb568704cba3d..b7deb790f25769ba0d92d91c3483a63c815dd27d 100644 (file)
@@ -547,4 +547,11 @@ static inline const char *mlx5_qp_state_str(int state)
        }
 }
 
+static inline int mlx5_get_qp_default_ts(struct mlx5_core_dev *dev)
+{
+       return !MLX5_CAP_ROCE(dev, qp_ts_format) ?
+                      MLX5_QPC_TIMESTAMP_FORMAT_FREE_RUNNING :
+                      MLX5_QPC_TIMESTAMP_FORMAT_DEFAULT;
+}
+
 #endif /* MLX5_QP_H */
index 64a71bf2053674c32322555b5ad32821acdbc6ca..8ba434287387b7d67fb9ce2f2f37024f41ec9894 100644 (file)
@@ -1461,16 +1461,28 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 
 #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 
+/*
+ * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
+ * set the tags for all pages to the native kernel tag value 0xff, as the
+ * default value 0x00 then maps to 0xff.
+ */
+
 static inline u8 page_kasan_tag(const struct page *page)
 {
-       if (kasan_enabled())
-               return (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
-       return 0xff;
+       u8 tag = 0xff;
+
+       if (kasan_enabled()) {
+               tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
+               tag ^= 0xff;
+       }
+
+       return tag;
 }
 
 static inline void page_kasan_tag_set(struct page *page, u8 tag)
 {
        if (kasan_enabled()) {
+               tag ^= 0xff;
                page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
                page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
        }
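A worked example of the xor'ed storage described above, with the native tag 0xff and a freshly zeroed page->flags field:

        u8 stored = 0x00;               /* tag bits after page->flags is cleared */
        u8 tag = stored ^ 0xff;         /* page_kasan_tag() reads back 0xff (native) */

        /* round-tripping a real tag, e.g. 0xab: */
        stored = 0xab ^ 0xff;           /* page_kasan_tag_set() stores 0x54 */
        tag = stored ^ 0xff;            /* reads back as 0xab */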
index b8200782dedeb9dfeeef28fde6175a03ba6dd14d..1a6a9eb6d3fac950d50d4860ae86bee959465efc 100644 (file)
@@ -169,11 +169,11 @@ struct mmu_notifier_ops {
         * the last refcount is dropped.
         *
         * If blockable argument is set to false then the callback cannot
-        * sleep and has to return with -EAGAIN. 0 should be returned
-        * otherwise. Please note that if invalidate_range_start approves
-        * a non-blocking behavior then the same applies to
-        * invalidate_range_end.
-        *
+        * sleep and has to return with -EAGAIN if sleeping would be required.
+        * 0 should be returned otherwise. Please note that notifiers that can
+        * fail invalidate_range_start are not allowed to implement
+        * invalidate_range_end, as there is no mechanism for informing the
+        * notifier that its start failed.
         */
        int (*invalidate_range_start)(struct mmu_notifier *subscription,
                                      const struct mmu_notifier_range *range);
index 0cd631a19727627243431974f17e7d1ac9d6f747..515cff77a4f469612cfc5f6075adbe77cee452ae 100644 (file)
@@ -185,7 +185,7 @@ extern void mutex_lock_io(struct mutex *lock);
 # define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
 # define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
 # define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
-# define mutex_lock_io_nested(lock, subclass) mutex_lock(lock)
+# define mutex_lock_io_nested(lock, subclass) mutex_lock_io(lock)
 #endif
 
 /*
index 5b67ea89d5f26d72257ba03a1d669999cb570f92..87a5d186faff469d624708fb8ae7cb69cf2df59a 100644 (file)
@@ -360,6 +360,7 @@ enum {
        NAPI_STATE_IN_BUSY_POLL,        /* sk_busy_loop() owns this NAPI */
        NAPI_STATE_PREFER_BUSY_POLL,    /* prefer busy-polling over softirq processing*/
        NAPI_STATE_THREADED,            /* The poll is performed inside its own thread*/
+       NAPI_STATE_SCHED_THREADED,      /* Napi is currently scheduled in threaded mode */
 };
 
 enum {
@@ -372,6 +373,7 @@ enum {
        NAPIF_STATE_IN_BUSY_POLL        = BIT(NAPI_STATE_IN_BUSY_POLL),
        NAPIF_STATE_PREFER_BUSY_POLL    = BIT(NAPI_STATE_PREFER_BUSY_POLL),
        NAPIF_STATE_THREADED            = BIT(NAPI_STATE_THREADED),
+       NAPIF_STATE_SCHED_THREADED      = BIT(NAPI_STATE_SCHED_THREADED),
 };
 
 enum gro_result {
index 8ebb64193757100a62de2d6bae70b5e7009a4110..8ec48466410a68dcbc363f5cfee697d8dc51f075 100644 (file)
@@ -227,7 +227,7 @@ struct xt_table {
        unsigned int valid_hooks;
 
        /* Man behind the curtain... */
-       struct xt_table_info __rcu *private;
+       struct xt_table_info *private;
 
        /* Set this to THIS_MODULE if you are a module, otherwise NULL */
        struct module *me;
@@ -376,7 +376,7 @@ static inline unsigned int xt_write_recseq_begin(void)
         * since addend is most likely 1
         */
        __this_cpu_add(xt_recseq.sequence, addend);
-       smp_wmb();
+       smp_mb();
 
        return addend;
 }
@@ -448,9 +448,6 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
 
 struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
 
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table);
-
 #ifdef CONFIG_COMPAT
 #include <net/compat.h>
 
index 20225b067583ab552a0fc22a22c6bad81cabd12a..8c9947fd62f308cb42cfb4910c3dface136a80e4 100644 (file)
@@ -559,7 +559,6 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
        return pgoff;
 }
 
-/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
 struct wait_page_key {
        struct page *page;
        int bit_nr;
@@ -683,6 +682,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
 
 int put_and_wait_on_page_locked(struct page *page, int state);
 void wait_on_page_writeback(struct page *page);
+int wait_on_page_writeback_killable(struct page *page);
 extern void end_page_writeback(struct page *page);
 void wait_for_stable_page(struct page *page);
 
index 6d0a33d1c0db6defaa005335c3e285f0f1ac686c..f2c9ee71cb2c30bcfd730e566a3967e8de2df21e 100644 (file)
@@ -285,6 +285,7 @@ struct nf_bridge_info {
 struct tc_skb_ext {
        __u32 chain;
        __u16 mru;
+       bool post_ct;
 };
 #endif
 
index 073a9e0ec07d06c6fc8f3a323483bbd3401e0df5..ad970416260dd208b43098e17df9ad49b4da7693 100644 (file)
@@ -14,5 +14,6 @@ struct umd_info {
 int umd_load_blob(struct umd_info *info, const void *data, size_t len);
 int umd_unload_blob(struct umd_info *info);
 int fork_usermode_driver(struct umd_info *info);
+void umd_cleanup_helper(struct umd_info *info);
 
 #endif /* __LINUX_USERMODE_DRIVER_H__ */
index 92c0160b3352314fa2d5dbdff241cf24f3706ecf..a91e3d90df8a540daebcd0b9149bf98de2e9ec7f 100644 (file)
@@ -229,9 +229,10 @@ static inline int xa_err(void *entry)
  *
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
- * Two common ranges are predefined for you:
+ * Three common ranges are predefined for you:
  * * xa_limit_32b      - [0 - UINT_MAX]
  * * xa_limit_31b      - [0 - INT_MAX]
+ * * xa_limit_16b      - [0 - USHRT_MAX]
  */
 struct xa_limit {
        u32 max;
@@ -242,6 +243,7 @@ struct xa_limit {
 
 #define xa_limit_32b   XA_LIMIT(0, UINT_MAX)
 #define xa_limit_31b   XA_LIMIT(0, INT_MAX)
+#define xa_limit_16b   XA_LIMIT(0, USHRT_MAX)
 
 typedef unsigned __bitwise xa_mark_t;
 #define XA_MARK_0              ((__force xa_mark_t)0U)
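A hypothetical user of the new limit, allocating an ID that must fit in an on-wire u16 handle (handles, entry and use_handle() are placeholders):

        u32 id;
        int err;

        err = xa_alloc(&handles, &id, entry, xa_limit_16b, GFP_KERNEL);
        if (!err)
                use_handle((u16)id);    /* id is in [0, USHRT_MAX] */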
index 26f134ad3a25a6081035964713ee55211d7a6535..75b1e734e9c2e7ed640cae9f148f6aa5f1204ade 100644 (file)
@@ -550,4 +550,15 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
                dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie);
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+                              struct sk_buff *skb, u32 mtu, bool confirm_neigh);
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+                           struct sk_buff *skb);
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old);
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+                                            struct sk_buff *skb,
+                                            const void *daddr);
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst);
+
 #endif /* _NET_DST_H */
index 10a625760de9128c57dbc047e6c766dd29069122..3c8c59471bc19d53c0cebd4a7e5ef42886d34783 100644 (file)
@@ -282,7 +282,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
        return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req);
 
 static inline void inet_csk_prepare_for_destroy_sock(struct sock *sk)
index fdec57d862b738feaaae996715a14f8404669821..5aaced6bf13e397a79041819be2a9d1e27e9239c 100644 (file)
@@ -1536,6 +1536,7 @@ struct nft_trans_flowtable {
        struct nft_flowtable            *flowtable;
        bool                            update;
        struct list_head                hook_list;
+       u32                             flags;
 };
 
 #define nft_trans_flowtable(trans)     \
@@ -1544,6 +1545,8 @@ struct nft_trans_flowtable {
        (((struct nft_trans_flowtable *)trans->data)->update)
 #define nft_trans_flowtable_hooks(trans)       \
        (((struct nft_trans_flowtable *)trans->data)->hook_list)
+#define nft_trans_flowtable_flags(trans)       \
+       (((struct nft_trans_flowtable *)trans->data)->flags)
 
 int __init nft_chain_filter_init(void);
 void nft_chain_filter_fini(void);
index 7bc057aee40b112b5422bbb7e78985546f004689..a10a319d7eb20d2587c29f2abd725aa0779f08c2 100644 (file)
@@ -410,6 +410,7 @@ static inline struct fib_nh *fib_info_nh(struct fib_info *fi, int nhsel)
 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
                       struct netlink_ext_ack *extack);
 
+/* Caller should either hold rcu_read_lock(), or RTNL. */
 static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
 {
        struct nh_info *nhi;
@@ -430,6 +431,29 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
        return NULL;
 }
 
+/* Variant of nexthop_fib6_nh().
+ * Caller should either hold rcu_read_lock_bh(), or RTNL.
+ */
+static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
+{
+       struct nh_info *nhi;
+
+       if (nh->is_group) {
+               struct nh_group *nh_grp;
+
+               nh_grp = rcu_dereference_bh_rtnl(nh->nh_grp);
+               nh = nexthop_mpath_select(nh_grp, 0);
+               if (!nh)
+                       return NULL;
+       }
+
+       nhi = rcu_dereference_bh_rtnl(nh->nh_info);
+       if (nhi->family == AF_INET6)
+               return &nhi->fib6_nh;
+
+       return NULL;
+}
+
 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
 {
        struct fib6_nh *fib6_nh;
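A hypothetical caller of the new _bh variant, from a context that holds only the BH-flavoured RCU read side (nh and dev are placeholders):

        struct fib6_nh *fib6_nh;

        rcu_read_lock_bh();
        fib6_nh = nexthop_fib6_nh_bh(nh);
        if (fib6_nh)
                dev = fib6_nh->fib_nh_dev;
        rcu_read_unlock_bh();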
index 932f0d79d60cbbab60db73baddc8b650935b3bf6..0b39eff1d50aebc43784ebec3fe615fec0bf995c 100644 (file)
@@ -168,7 +168,8 @@ static inline void red_set_vars(struct red_vars *v)
        v->qcount       = -1;
 }
 
-static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_log)
+static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog,
+                                   u8 Scell_log, u8 *stab)
 {
        if (fls(qth_min) + Wlog > 32)
                return false;
@@ -178,6 +179,13 @@ static inline bool red_check_params(u32 qth_min, u32 qth_max, u8 Wlog, u8 Scell_
                return false;
        if (qth_max < qth_min)
                return false;
+       if (stab) {
+               int i;
+
+               for (i = 0; i < RED_STAB_SIZE; i++)
+                       if (stab[i] >= 32)
+                               return false;
+       }
        return true;
 }
 
@@ -287,7 +295,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
        int  shift;
 
        /*
-        * The problem: ideally, average length queue recalcultion should
+        * The problem: ideally, average length queue recalculation should
         * be done over constant clock intervals. This is too expensive, so
         * that the calculation is driven by outgoing packets.
         * When the queue is idle we have to model this clock by hand.
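The stab check added above guards the later use of the table entries as shift counts on the average queue length; a value of 32 or more would be an undefined shift on a 32-bit quantity. Restated on its own (the helper name is ours):

        static bool red_stab_ok(const u8 *stab)
        {
                int i;

                for (i = 0; i < RED_STAB_SIZE; i++)
                        if (stab[i] >= 32)      /* would overflow a 32-bit shift */
                                return false;
                return true;
        }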
index e2091bb2b3a8e72715d83f5b1c36304a026b4330..4da61c950e931a424d60c6af5b7400e80d6bb904 100644 (file)
@@ -33,6 +33,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  *
  *     @list: Used internally
  *     @kind: Identifier
+ *     @netns_refund: Physical device, move to init_net on netns exit
  *     @maxtype: Highest device specific netlink attribute number
  *     @policy: Netlink policy for device specific attribute validation
  *     @validate: Optional validation function for netlink/changelink parameters
@@ -64,6 +65,7 @@ struct rtnl_link_ops {
        size_t                  priv_size;
        void                    (*setup)(struct net_device *dev);
 
+       bool                    netns_refund;
        unsigned int            maxtype;
        const struct nla_policy *policy;
        int                     (*validate)(struct nlattr *tb[],
index 636810ddcd9ba6a0f0ae58fad9c32f68374a7e19..0b6266fd6bf6f4496b09dd170869ff4db38dfeb9 100644 (file)
@@ -936,7 +936,7 @@ static inline void sk_acceptq_added(struct sock *sk)
 
 static inline bool sk_acceptq_is_full(const struct sock *sk)
 {
-       return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
+       return READ_ONCE(sk->sk_ack_backlog) >= READ_ONCE(sk->sk_max_ack_backlog);
 }
 
 /*
index 8a26a2ffa95234e9ce1a31492af236f774f71244..fc5a39839b4b02cc32642379f5eeeee662020af9 100644 (file)
@@ -193,6 +193,7 @@ enum iscsi_connection_state {
        ISCSI_CONN_UP = 0,
        ISCSI_CONN_DOWN,
        ISCSI_CONN_FAILED,
+       ISCSI_CONN_BOUND,
 };
 
 struct iscsi_cls_conn {
index ac6474e4f29d5aeb77f7dc2445108fcce2915f4a..d0a64ee97c6df05cd630c95e071b8f9112c27795 100644 (file)
@@ -2,29 +2,6 @@
 #ifndef _UAPI__LINUX_BLKPG_H
 #define _UAPI__LINUX_BLKPG_H
 
-/*
- * Partition table and disk geometry handling
- *
- * A single ioctl with lots of subfunctions:
- *
- * Device number stuff:
- *    get_whole_disk()         (given the device number of a partition,
- *                               find the device number of the encompassing disk)
- *    get_all_partitions()     (given the device number of a disk, return the
- *                              device numbers of all its known partitions)
- *
- * Partition stuff:
- *    add_partition()
- *    delete_partition()
- *    test_partition_in_use()  (also for test_disk_in_use)
- *
- * Geometry stuff:
- *    get_geometry()
- *    set_geometry()
- *    get_bios_drivedata()
- *
- * For today, only the partition stuff - aeb, 990515
- */
 #include <linux/compiler.h>
 #include <linux/ioctl.h>
 
@@ -52,9 +29,8 @@ struct blkpg_partition {
        long long start;                /* starting offset in bytes */
        long long length;               /* length in bytes */
        int pno;                        /* partition number */
-       char devname[BLKPG_DEVNAMELTH]; /* partition name, like sda5 or c0d1p2,
-                                          to be used in kernel messages */
-       char volname[BLKPG_VOLNAMELTH]; /* volume label */
+       char devname[BLKPG_DEVNAMELTH]; /* unused / ignored */
+       char volname[BLKPG_VOLNAMELTH]; /* unused / ignore */
 };
 
 #endif /* _UAPI__LINUX_BLKPG_H */
index 79c893310492b33689352b9dff5f53a42dd9d72b..4ba4ef0ff63a4084a065a9228be99a641594c6ce 100644 (file)
@@ -3850,7 +3850,7 @@ union bpf_attr {
  *
  * long bpf_check_mtu(void *ctx, u32 ifindex, u32 *mtu_len, s32 len_diff, u64 flags)
  *     Description
- *             Check ctx packet size against exceeding MTU of net device (based
+ *             Check whether the packet size exceeds the MTU of the net device (based
  *             on *ifindex*).  This helper will likely be used in combination
  *             with helpers that adjust/change the packet size.
  *
@@ -3867,6 +3867,14 @@ union bpf_attr {
  *             against the current net device.  This is practical if this isn't
  *             used prior to redirect.
  *
+ *             On input *mtu_len* must be a valid pointer, else the verifier
+ *             will reject the BPF program.  If the value *mtu_len* is
+ *             initialized to zero then the ctx packet size is used.  When
+ *             the value *mtu_len* is provided as input, it specifies the L3
+ *             length that the MTU check is done against.  Remember that XDP
+ *             and TC lengths operate at L2, but this value is L3, as it
+ *             corresponds to the MTU and IP-header tot_len values, which
+ *             are L3 (similar behavior to **bpf_fib_lookup**\ ()).
+ *
  *             The Linux kernel route table can configure MTUs on a more
  *             specific per route level, which is not provided by this helper.
  *             For route level MTU checks use the **bpf_fib_lookup**\ ()
@@ -3891,11 +3899,9 @@ union bpf_attr {
  *
  *             On return *mtu_len* pointer contains the MTU value of the net
  *             device.  Remember the net device configured MTU is the L3 size,
- *             which is returned here and XDP and TX length operate at L2.
+ *             which is returned here and XDP and TC length operate at L2.
  *             Helper take this into account for you, but remember when using
- *             MTU value in your BPF-code.  On input *mtu_len* must be a valid
- *             pointer and be initialized (to zero), else verifier will reject
- *             BPF program.
+ *             MTU value in your BPF-code.
  *
  *     Return
  *             * 0 on success, and populate MTU value in *mtu_len* pointer.
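
A minimal TC-BPF sketch of the *mtu_len* contract described above, assuming libbpf's bpf_helpers.h; the section name and the 100-byte growth are illustrative:

        #include <linux/bpf.h>
        #include <linux/pkt_cls.h>
        #include <bpf/bpf_helpers.h>

        SEC("tc")
        int mtu_guard(struct __sk_buff *ctx)
        {
                __u32 mtu_len = 0;  /* 0 on input: check the ctx packet size */

                /* Would growing the packet by 100 bytes exceed the MTU of
                 * the current net device (ifindex 0)? */
                if (bpf_check_mtu(ctx, 0, &mtu_len, 100, 0))
                        return TC_ACT_SHOT;
                return TC_ACT_OK;   /* mtu_len now holds the device MTU (L3) */
        }

        char _license[] SEC("license") = "GPL";
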
index aea26ab1431c14a1f5b3785b9a37c2b7a253c0ff..bff5032c98df42e2225a378108e700c936ba6147 100644 (file)
@@ -3,7 +3,6 @@
 #define __UAPI_PSAMPLE_H
 
 enum {
-       /* sampled packet metadata */
        PSAMPLE_ATTR_IIFINDEX,
        PSAMPLE_ATTR_OIFINDEX,
        PSAMPLE_ATTR_ORIGSIZE,
@@ -11,10 +10,8 @@ enum {
        PSAMPLE_ATTR_GROUP_SEQ,
        PSAMPLE_ATTR_SAMPLE_RATE,
        PSAMPLE_ATTR_DATA,
-       PSAMPLE_ATTR_TUNNEL,
-
-       /* commands attributes */
        PSAMPLE_ATTR_GROUP_REFCOUNT,
+       PSAMPLE_ATTR_TUNNEL,
 
        __PSAMPLE_ATTR_MAX
 };
index 6639640523c0bb1afb6a51d9e3e141fc30d8be8a..b58b2efb9b431db68121e7c5cdaf6dedbfa3c517 100644 (file)
@@ -109,7 +109,7 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
        fd = *(int *)key;
        f = fget_raw(fd);
        if (!f)
-               return NULL;
+               return ERR_PTR(-EBADF);
 
        sdata = inode_storage_lookup(f->f_inode, map, true);
        fput(f);
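
A minimal caller-side sketch of the distinction this creates, following the generic lookup pattern in the bpf syscall code; the surrounding logic is an assumption for illustration: a bad fd now surfaces as -EBADF instead of looking like a missing element:

        void *val = map->ops->map_lookup_elem(map, &fd);

        if (IS_ERR(val))
                return PTR_ERR(val);    /* e.g. -EBADF for a stale fd */
        if (!val)
                return -ENOENT;         /* element genuinely absent */
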
index 1a666a975416cf9b3f59d3e29139da0e9465523c..70f6fd4fa30562bee9ce992f8e4981b1c8396419 100644 (file)
@@ -430,7 +430,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
 
                tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
                tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
-               err = arch_prepare_bpf_trampoline(image,
+               err = arch_prepare_bpf_trampoline(NULL, image,
                                                  st_map->image + PAGE_SIZE,
                                                  &st_ops->func_models[i], 0,
                                                  tprogs, NULL);
index 3a283bf97f2f674927d17da17efec2f5906c6580..75244ecb2389f113ee8f4cc21ee05fb51d4a7e85 100644 (file)
@@ -827,7 +827,7 @@ static int __init bpf_jit_charge_init(void)
 }
 pure_initcall(bpf_jit_charge_init);
 
-static int bpf_jit_charge_modmem(u32 pages)
+int bpf_jit_charge_modmem(u32 pages)
 {
        if (atomic_long_add_return(pages, &bpf_jit_current) >
            (bpf_jit_limit >> PAGE_SHIFT)) {
@@ -840,7 +840,7 @@ static int bpf_jit_charge_modmem(u32 pages)
        return 0;
 }
 
-static void bpf_jit_uncharge_modmem(u32 pages)
+void bpf_jit_uncharge_modmem(u32 pages)
 {
        atomic_long_sub(pages, &bpf_jit_current);
 }
index 79c5772465f14f1e64a2b962e684b5dfa5338790..53736e52c1dfaa91e00cec2949a396d4354e080f 100644 (file)
@@ -60,9 +60,12 @@ static int finish(void)
                         &magic, sizeof(magic), &pos);
        if (n != sizeof(magic))
                return -EPIPE;
+
        tgid = umd_ops.info.tgid;
-       wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
-       umd_ops.info.tgid = NULL;
+       if (tgid) {
+               wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+               umd_cleanup_helper(&umd_ops.info);
+       }
        return 0;
 }
 
@@ -80,10 +83,18 @@ static int __init load_umd(void)
 
 static void __exit fini_umd(void)
 {
+       struct pid *tgid;
+
        bpf_preload_ops = NULL;
+
        /* kill UMD in case it's still there due to earlier error */
-       kill_pid(umd_ops.info.tgid, SIGKILL, 1);
-       umd_ops.info.tgid = NULL;
+       tgid = umd_ops.info.tgid;
+       if (tgid) {
+               kill_pid(tgid, SIGKILL, 1);
+
+               wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+               umd_cleanup_helper(&umd_ops.info);
+       }
        umd_unload_blob(&umd_ops.info);
 }
 late_initcall(load_umd);
index c859bc46d06caa7c57428ca12aa419c3c5fac766..250503482cda2424cced5608eb40fe92fde6420c 100644 (file)
@@ -854,6 +854,11 @@ static int map_create(union bpf_attr *attr)
                        err = PTR_ERR(btf);
                        goto free_map;
                }
+               if (btf_is_kernel(btf)) {
+                       btf_put(btf);
+                       err = -EACCES;
+                       goto free_map;
+               }
                map->btf = btf;
 
                if (attr->btf_value_type_id) {
index 7bc3b3209224e79577e21c1f66553cefe67b4978..1f3a4be4b175f3d8e05489bb022df91b4bef83f4 100644 (file)
@@ -57,19 +57,10 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym)
                           PAGE_SIZE, true, ksym->name);
 }
 
-static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
-{
-       struct bpf_ksym *ksym = &tr->ksym;
-
-       snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu", tr->key);
-       bpf_image_ksym_add(tr->image, ksym);
-}
-
 static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
 {
        struct bpf_trampoline *tr;
        struct hlist_head *head;
-       void *image;
        int i;
 
        mutex_lock(&trampoline_mutex);
@@ -84,14 +75,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
        if (!tr)
                goto out;
 
-       /* is_root was checked earlier. No need for bpf_jit_charge_modmem() */
-       image = bpf_jit_alloc_exec_page();
-       if (!image) {
-               kfree(tr);
-               tr = NULL;
-               goto out;
-       }
-
        tr->key = key;
        INIT_HLIST_NODE(&tr->hlist);
        hlist_add_head(&tr->hlist, head);
@@ -99,9 +82,6 @@ static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
        mutex_init(&tr->mutex);
        for (i = 0; i < BPF_TRAMP_MAX; i++)
                INIT_HLIST_HEAD(&tr->progs_hlist[i]);
-       tr->image = image;
-       INIT_LIST_HEAD_RCU(&tr->ksym.lnode);
-       bpf_trampoline_ksym_add(tr);
 out:
        mutex_unlock(&trampoline_mutex);
        return tr;
@@ -185,10 +165,142 @@ bpf_trampoline_get_progs(const struct bpf_trampoline *tr, int *total)
        return tprogs;
 }
 
+static void __bpf_tramp_image_put_deferred(struct work_struct *work)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(work, struct bpf_tramp_image, work);
+       bpf_image_ksym_del(&im->ksym);
+       bpf_jit_free_exec(im->image);
+       bpf_jit_uncharge_modmem(1);
+       percpu_ref_exit(&im->pcref);
+       kfree_rcu(im, rcu);
+}
+
+/* callback, fexit step 3 or fentry step 2 */
+static void __bpf_tramp_image_put_rcu(struct rcu_head *rcu)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(rcu, struct bpf_tramp_image, rcu);
+       INIT_WORK(&im->work, __bpf_tramp_image_put_deferred);
+       schedule_work(&im->work);
+}
+
+/* callback, fexit step 2. Called after percpu_ref_kill confirms. */
+static void __bpf_tramp_image_release(struct percpu_ref *pcref)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(pcref, struct bpf_tramp_image, pcref);
+       call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
+}
+
+/* callback, fexit or fentry step 1 */
+static void __bpf_tramp_image_put_rcu_tasks(struct rcu_head *rcu)
+{
+       struct bpf_tramp_image *im;
+
+       im = container_of(rcu, struct bpf_tramp_image, rcu);
+       if (im->ip_after_call)
+               /* the case of fmod_ret/fexit trampoline and CONFIG_PREEMPTION=y */
+               percpu_ref_kill(&im->pcref);
+       else
+               /* the case of fentry trampoline */
+               call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu);
+}
+
+static void bpf_tramp_image_put(struct bpf_tramp_image *im)
+{
+       /* The trampoline image that calls original function is using:
+        * rcu_read_lock_trace to protect sleepable bpf progs
+        * rcu_read_lock to protect normal bpf progs
+        * percpu_ref to protect trampoline itself
+        * rcu tasks to protect the trampoline asm not covered by percpu_ref
+        * (the few asm insns before __bpf_tramp_enter and
+        *  after __bpf_tramp_exit)
+        *
+        * The trampoline is unreachable before bpf_tramp_image_put().
+        *
+        * First, patch the trampoline to avoid calling into fexit progs.
+        * The progs will be freed even if the original function is still
+        * executing or sleeping.
+        * In case of CONFIG_PREEMPT=y use call_rcu_tasks() to wait for
+        * the first few asm instructions to execute and call into
+        * __bpf_tramp_enter->percpu_ref_get.
+        * Then use percpu_ref_kill to wait for the trampoline and the original
+        * function to finish.
+        * Then use call_rcu_tasks() to make sure the few asm insns in
+        * the trampoline epilogue are done as well.
+        *
+        * In the !PREEMPT case a task that got interrupted in the first asm
+        * insns won't go through an RCU quiescent state, which is what
+        * percpu_ref_kill would be waiting for. Hence the first
+        * call_rcu_tasks() is not necessary.
+        */
+       if (im->ip_after_call) {
+               int err = bpf_arch_text_poke(im->ip_after_call, BPF_MOD_JUMP,
+                                            NULL, im->ip_epilogue);
+               WARN_ON(err);
+               if (IS_ENABLED(CONFIG_PREEMPTION))
+                       call_rcu_tasks(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+               else
+                       percpu_ref_kill(&im->pcref);
+               return;
+       }
+
+       /* The trampoline without fexit and fmod_ret progs doesn't call original
+        * function and doesn't use percpu_ref.
+        * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+        * Then use call_rcu_tasks() to wait for the rest of trampoline asm
+        * and normal progs.
+        */
+       call_rcu_tasks_trace(&im->rcu, __bpf_tramp_image_put_rcu_tasks);
+}
+
+static struct bpf_tramp_image *bpf_tramp_image_alloc(u64 key, u32 idx)
+{
+       struct bpf_tramp_image *im;
+       struct bpf_ksym *ksym;
+       void *image;
+       int err = -ENOMEM;
+
+       im = kzalloc(sizeof(*im), GFP_KERNEL);
+       if (!im)
+               goto out;
+
+       err = bpf_jit_charge_modmem(1);
+       if (err)
+               goto out_free_im;
+
+       err = -ENOMEM;
+       im->image = image = bpf_jit_alloc_exec_page();
+       if (!image)
+               goto out_uncharge;
+
+       err = percpu_ref_init(&im->pcref, __bpf_tramp_image_release, 0, GFP_KERNEL);
+       if (err)
+               goto out_free_image;
+
+       ksym = &im->ksym;
+       INIT_LIST_HEAD_RCU(&ksym->lnode);
+       snprintf(ksym->name, KSYM_NAME_LEN, "bpf_trampoline_%llu_%u", key, idx);
+       bpf_image_ksym_add(image, ksym);
+       return im;
+
+out_free_image:
+       bpf_jit_free_exec(im->image);
+out_uncharge:
+       bpf_jit_uncharge_modmem(1);
+out_free_im:
+       kfree(im);
+out:
+       return ERR_PTR(err);
+}
+
 static int bpf_trampoline_update(struct bpf_trampoline *tr)
 {
-       void *old_image = tr->image + ((tr->selector + 1) & 1) * PAGE_SIZE/2;
-       void *new_image = tr->image + (tr->selector & 1) * PAGE_SIZE/2;
+       struct bpf_tramp_image *im;
        struct bpf_tramp_progs *tprogs;
        u32 flags = BPF_TRAMP_F_RESTORE_REGS;
        int err, total;
@@ -198,41 +310,42 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
                return PTR_ERR(tprogs);
 
        if (total == 0) {
-               err = unregister_fentry(tr, old_image);
+               err = unregister_fentry(tr, tr->cur_image->image);
+               bpf_tramp_image_put(tr->cur_image);
+               tr->cur_image = NULL;
                tr->selector = 0;
                goto out;
        }
 
+       im = bpf_tramp_image_alloc(tr->key, tr->selector);
+       if (IS_ERR(im)) {
+               err = PTR_ERR(im);
+               goto out;
+       }
+
        if (tprogs[BPF_TRAMP_FEXIT].nr_progs ||
            tprogs[BPF_TRAMP_MODIFY_RETURN].nr_progs)
                flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME;
 
-       /* Though the second half of trampoline page is unused a task could be
-        * preempted in the middle of the first half of trampoline and two
-        * updates to trampoline would change the code from underneath the
-        * preempted task. Hence wait for tasks to voluntarily schedule or go
-        * to userspace.
-        * The same trampoline can hold both sleepable and non-sleepable progs.
-        * synchronize_rcu_tasks_trace() is needed to make sure all sleepable
-        * programs finish executing.
-        * Wait for these two grace periods together.
-        */
-       synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);
-
-       err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
+       err = arch_prepare_bpf_trampoline(im, im->image, im->image + PAGE_SIZE,
                                          &tr->func.model, flags, tprogs,
                                          tr->func.addr);
        if (err < 0)
                goto out;
 
-       if (tr->selector)
+       WARN_ON(tr->cur_image && tr->selector == 0);
+       WARN_ON(!tr->cur_image && tr->selector);
+       if (tr->cur_image)
                /* progs already running at this address */
-               err = modify_fentry(tr, old_image, new_image);
+               err = modify_fentry(tr, tr->cur_image->image, im->image);
        else
                /* first time registering */
-               err = register_fentry(tr, new_image);
+               err = register_fentry(tr, im->image);
        if (err)
                goto out;
+       if (tr->cur_image)
+               bpf_tramp_image_put(tr->cur_image);
+       tr->cur_image = im;
        tr->selector++;
 out:
        kfree(tprogs);
@@ -364,17 +477,12 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
                goto out;
        if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
                goto out;
-       bpf_image_ksym_del(&tr->ksym);
-       /* This code will be executed when all bpf progs (both sleepable and
-        * non-sleepable) went through
-        * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred().
-        * Hence no need for another synchronize_rcu_tasks_trace() here,
-        * but synchronize_rcu_tasks() is still needed, since trampoline
-        * may not have had any sleepable programs and we need to wait
-        * for tasks to get out of trampoline code before freeing it.
+       /* This code will be executed even when the last bpf_tramp_image
+        * is alive. All progs are detached from the trampoline and the
+        * trampoline image is patched with jmp into epilogue to skip
+        * fexit progs. The fentry-only trampoline will be freed via
+        * multiple rcu callbacks.
         */
-       synchronize_rcu_tasks();
-       bpf_jit_free_exec(tr->image);
        hlist_del(&tr->hlist);
        kfree(tr);
 out:
@@ -478,8 +586,18 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start)
        rcu_read_unlock_trace();
 }
 
+void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr)
+{
+       percpu_ref_get(&tr->pcref);
+}
+
+void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr)
+{
+       percpu_ref_put(&tr->pcref);
+}
+
 int __weak
-arch_prepare_bpf_trampoline(void *image, void *image_end,
+arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *image_end,
                            const struct btf_func_model *m, u32 flags,
                            struct bpf_tramp_progs *tprogs,
                            void *orig_call)
index c56e3fcb5f1a07d2e0df00d82f01c4e6a1b12ff9..44e4ec1640f1da8d6d30a654baa4d79bfb5cbac4 100644 (file)
@@ -5861,10 +5861,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
 {
        bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
                            (opcode == BPF_SUB && !off_is_neg);
-       u32 off;
+       u32 off, max;
 
        switch (ptr_reg->type) {
        case PTR_TO_STACK:
+               /* Offset 0 is out-of-bounds, but an acceptable start for
+                * the left direction, see BPF_REG_FP.
+                */
+               max = MAX_BPF_STACK + mask_to_left;
                /* Indirect variable offset stack access is prohibited in
                 * unprivileged mode so it's not handled here.
                 */
@@ -5872,16 +5876,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
                if (mask_to_left)
                        *ptr_limit = MAX_BPF_STACK + off;
                else
-                       *ptr_limit = -off;
-               return 0;
+                       *ptr_limit = -off - 1;
+               return *ptr_limit >= max ? -ERANGE : 0;
        case PTR_TO_MAP_VALUE:
+               max = ptr_reg->map_ptr->value_size;
                if (mask_to_left) {
                        *ptr_limit = ptr_reg->umax_value + ptr_reg->off;
                } else {
                        off = ptr_reg->smin_value + ptr_reg->off;
-                       *ptr_limit = ptr_reg->map_ptr->value_size - off;
+                       *ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
                }
-               return 0;
+               return *ptr_limit >= max ? -ERANGE : 0;
        default:
                return -EINVAL;
        }
@@ -5934,6 +5939,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        u32 alu_state, alu_limit;
        struct bpf_reg_state tmp;
        bool ret;
+       int err;
 
        if (can_skip_alu_sanitation(env, insn))
                return 0;
@@ -5949,10 +5955,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
        alu_state |= ptr_is_dst_reg ?
                     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
 
-       if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
-               return 0;
-       if (update_alu_sanitation_state(aux, alu_state, alu_limit))
-               return -EACCES;
+       err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+       if (err < 0)
+               return err;
+
+       err = update_alu_sanitation_state(aux, alu_state, alu_limit);
+       if (err < 0)
+               return err;
 do_sim:
        /* Simulate and find potential out-of-bounds access under
         * speculative execution from truncation as a result of
@@ -6103,7 +6112,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_ADD:
                ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
                if (ret < 0) {
-                       verbose(env, "R%d tried to add from different maps or paths\n", dst);
+                       verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst);
                        return ret;
                }
                /* We can take a fixed offset as long as it doesn't overflow
@@ -6158,7 +6167,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
        case BPF_SUB:
                ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
                if (ret < 0) {
-                       verbose(env, "R%d tried to sub from different maps or paths\n", dst);
+                       verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst);
                        return ret;
                }
                if (dst_reg == off_reg) {
@@ -9056,6 +9065,10 @@ static int check_btf_info(struct bpf_verifier_env *env,
        btf = btf_get_by_fd(attr->prog_btf_fd);
        if (IS_ERR(btf))
                return PTR_ERR(btf);
+       if (btf_is_kernel(btf)) {
+               btf_put(btf);
+               return -EACCES;
+       }
        env->prog->aux->btf = btf;
 
        err = check_btf_func(env, attr, uattr);
@@ -11660,7 +11673,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
                        off_reg = issrc ? insn->src_reg : insn->dst_reg;
                        if (isneg)
                                *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
+                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
                        *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
                        *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
                        *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
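
A minimal userspace sketch of what the emitted sequence computes (together with the pre-existing arsh/and steps that follow it), assuming arithmetic right shift on signed types; with the now-inclusive alu_limit, any offset outside [0, alu_limit] is masked to zero before it can be used speculatively:

        #include <stdint.h>

        static int64_t sanitize_off(int64_t off, int64_t alu_limit)
        {
                int64_t ax = alu_limit;

                ax -= off;      /* negative iff off > alu_limit        */
                ax |= off;      /* sign bit also set iff off < 0       */
                ax = -ax;
                ax >>= 63;      /* all-ones if off was in range, else 0 */
                return off & ax;
        }
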
index 54cc905e5fe095d0081ec943ac2a16e487f2235b..426cd0c51f9ebb7d903d14c7e220a018ac38e27b 100644 (file)
@@ -1948,8 +1948,14 @@ static __latent_entropy struct task_struct *copy_process(
        p = dup_task_struct(current, node);
        if (!p)
                goto fork_out;
-       if (args->io_thread)
+       if (args->io_thread) {
+               /*
+                * Mark us as an IO worker, and block any signal that isn't
+                * fatal or STOP.
+                */
                p->flags |= PF_IO_WORKER;
+               siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
+       }
 
        /*
         * This _must_ happen before we call free_task(), i.e. before we jump
@@ -2438,14 +2444,8 @@ struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node)
                .stack_size     = (unsigned long)arg,
                .io_thread      = 1,
        };
-       struct task_struct *tsk;
 
-       tsk = copy_process(NULL, 0, node, &args);
-       if (!IS_ERR(tsk)) {
-               sigfillset(&tsk->blocked);
-               sigdelsetmask(&tsk->blocked, sigmask(SIGKILL));
-       }
-       return tsk;
+       return copy_process(NULL, 0, node, &args);
 }
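
A minimal sketch of the blocked-mask setup that moved into copy_process() above; siginitsetinv() builds the inverted set in one step:

        sigset_t blocked;

        /* Block everything except the two signals an IO worker must still
         * react to; equivalent to sigfillset() + sigdelsetmask(). */
        siginitsetinv(&blocked, sigmask(SIGKILL) | sigmask(SIGSTOP));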
 
 /*
index 1a2d57d1327cd6d467d8ce86117bc2174652068e..dc520f01f99ddc053366ab5fdbaffd126f519c24 100644 (file)
@@ -134,7 +134,7 @@ bool freeze_task(struct task_struct *p)
                return false;
        }
 
-       if (!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if (!(p->flags & PF_KTHREAD))
                fake_signal_wake_up(p);
        else
                wake_up_state(p, TASK_INTERRUPTIBLE);
index c94b820a1b62c59e76a089393761aaf2445b09b8..8743150db2acc527f6b253131388281530c68b3e 100644 (file)
@@ -75,7 +75,9 @@ struct gcov_fn_info {
 
        u32 num_counters;
        u64 *counters;
+#if CONFIG_CLANG_VERSION < 110000
        const char *function_name;
+#endif
 };
 
 static struct gcov_info *current_info;
@@ -105,6 +107,7 @@ void llvm_gcov_init(llvm_gcov_callback writeout, llvm_gcov_callback flush)
 }
 EXPORT_SYMBOL(llvm_gcov_init);
 
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_start_file(const char *orig_filename, const char version[4],
                u32 checksum)
 {
@@ -113,7 +116,17 @@ void llvm_gcda_start_file(const char *orig_filename, const char version[4],
        current_info->checksum = checksum;
 }
 EXPORT_SYMBOL(llvm_gcda_start_file);
+#else
+void llvm_gcda_start_file(const char *orig_filename, u32 version, u32 checksum)
+{
+       current_info->filename = orig_filename;
+       current_info->version = version;
+       current_info->checksum = checksum;
+}
+EXPORT_SYMBOL(llvm_gcda_start_file);
+#endif
 
+#if CONFIG_CLANG_VERSION < 110000
 void llvm_gcda_emit_function(u32 ident, const char *function_name,
                u32 func_checksum, u8 use_extra_checksum, u32 cfg_checksum)
 {
@@ -133,6 +146,24 @@ void llvm_gcda_emit_function(u32 ident, const char *function_name,
        list_add_tail(&info->head, &current_info->functions);
 }
 EXPORT_SYMBOL(llvm_gcda_emit_function);
+#else
+void llvm_gcda_emit_function(u32 ident, u32 func_checksum,
+               u8 use_extra_checksum, u32 cfg_checksum)
+{
+       struct gcov_fn_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+       if (!info)
+               return;
+
+       INIT_LIST_HEAD(&info->head);
+       info->ident = ident;
+       info->checksum = func_checksum;
+       info->use_extra_checksum = use_extra_checksum;
+       info->cfg_checksum = cfg_checksum;
+       list_add_tail(&info->head, &current_info->functions);
+}
+EXPORT_SYMBOL(llvm_gcda_emit_function);
+#endif
 
 void llvm_gcda_emit_arcs(u32 num_counters, u64 *counters)
 {
@@ -295,6 +326,7 @@ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src)
        }
 }
 
+#if CONFIG_CLANG_VERSION < 110000
 static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
 {
        size_t cv_size; /* counter values size */
@@ -322,6 +354,28 @@ err_name:
        kfree(fn_dup);
        return NULL;
 }
+#else
+static struct gcov_fn_info *gcov_fn_info_dup(struct gcov_fn_info *fn)
+{
+       size_t cv_size; /* counter values size */
+       struct gcov_fn_info *fn_dup = kmemdup(fn, sizeof(*fn),
+                       GFP_KERNEL);
+       if (!fn_dup)
+               return NULL;
+       INIT_LIST_HEAD(&fn_dup->head);
+
+       cv_size = fn->num_counters * sizeof(fn->counters[0]);
+       fn_dup->counters = vmalloc(cv_size);
+       if (!fn_dup->counters) {
+               kfree(fn_dup);
+               return NULL;
+       }
+
+       memcpy(fn_dup->counters, fn->counters, cv_size);
+
+       return fn_dup;
+}
+#endif
 
 /**
  * gcov_info_dup - duplicate profiling data set
@@ -362,6 +416,7 @@ err:
  * gcov_info_free - release memory for profiling data set duplicate
  * @info: profiling data set duplicate to free
  */
+#if CONFIG_CLANG_VERSION < 110000
 void gcov_info_free(struct gcov_info *info)
 {
        struct gcov_fn_info *fn, *tmp;
@@ -375,6 +430,20 @@ void gcov_info_free(struct gcov_info *info)
        kfree(info->filename);
        kfree(info);
 }
+#else
+void gcov_info_free(struct gcov_info *info)
+{
+       struct gcov_fn_info *fn, *tmp;
+
+       list_for_each_entry_safe(fn, tmp, &info->functions, head) {
+               vfree(fn->counters);
+               list_del(&fn->head);
+               kfree(fn);
+       }
+       kfree(info->filename);
+       kfree(info);
+}
+#endif
 
 #define ITER_STRIDE    PAGE_SIZE
 
index 1358fa4abfa83570030cf84474d28d43a459fbcc..0f4530b3a8cd9d961dc52b08750e7b44fcfdedd6 100644 (file)
@@ -98,7 +98,7 @@ static int __init em_debug_init(void)
 
        return 0;
 }
-core_initcall(em_debug_init);
+fs_initcall(em_debug_init);
 #else /* CONFIG_DEBUG_FS */
 static void em_debug_create_pd(struct device *dev) {}
 static void em_debug_remove_pd(struct device *dev) {}
index 821cf17238147ab8af77b01b7e8e86d10d8501d5..61db50f7ca8667a11811c6a423cf07f9a662a6f6 100644 (file)
@@ -375,7 +375,7 @@ static int ptrace_attach(struct task_struct *task, long request,
        audit_ptrace(task);
 
        retval = -EPERM;
-       if (unlikely(task->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if (unlikely(task->flags & PF_KTHREAD))
                goto out;
        if (same_thread_group(task, current))
                goto out;
index f2a1b898da29fcb5adc6cc5732f53150b25d51b3..f2718350bf4b529668960e82a0dac774429e619d 100644 (file)
@@ -91,7 +91,7 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
                return true;
 
        /* Only allow kernel generated signals to this kthread */
-       if (unlikely((t->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+       if (unlikely((t->flags & PF_KTHREAD) &&
                     (handler == SIG_KTHREAD_KERNEL) && !force))
                return true;
 
@@ -288,8 +288,7 @@ bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
        BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
 
-       if (unlikely(fatal_signal_pending(task) ||
-                    (task->flags & (PF_EXITING | PF_IO_WORKER))))
+       if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
                return false;
 
        if (mask & JOBCTL_STOP_SIGMASK)
@@ -834,9 +833,6 @@ static int check_kill_permission(int sig, struct kernel_siginfo *info,
 
        if (!valid_signal(sig))
                return -EINVAL;
-       /* PF_IO_WORKER threads don't take any signals */
-       if (t->flags & PF_IO_WORKER)
-               return -ESRCH;
 
        if (!si_fromuser(info))
                return 0;
@@ -1100,7 +1096,7 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc
        /*
         * Skip useless siginfo allocation for SIGKILL and kernel threads.
         */
-       if ((sig == SIGKILL) || (t->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
                goto out_set;
 
        /*
@@ -2771,6 +2767,14 @@ relock:
                        do_coredump(&ksig->info);
                }
 
+               /*
+                * PF_IO_WORKER threads will catch and exit on fatal signals
+                * themselves. They have cleanup that must be performed, so
+                * we cannot call do_exit() on their behalf.
+                */
+               if (current->flags & PF_IO_WORKER)
+                       goto out;
+
                /*
                 * Death signals, no core dump.
                 */
@@ -2778,7 +2782,7 @@ relock:
                /* NOTREACHED */
        }
        spin_unlock_irq(&sighand->siglock);
-
+out:
        ksig->sig = signr;
 
        if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
index 4d8e3557554919a9b994ebf0e8c85b0816a526d3..3ba52d4e1314228147112047aa497709dbe6ba59 100644 (file)
@@ -3231,7 +3231,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        pg = start_pg;
        while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
                kfree(pg);
                pg = start_pg;
@@ -5045,6 +5046,20 @@ struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
        return NULL;
 }
 
+static struct ftrace_direct_func *ftrace_alloc_direct_func(unsigned long addr)
+{
+       struct ftrace_direct_func *direct;
+
+       direct = kmalloc(sizeof(*direct), GFP_KERNEL);
+       if (!direct)
+               return NULL;
+       direct->addr = addr;
+       direct->count = 0;
+       list_add_rcu(&direct->next, &ftrace_direct_funcs);
+       ftrace_direct_func_count++;
+       return direct;
+}
+
 /**
  * register_ftrace_direct - Call a custom trampoline directly
  * @ip: The address of the nop at the beginning of a function
@@ -5120,15 +5135,11 @@ int register_ftrace_direct(unsigned long ip, unsigned long addr)
 
        direct = ftrace_find_direct_func(addr);
        if (!direct) {
-               direct = kmalloc(sizeof(*direct), GFP_KERNEL);
+               direct = ftrace_alloc_direct_func(addr);
                if (!direct) {
                        kfree(entry);
                        goto out_unlock;
                }
-               direct->addr = addr;
-               direct->count = 0;
-               list_add_rcu(&direct->next, &ftrace_direct_funcs);
-               ftrace_direct_func_count++;
        }
 
        entry->ip = ip;
@@ -5329,6 +5340,7 @@ int __weak ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
 int modify_ftrace_direct(unsigned long ip,
                         unsigned long old_addr, unsigned long new_addr)
 {
+       struct ftrace_direct_func *direct, *new_direct = NULL;
        struct ftrace_func_entry *entry;
        struct dyn_ftrace *rec;
        int ret = -ENODEV;
@@ -5344,6 +5356,20 @@ int modify_ftrace_direct(unsigned long ip,
        if (entry->direct != old_addr)
                goto out_unlock;
 
+       direct = ftrace_find_direct_func(old_addr);
+       if (WARN_ON(!direct))
+               goto out_unlock;
+       if (direct->count > 1) {
+               ret = -ENOMEM;
+               new_direct = ftrace_alloc_direct_func(new_addr);
+               if (!new_direct)
+                       goto out_unlock;
+               direct->count--;
+               new_direct->count++;
+       } else {
+               direct->addr = new_addr;
+       }
+
        /*
         * If there's no other ftrace callback on the rec->ip location,
         * then it can be changed directly by the architecture.
@@ -5357,6 +5383,14 @@ int modify_ftrace_direct(unsigned long ip,
                ret = 0;
        }
 
+       if (unlikely(ret && new_direct)) {
+               direct->count++;
+               list_del_rcu(&new_direct->next);
+               synchronize_rcu_tasks();
+               kfree(new_direct);
+               ftrace_direct_func_count--;
+       }
+
  out_unlock:
        mutex_unlock(&ftrace_lock);
        mutex_unlock(&direct_mutex);
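
A minimal usage sketch of the path this hunk fixes, with hypothetical trampoline stubs: retargeting a live direct call now works even when several ips share one ftrace_direct_func entry:

        /* my_func_ip is the patched call site; my_tramp_v1/v2 are
         * hypothetical assembly trampolines. */
        int err;

        err = register_ftrace_direct(my_func_ip, (unsigned long)my_tramp_v1);
        if (!err)
                err = modify_ftrace_direct(my_func_ip,
                                           (unsigned long)my_tramp_v1,
                                           (unsigned long)my_tramp_v2);
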
@@ -6418,7 +6452,8 @@ void ftrace_release_mod(struct module *mod)
                clear_mod_from_hashes(pg);
 
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-               free_pages((unsigned long)pg->records, order);
+               if (order >= 0)
+                       free_pages((unsigned long)pg->records, order);
                tmp_page = pg->next;
                kfree(pg);
                ftrace_number_of_pages -= 1 << order;
@@ -6778,7 +6813,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
                if (!pg->index) {
                        *last_pg = pg->next;
                        order = get_count_order(pg->size / ENTRIES_PER_PAGE);
-                       free_pages((unsigned long)pg->records, order);
+                       if (order >= 0)
+                               free_pages((unsigned long)pg->records, order);
                        ftrace_number_of_pages -= 1 << order;
                        ftrace_number_of_groups--;
                        kfree(pg);
index eccb4e1187cc788e2f4f60b15bd7b4c00bd2884b..5c777627212fa81cc3dc4fd70cbba807c62a5acc 100644 (file)
@@ -2984,7 +2984,8 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer,
 
        size = nr_entries * sizeof(unsigned long);
        event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
-                                           sizeof(*entry) + size, trace_ctx);
+                                   (sizeof(*entry) - sizeof(entry->caller)) + size,
+                                   trace_ctx);
        if (!event)
                goto out;
        entry = ring_buffer_event_data(event);
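
A minimal sketch of the corrected size arithmetic: the fixed-size caller[] array declared in the entry struct must be subtracted before adding the real number of captured entries, otherwise the reservation double-counts up to eight slots (struct layout is illustrative):

        struct stack_entry_like {
                int             size;
                unsigned long   caller[8];      /* declared fixed, used variably */
        };

        size_t nr_entries = 3;  /* illustrative */
        size_t need = sizeof(struct stack_entry_like)
                      - sizeof(((struct stack_entry_like *)0)->caller)
                      + nr_entries * sizeof(unsigned long);
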
index 0b35212ffc3d0a60ae796bb98bf2ad7232b541aa..bb7bb3b478abfa5c6dbb81466125858db1f90d5b 100644 (file)
@@ -139,13 +139,22 @@ static void umd_cleanup(struct subprocess_info *info)
        struct umd_info *umd_info = info->data;
 
        /* cleanup if umh_setup() was successful but exec failed */
-       if (info->retval) {
-               fput(umd_info->pipe_to_umh);
-               fput(umd_info->pipe_from_umh);
-               put_pid(umd_info->tgid);
-               umd_info->tgid = NULL;
-       }
+       if (info->retval)
+               umd_cleanup_helper(umd_info);
+}
+
+/**
+ * umd_cleanup_helper - release the resources which were allocated in umd_setup
+ * @info: information about usermode driver
+ */
+void umd_cleanup_helper(struct umd_info *info)
+{
+       fput(info->pipe_to_umh);
+       fput(info->pipe_from_umh);
+       put_pid(info->tgid);
+       info->tgid = NULL;
 }
+EXPORT_SYMBOL_GPL(umd_cleanup_helper);
 
 /**
  * fork_usermode_driver - fork a usermode driver
index 064d68a5391a09da4da883f187075b9bc6056d8a..46866394fc84382cbab511d044103406ffcd0176 100644 (file)
@@ -232,4 +232,5 @@ u64 mul_u64_u64_div_u64(u64 a, u64 b, u64 c)
 
        return res + div64_u64(a * b, c);
 }
+EXPORT_SYMBOL(mul_u64_u64_div_u64);
 #endif
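
A minimal usage sketch of the newly exported helper, with illustrative clock numbers; the point is that a * b can overflow 64 bits even when the quotient fits:

        u64 cycles = 123456789012345ULL;        /* illustrative */
        u64 num    = 1000000000ULL;             /* ns per second */
        u64 den    = 19200000ULL;               /* 19.2 MHz clock */

        /* cycles * num does not fit in 64 bits; the helper forms the
         * full-width product before dividing. */
        u64 ns = mul_u64_u64_div_u64(cycles, num, den);
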
index 8294f43f498164c7e815593790a732924ad98e0b..8b1c318189ce801a0935133b4882556fede5bfd2 100644 (file)
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
 
 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                                                       unsigned int order)
+                               unsigned int order, unsigned int new_order)
 {
-       XA_STATE(xas, xa, index);
-       void *entry;
-       unsigned int i = 0;
+       XA_STATE_ORDER(xas, xa, index, new_order);
+       unsigned int i;
 
        xa_store_order(xa, index, order, xa, GFP_KERNEL);
 
        xas_split_alloc(&xas, xa, order, GFP_KERNEL);
        xas_lock(&xas);
        xas_split(&xas, xa, order);
+       for (i = 0; i < (1 << order); i += (1 << new_order))
+               __xa_store(xa, index + i, xa_mk_index(index + i), 0);
        xas_unlock(&xas);
 
-       xa_for_each(xa, index, entry) {
-               XA_BUG_ON(xa, entry != xa);
-               i++;
+       for (i = 0; i < (1 << order); i++) {
+               unsigned int val = index + (i & ~((1 << new_order) - 1));
+               XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
        }
-       XA_BUG_ON(xa, i != 1 << order);
 
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
 
 static noinline void check_split(struct xarray *xa)
 {
-       unsigned int order;
+       unsigned int order, new_order;
 
        XA_BUG_ON(xa, !xa_empty(xa));
 
        for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-               check_split_1(xa, 0, order);
-               check_split_1(xa, 1UL << order, order);
-               check_split_1(xa, 3UL << order, order);
+               for (new_order = 0; new_order < order; new_order++) {
+                       check_split_1(xa, 0, order, new_order);
+                       check_split_1(xa, 1UL << order, order, new_order);
+                       check_split_1(xa, 3UL << order, order, new_order);
+               }
        }
 }
 #else
index 5fa51614802ada34af73623a7cab609e236af856..f5d8f54907b4f87e649211a60d1e0bf3cb170a16 100644 (file)
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
  * @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  * @gfp: Memory allocation flags.
  *
  * This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 
        do {
                unsigned int i;
-               void *sibling;
+               void *sibling = NULL;
                struct xa_node *node;
 
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
-                               sibling = xa_mk_sibling(0);
+                               sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
  * xas_split() - Split a multi-index entry into smaller entries.
  * @xas: XArray operation state.
  * @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  *
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries.
  *
  * Context: Any context.  The caller should hold the xa_lock.
  */
index 86f2b9495f9cfd3f30127e8a452f6c1e182511d9..6ef8f5e05e7e5db835337728cd9f57e1f3d98f77 100644 (file)
@@ -618,7 +618,7 @@ void __kmap_local_sched_out(void)
                int idx;
 
                /* With debug all even slots are unmapped and act as guard */
-               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+               if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
                        WARN_ON_ONCE(!pte_none(pteval));
                        continue;
                }
@@ -654,7 +654,7 @@ void __kmap_local_sched_in(void)
                int idx;
 
                /* With debug all even slots are unmapped and act as guard */
-               if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
+               if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
                        WARN_ON_ONCE(!pte_none(pteval));
                        continue;
                }
index 5b1ab1f427c57cd1a66c29d9fca147d02bf229d4..a86a58ef132d55690e92025039aa5024dce141b3 100644 (file)
@@ -280,6 +280,17 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
                nrg->reservation_counter =
                        &h_cg->rsvd_hugepage[hstate_index(h)];
                nrg->css = &h_cg->css;
+               /*
+                * The caller will hold exactly one h_cg->css reference for the
+                * whole contiguous reservation region. But this area might be
+                * scattered when some file_regions already reside in it. As a
+                * result, many file_regions may share only one css reference.
+                * To ensure that each file_region holds exactly one h_cg->css
+                * reference, do a css_get for each file_region and leave the
+                * reference held by the caller untouched.
+                */
+               css_get(&h_cg->css);
                if (!resv->pages_per_hpage)
                        resv->pages_per_hpage = pages_per_huge_page(h);
                /* pages_per_hpage should be the same for all entries in
@@ -293,6 +304,14 @@ static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
 #endif
 }
 
+static void put_uncharge_info(struct file_region *rg)
+{
+#ifdef CONFIG_CGROUP_HUGETLB
+       if (rg->css)
+               css_put(rg->css);
+#endif
+}
+
 static bool has_same_uncharge_info(struct file_region *rg,
                                   struct file_region *org)
 {
@@ -316,6 +335,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
                prg->to = rg->to;
 
                list_del(&rg->link);
+               put_uncharge_info(rg);
                kfree(rg);
 
                rg = prg;
@@ -327,6 +347,7 @@ static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
                nrg->from = rg->from;
 
                list_del(&rg->link);
+               put_uncharge_info(rg);
                kfree(rg);
        }
 }
@@ -662,7 +683,7 @@ retry:
 
                        del += t - f;
                        hugetlb_cgroup_uncharge_file_region(
-                               resv, rg, t - f);
+                               resv, rg, t - f, false);
 
                        /* New entry for end of split region */
                        nrg->from = t;
@@ -683,7 +704,7 @@ retry:
                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           rg->to - rg->from);
+                                                           rg->to - rg->from, true);
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
@@ -691,13 +712,13 @@ retry:
 
                if (f <= rg->from) {    /* Trim beginning of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           t - rg->from);
+                                                           t - rg->from, false);
 
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        hugetlb_cgroup_uncharge_file_region(resv, rg,
-                                                           rg->to - f);
+                                                           rg->to - f, false);
 
                        del += rg->to - f;
                        rg->to = f;
@@ -5187,6 +5208,10 @@ bool hugetlb_reserve_pages(struct inode *inode,
                         */
                        long rsv_adjust;
 
+                       /*
+                        * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
+                        * reference to h_cg->css. See the comment below for details.
+                        */
                        hugetlb_cgroup_uncharge_cgroup_rsvd(
                                hstate_index(h),
                                (chg - add) * pages_per_huge_page(h), h_cg);
@@ -5194,6 +5219,14 @@ bool hugetlb_reserve_pages(struct inode *inode,
                        rsv_adjust = hugepage_subpool_put_pages(spool,
                                                                chg - add);
                        hugetlb_acct_memory(h, -rsv_adjust);
+               } else if (h_cg) {
+                       /*
+                        * The file_regions will hold their own reference to
+                        * h_cg->css. So we should release the reference held
+                        * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
+                        * done.
+                        */
+                       hugetlb_cgroup_put_rsvd_cgroup(h_cg);
                }
        }
        return true;
index f68b51fcda3d137d9904206922ac4dbc100adce8..603a131e262dc75ca4cb61f87e6751ef8368de4b 100644 (file)
@@ -391,7 +391,8 @@ void hugetlb_cgroup_uncharge_counter(struct resv_map *resv, unsigned long start,
 
 void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
                                         struct file_region *rg,
-                                        unsigned long nr_pages)
+                                        unsigned long nr_pages,
+                                        bool region_del)
 {
        if (hugetlb_cgroup_disabled() || !resv || !rg || !nr_pages)
                return;
@@ -400,7 +401,12 @@ void hugetlb_cgroup_uncharge_file_region(struct resv_map *resv,
            !resv->reservation_counter) {
                page_counter_uncharge(rg->reservation_counter,
                                      nr_pages * resv->pages_per_hpage);
-               css_put(rg->css);
+               /*
+                * Only do css_put(rg->css) when we delete the entire region
+                * because one file_region must hold exactly one css reference.
+                */
+               if (region_del)
+                       css_put(rg->css);
        }
 }
 
index 3b8ec938470a2a0ca65a2ff3a39634d9feda7424..d53c91f881a41d1957a5aa0ce8715f4e60671255 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kcsan-checks.h>
 #include <linux/kfence.h>
+#include <linux/kmemleak.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/memblock.h>
@@ -480,6 +481,14 @@ static bool __init kfence_init_pool(void)
                addr += 2 * PAGE_SIZE;
        }
 
+       /*
+        * The pool is live and will never be deallocated from this point on.
+        * Remove the pool object from the kmemleak object tree, as it would
+        * otherwise overlap with allocations returned by kfence_alloc(), which
+        * are registered with kmemleak through the slab post-alloc hook.
+        */
+       kmemleak_free(__kfence_pool);
+
        return true;
 
 err:
index c0014d3b91c10f2689f99d83c9ba2bd04cbbfea7..fe6e3ae8e8c6719f7ae6bead5954e2f43109a523 100644 (file)
@@ -97,6 +97,7 @@
 #include <linux/atomic.h>
 
 #include <linux/kasan.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/memory_hotplug.h>
 
@@ -589,7 +590,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
-       object->size = size;
+       object->size = kfence_ksize((void *)ptr) ?: size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
index 5efa07fb6cdc18c9189fc94a995716744218454a..550405fc3b5e6396a1dd1fd974dde3e07a3c9d15 100644 (file)
@@ -166,7 +166,7 @@ static int __init init_zero_pfn(void)
        zero_pfn = page_to_pfn(ZERO_PAGE(0));
        return 0;
 }
-core_initcall(init_zero_pfn);
+early_initcall(init_zero_pfn);
 
 void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
 {
index 61ee40ed804ee57ba199cdf69ee96f45ca1b9706..459d195d2ff64bb71509189277c71e84d9725033 100644 (file)
@@ -501,10 +501,33 @@ static int mn_hlist_invalidate_range_start(
                                                "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
                                        _ret != -EAGAIN);
+                               /*
+                                * We call all the notifiers on any EAGAIN;
+                                * there is no way for a notifier to know if
+                                * its start method failed, so a start that
+                                * returns EAGAIN can't also implement end.
+                                */
+                               WARN_ON(ops->invalidate_range_end);
                                ret = _ret;
                        }
                }
        }
+
+       if (ret) {
+               /*
+                * Must be non-blocking to get here.  If there are multiple
+                * notifiers and one or more failed to start, any whose start
+                * succeeded is expecting its end to be called.  Do so now.
+                */
+               hlist_for_each_entry_rcu(subscription, &subscriptions->list,
+                                        hlist, srcu_read_lock_held(&srcu)) {
+                       if (!subscription->ops->invalidate_range_end)
+                               continue;
+
+                       subscription->ops->invalidate_range_end(subscription,
+                                                               range);
+               }
+       }
        srcu_read_unlock(&srcu, id);
 
        return ret;
index eb34d204d4ee717f0cc3e8c640df1a4c10f1029a..9e35b636a393755d34fc302b1b3cdb6567e33d8f 100644 (file)
@@ -2833,6 +2833,22 @@ void wait_on_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL_GPL(wait_on_page_writeback);
 
+/*
+ * Wait for a page to complete writeback.  Returns -EINTR if we get a
+ * fatal signal while waiting.
+ */
+int wait_on_page_writeback_killable(struct page *page)
+{
+       while (PageWriteback(page)) {
+               trace_wait_on_page_writeback(page, page_mapping(page));
+               if (wait_on_page_bit_killable(page, PG_writeback))
+                       return -EINTR;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(wait_on_page_writeback_killable);
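
A minimal caller sketch, assuming a fault path that is allowed to retry (e.g. a ->page_mkwrite() implementation):

        if (wait_on_page_writeback_killable(page))
                return VM_FAULT_RETRY;  /* fatal signal arrived while waiting */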
+
 /**
  * wait_for_stable_page() - wait for writeback to finish, if necessary.
  * @page:      The page to wait on.
index b5dafa7e44e429931600ad220fe45482ff7d1a92..9d889ad2bb869328e8b320a988531561eb586902 100644 (file)
@@ -1346,8 +1346,22 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
                        page = list_entry(pos, struct page, lru);
 
                        zhdr = page_address(page);
-                       if (test_bit(PAGE_HEADLESS, &page->private))
+                       if (test_bit(PAGE_HEADLESS, &page->private)) {
+                               /*
+                                * For non-headless pages, we wait to do this
+                                * until we have the page lock to avoid racing
+                                * with __z3fold_alloc(). Headless pages don't
+                                * have a lock (and __z3fold_alloc() will never
+                                * see them), but we still need to test and set
+                                * PAGE_CLAIMED to avoid racing with
+                                * z3fold_free(), so just do it now before
+                                * leaving the loop.
+                                */
+                               if (test_and_set_bit(PAGE_CLAIMED, &page->private))
+                                       continue;
+
                                break;
+                       }
 
                        if (kref_get_unless_zero(&zhdr->refcount) == 0) {
                                zhdr = NULL;
index b89503832fcca8b4317e4bab4b0ecad94dbc8464..1e24d9a2c9a77ac20c871e5452b55010249d0e42 100644 (file)
@@ -128,6 +128,8 @@ br_switchdev_fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
 {
        if (!fdb->dst)
                return;
+       if (test_bit(BR_FDB_LOCAL, &fdb->flags))
+               return;
 
        switch (type) {
        case RTM_DELNEIGH:
index 3ef7f78e553bc93e2c24db9b5b8a016cc0235f28..15ea1234d45730232941a1bbbe441466f865af46 100644 (file)
@@ -196,7 +196,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
        nskb->dev = dev;
        can_skb_set_owner(nskb, sk);
        ncf = (struct canfd_frame *)nskb->data;
-       skb_put(nskb, so->ll.mtu);
+       skb_put_zero(nskb, so->ll.mtu);
 
        /* create & send flow control reply */
        ncf->can_id = so->txid;
@@ -215,8 +215,7 @@ static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
        if (ae)
                ncf->data[0] = so->opt.ext_address;
 
-       if (so->ll.mtu == CANFD_MTU)
-               ncf->flags = so->ll.tx_flags;
+       ncf->flags = so->ll.tx_flags;
 
        can_send_ret = can_send(nskb, 1);
        if (can_send_ret)
@@ -780,7 +779,7 @@ isotp_tx_burst:
                can_skb_prv(skb)->skbcnt = 0;
 
                cf = (struct canfd_frame *)skb->data;
-               skb_put(skb, so->ll.mtu);
+               skb_put_zero(skb, so->ll.mtu);
 
                /* create consecutive frame */
                isotp_fill_dataframe(cf, so, ae, 0);
@@ -790,8 +789,7 @@ isotp_tx_burst:
                so->tx.sn %= 16;
                so->tx.bs++;
 
-               if (so->ll.mtu == CANFD_MTU)
-                       cf->flags = so->ll.tx_flags;
+               cf->flags = so->ll.tx_flags;
 
                skb->dev = dev;
                can_skb_set_owner(skb, sk);
@@ -897,7 +895,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        so->tx.idx = 0;
 
        cf = (struct canfd_frame *)skb->data;
-       skb_put(skb, so->ll.mtu);
+       skb_put_zero(skb, so->ll.mtu);
 
        /* check for single frame transmission depending on TX_DL */
        if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
@@ -939,8 +937,7 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        }
 
        /* send the first or only CAN frame */
-       if (so->ll.mtu == CANFD_MTU)
-               cf->flags = so->ll.tx_flags;
+       cf->flags = so->ll.tx_flags;
 
        skb->dev = dev;
        skb->sk = sk;
@@ -1228,7 +1225,8 @@ static int isotp_setsockopt(struct socket *sock, int level, int optname,
                        if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
                                return -EINVAL;
 
-                       if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
+                       if (ll.mtu == CAN_MTU &&
+                           (ll.tx_dl > CAN_MAX_DLEN || ll.tx_flags != 0))
                                return -EINVAL;
 
                        memcpy(&so->ll, &ll, sizeof(ll));
index 6c5967e801328a00ffb434a430a865ddcf8c02db..0f72ff5d34ba0072b7fcb1588ecdef6b169a7733 100644 (file)
@@ -1184,6 +1184,18 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
                        return -ENOMEM;
 
                for_each_netdev(net, d) {
+                       struct netdev_name_node *name_node;
+                       list_for_each_entry(name_node, &d->name_node->list, list) {
+                               if (!sscanf(name_node->name, name, &i))
+                                       continue;
+                               if (i < 0 || i >= max_netdevices)
+                                       continue;
+
+                       /* avoid cases where sscanf is not an exact inverse of printf */
+                               snprintf(buf, IFNAMSIZ, name, i);
+                               if (!strncmp(buf, name_node->name, IFNAMSIZ))
+                                       set_bit(i, inuse);
+                       }
                        if (!sscanf(d->name, name, &i))
                                continue;
                        if (i < 0 || i >= max_netdevices)
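
For context: __dev_alloc_name() treats the requested name as a
printf/scanf template such as "eth%d", marks every index already taken in
an `inuse` bitmap, and hands out the first clear bit. The hunk extends
that scan to each device's alternative names (the name_node list), which
previously did not reserve their indices. The round-trip test, condensed
(`existing` is a hypothetical stand-in for the name being checked):

    if (sscanf(existing, name, &i) == 1 && i >= 0 && i < max_netdevices) {
            char buf[IFNAMSIZ];

            /* re-print and compare so only exact round-trips count;
             * "eth10foo" must not reserve index 10 */
            snprintf(buf, IFNAMSIZ, name, i);
            if (!strncmp(buf, existing, IFNAMSIZ))
                    set_bit(i, inuse);
    }
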
@@ -4294,6 +4306,13 @@ static inline void ____napi_schedule(struct softnet_data *sd,
                 */
                thread = READ_ONCE(napi->thread);
                if (thread) {
+                       /* Avoid doing set_bit() if the thread is in
+                        * INTERRUPTIBLE state, because napi_thread_wait()
+                        * makes sure to proceed with napi polling
+                        * if the thread is explicitly woken from here.
+                        */
+                       if (READ_ONCE(thread->state) != TASK_INTERRUPTIBLE)
+                               set_bit(NAPI_STATE_SCHED_THREADED, &napi->state);
                        wake_up_process(thread);
                        return;
                }
@@ -6486,6 +6505,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED));
 
                new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED |
+                             NAPIF_STATE_SCHED_THREADED |
                              NAPIF_STATE_PREFER_BUSY_POLL);
 
                /* If STATE_MISSED was set, leave STATE_SCHED set,
@@ -6968,16 +6988,25 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 
 static int napi_thread_wait(struct napi_struct *napi)
 {
+       bool woken = false;
+
        set_current_state(TASK_INTERRUPTIBLE);
 
        while (!kthread_should_stop() && !napi_disable_pending(napi)) {
-               if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+               /* Test the SCHED_THREADED bit here to make sure the current
+                * kthread owns this napi and is the one that should poll it.
+                * Testing the SCHED bit alone is not enough, because it might
+                * be set by some other busy-poll thread or by napi_disable().
+                */
+               if (test_bit(NAPI_STATE_SCHED_THREADED, &napi->state) || woken) {
                        WARN_ON(!list_empty(&napi->poll_list));
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
 
                schedule();
+               /* woken being true indicates this thread owns this napi. */
+               woken = true;
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
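
Taken together, the three dev.c NAPI hunks form an ownership handshake for
threaded NAPI: ____napi_schedule() hands the napi to its kthread by setting
NAPI_STATE_SCHED_THREADED (skipping the atomic when the thread already sits
in TASK_INTERRUPTIBLE, since waking it from here is proof of ownership),
napi_thread_wait() only proceeds when it sees that bit or was explicitly
woken, and napi_complete_done() clears the bit when polling ends. The sleep
side is the classic kthread wait loop; a sketch with a hypothetical owned()
predicate:

    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
            if (owned()) {                  /* e.g. SCHED_THREADED is set */
                    __set_current_state(TASK_RUNNING);
                    break;
            }
            schedule();                     /* sleep until woken */
            set_current_state(TASK_INTERRUPTIBLE);
    }
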
@@ -11346,7 +11375,7 @@ static void __net_exit default_device_exit(struct net *net)
                        continue;
 
                /* Leave virtual devices for the generic cleanup */
-               if (dev->rtnl_link_ops)
+               if (dev->rtnl_link_ops && !dev->rtnl_link_ops->netns_refund)
                        continue;
 
                /* Push remaining network devices to init_net */
index 571f191c06d947440858e91f4ad3a52ec9630538..db65ce62b625ad375cba872ae2f2b31454baca81 100644 (file)
@@ -1053,6 +1053,20 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
        return 0;
 
 err_module_put:
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
+               struct sk_buff *skb;
+
+               del_timer_sync(&hw_data->send_timer);
+               cancel_work_sync(&hw_data->dm_alert_work);
+               while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
+                       struct devlink_trap_metadata *hw_metadata;
+
+                       hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
+                       net_dm_hw_metadata_free(hw_metadata);
+                       consume_skb(skb);
+               }
+       }
        module_put(THIS_MODULE);
        return rc;
 }
@@ -1134,6 +1148,15 @@ static int net_dm_trace_on_set(struct netlink_ext_ack *extack)
 err_unregister_trace:
        unregister_trace_kfree_skb(ops->kfree_skb_probe, NULL);
 err_module_put:
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_dm_data *data = &per_cpu(dm_cpu_data, cpu);
+               struct sk_buff *skb;
+
+               del_timer_sync(&data->send_timer);
+               cancel_work_sync(&data->dm_alert_work);
+               while ((skb = __skb_dequeue(&data->drop_queue)))
+                       consume_skb(skb);
+       }
        module_put(THIS_MODULE);
        return rc;
 }
index 0c01bd8d9d81eb09e9a47ac9cd830a9efe5541dd..fb3bcba87744d3f48fcfbdd229f3313c7520f196 100644 (file)
@@ -237,37 +237,62 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 }
 EXPORT_SYMBOL(__dst_destroy_metrics_generic);
 
-static struct dst_ops md_dst_ops = {
-       .family =               AF_UNSPEC,
-};
+struct dst_entry *dst_blackhole_check(struct dst_entry *dst, u32 cookie)
+{
+       return NULL;
+}
 
-static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
+u32 *dst_blackhole_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
-       WARN_ONCE(1, "Attempting to call output on metadata dst\n");
-       kfree_skb(skb);
-       return 0;
+       return NULL;
 }
 
-static int dst_md_discard(struct sk_buff *skb)
+struct neighbour *dst_blackhole_neigh_lookup(const struct dst_entry *dst,
+                                            struct sk_buff *skb,
+                                            const void *daddr)
 {
-       WARN_ONCE(1, "Attempting to call input on metadata dst\n");
-       kfree_skb(skb);
-       return 0;
+       return NULL;
+}
+
+void dst_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
+                              struct sk_buff *skb, u32 mtu,
+                              bool confirm_neigh)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_update_pmtu);
+
+void dst_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
+                           struct sk_buff *skb)
+{
+}
+EXPORT_SYMBOL_GPL(dst_blackhole_redirect);
+
+unsigned int dst_blackhole_mtu(const struct dst_entry *dst)
+{
+       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+       return mtu ? : dst->dev->mtu;
 }
+EXPORT_SYMBOL_GPL(dst_blackhole_mtu);
+
+static struct dst_ops dst_blackhole_ops = {
+       .family         = AF_UNSPEC,
+       .neigh_lookup   = dst_blackhole_neigh_lookup,
+       .check          = dst_blackhole_check,
+       .cow_metrics    = dst_blackhole_cow_metrics,
+       .update_pmtu    = dst_blackhole_update_pmtu,
+       .redirect       = dst_blackhole_redirect,
+       .mtu            = dst_blackhole_mtu,
+};
 
 static void __metadata_dst_init(struct metadata_dst *md_dst,
                                enum metadata_type type, u8 optslen)
-
 {
        struct dst_entry *dst;
 
        dst = &md_dst->dst;
-       dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
+       dst_init(dst, &dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE,
                 DST_METADATA | DST_NOCOUNT);
-
-       dst->input = dst_md_discard;
-       dst->output = dst_md_discard_out;
-
        memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
        md_dst->type = type;
 }
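
This dst.c rework hoists the formerly per-protocol blackhole callbacks into
shared dst_blackhole_* helpers, so metadata dsts and the IPv4/IPv6
blackhole routes (rewritten further down) all share one set of no-op
operations. The `mtu ? : dst->dev->mtu` form in dst_blackhole_mtu() is the
GNU C conditional with the middle operand omitted, which repeats the
condition:

    unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

    /* `mtu ? : dst->dev->mtu` is shorthand for: */
    return mtu ? mtu : dst->dev->mtu;
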
index adfdad234674dc1031d24b8ae635174fcfbd0dce..9323d34d34cccbf8724bc7b2c937387a2cbcc0a6 100644 (file)
@@ -5658,7 +5658,7 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
        if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
                return -EINVAL;
 
-       if (unlikely(flags & BPF_MTU_CHK_SEGS && len_diff))
+       if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
                return -EINVAL;
 
        dev = __dev_via_ifindex(dev, ifindex);
@@ -5668,7 +5668,11 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
        mtu = READ_ONCE(dev->mtu);
 
        dev_len = mtu + dev->hard_header_len;
-       skb_len = skb->len + len_diff; /* minus result pass check */
+
+       /* If set, use *mtu_len as the input L3 length, like iph->tot_len in fib_lookup */
+       skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
+
+       skb_len += len_diff; /* minus result pass check */
        if (skb_len <= dev_len) {
                ret = BPF_MTU_CHK_RET_SUCCESS;
                goto out;
@@ -5713,6 +5717,10 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
        /* Add L2-header as dev MTU is L3 size */
        dev_len = mtu + dev->hard_header_len;
 
+       /* If set, use *mtu_len as the input L3 length, like iph->tot_len in fib_lookup */
+       if (*mtu_len)
+               xdp_len = *mtu_len + dev->hard_header_len;
+
        xdp_len += len_diff; /* minus result pass check */
        if (xdp_len > dev_len)
                ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
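
Both MTU-check hunks give *mtu_len a dual role: a non-zero value on input
is taken as the caller-supplied L3 length (analogous to iph->tot_len in
the BPF fib_lookup helper) instead of the skb/xdp length, and combining it
with BPF_MTU_CHK_SEGS is rejected just as a non-zero len_diff already was.
The resulting length selection, as a sketch with the names from the hunk:

    /* explicit *mtu_len wins; otherwise fall back to the packet itself */
    skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
    skb_len += len_diff;                    /* len_diff may be negative */

    if (skb_len <= mtu + dev->hard_header_len)
            ret = BPF_MTU_CHK_RET_SUCCESS;
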
index 2ef2224b3bff784dc5d8edbe110cc0b08096ee78..a96a4f5de0ce2538ae680354e7f31226a5eb67fa 100644 (file)
@@ -176,7 +176,7 @@ void skb_flow_get_icmp_tci(const struct sk_buff *skb,
         * avoid confusion with packets without such field
         */
        if (icmp_has_id(ih->type))
-               key_icmp->id = ih->un.echo.id ? : 1;
+               key_icmp->id = ih->un.echo.id ? ntohs(ih->un.echo.id) : 1;
        else
                key_icmp->id = 0;
 }
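
The flow-dissector change is an endianness fix: ih->un.echo.id is a
__be16, but key_icmp->id is consumed in host order, so the same ICMP flow
hashed differently on big- and little-endian hosts. Converting before the
non-zero fallback keeps the "0 means no ID present" convention:

    __be16 wire = ih->un.echo.id;           /* network byte order */
    u16 host = ntohs(wire);                 /* host byte order */

    key_icmp->id = host ? host : 1;         /* 0 stays reserved */
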
index 0ed98f20448a29b2d4eb73981b229cdda315c51b..cc31b601ae103387fdcdc83713f31b726ec25fc7 100644 (file)
@@ -3440,6 +3440,32 @@ static void tw_prot_cleanup(struct timewait_sock_ops *twsk_prot)
        twsk_prot->twsk_slab = NULL;
 }
 
+static int tw_prot_init(const struct proto *prot)
+{
+       struct timewait_sock_ops *twsk_prot = prot->twsk_prot;
+
+       if (!twsk_prot)
+               return 0;
+
+       twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s",
+                                             prot->name);
+       if (!twsk_prot->twsk_slab_name)
+               return -ENOMEM;
+
+       twsk_prot->twsk_slab =
+               kmem_cache_create(twsk_prot->twsk_slab_name,
+                                 twsk_prot->twsk_obj_size, 0,
+                                 SLAB_ACCOUNT | prot->slab_flags,
+                                 NULL);
+       if (!twsk_prot->twsk_slab) {
+               pr_crit("%s: Can't create timewait sock SLAB cache!\n",
+                       prot->name);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
 static void req_prot_cleanup(struct request_sock_ops *rsk_prot)
 {
        if (!rsk_prot)
@@ -3496,22 +3522,8 @@ int proto_register(struct proto *prot, int alloc_slab)
                if (req_prot_init(prot))
                        goto out_free_request_sock_slab;
 
-               if (prot->twsk_prot != NULL) {
-                       prot->twsk_prot->twsk_slab_name = kasprintf(GFP_KERNEL, "tw_sock_%s", prot->name);
-
-                       if (prot->twsk_prot->twsk_slab_name == NULL)
-                               goto out_free_request_sock_slab;
-
-                       prot->twsk_prot->twsk_slab =
-                               kmem_cache_create(prot->twsk_prot->twsk_slab_name,
-                                                 prot->twsk_prot->twsk_obj_size,
-                                                 0,
-                                                 SLAB_ACCOUNT |
-                                                 prot->slab_flags,
-                                                 NULL);
-                       if (prot->twsk_prot->twsk_slab == NULL)
-                               goto out_free_timewait_sock_slab;
-               }
+               if (tw_prot_init(prot))
+                       goto out_free_timewait_sock_slab;
        }
 
        mutex_lock(&proto_list_mutex);
index 1f73603913f5aeacdcb2554ed695e51262c0b2ba..2be5c69824f94423e2f90b97938dd623a4fea9c7 100644 (file)
@@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                return 0;       /* discard, don't send a reset here */
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        if (dccp_bad_service_code(sk, service)) {
                dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
                goto drop;
index 4d4956ed303b096e685b499c534128be8c5a91a9..d142eb2b288b3d9a28934b5a10f43ae5c363d961 100644 (file)
@@ -1066,6 +1066,7 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 {
        struct dsa_switch *ds = dp->ds;
        struct dsa_switch_tree *dst = ds->dst;
+       const struct dsa_device_ops *tag_ops;
        enum dsa_tag_protocol tag_protocol;
 
        tag_protocol = dsa_get_tag_protocol(dp, master);
@@ -1080,14 +1081,16 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
                 * nothing to do here.
                 */
        } else {
-               dst->tag_ops = dsa_tag_driver_get(tag_protocol);
-               if (IS_ERR(dst->tag_ops)) {
-                       if (PTR_ERR(dst->tag_ops) == -ENOPROTOOPT)
+               tag_ops = dsa_tag_driver_get(tag_protocol);
+               if (IS_ERR(tag_ops)) {
+                       if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
                                return -EPROBE_DEFER;
                        dev_warn(ds->dev, "No tagger for this switch\n");
                        dp->master = NULL;
-                       return PTR_ERR(dst->tag_ops);
+                       return PTR_ERR(tag_ops);
                }
+
+               dst->tag_ops = tag_ops;
        }
 
        dp->master = master;
index 6bd7ca09af03dd5385096f749cf05afecb4b7795..fd472eae4f5ca65f8a9722f4b3d156bd44f3a142 100644 (file)
@@ -705,12 +705,15 @@ static bool reqsk_queue_unlink(struct request_sock *req)
        return found;
 }
 
-void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
 {
-       if (reqsk_queue_unlink(req)) {
+       bool unlinked = reqsk_queue_unlink(req);
+
+       if (unlinked) {
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                reqsk_put(req);
        }
+       return unlinked;
 }
 EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
 
index 47db1bfdaaa0a0c8e151ab927d87b50c49772a31..bc2f6ca971520e39d3d18299b80454cc949fd892 100644 (file)
@@ -309,7 +309,7 @@ have_carrier:
  */
 static void __init ic_close_devs(void)
 {
-       struct net_device *selected_dev = ic_dev->dev;
+       struct net_device *selected_dev = ic_dev ? ic_dev->dev : NULL;
        struct ic_device *d, *next;
        struct net_device *dev;
 
@@ -317,16 +317,18 @@ static void __init ic_close_devs(void)
        next = ic_first_dev;
        while ((d = next)) {
                bool bring_down = (d != ic_dev);
-               struct net_device *lower_dev;
+               struct net_device *lower;
                struct list_head *iter;
 
                next = d->next;
                dev = d->dev;
 
-               netdev_for_each_lower_dev(selected_dev, lower_dev, iter) {
-                       if (dev == lower_dev) {
-                               bring_down = false;
-                               break;
+               if (selected_dev) {
+                       netdev_for_each_lower_dev(selected_dev, lower, iter) {
+                               if (dev == lower) {
+                                       bring_down = false;
+                                       break;
+                               }
                        }
                }
                if (bring_down) {
index c576a63d09db1b5412becc51052441a7352f122f..d1e04d2b5170ec0d054764f2f116ef6d818d03ef 100644 (file)
@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu     = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct arpt_entry **)private->jumpstack[cpu];
@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
         * (other than comefrom, which userspace doesn't care
@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct arpt_entry *e;
        struct xt_counters *counters;
-       struct xt_table_info *private = xt_table_get_private_protected(table);
+       struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;
 
@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
        if (!IS_ERR(t)) {
                struct arpt_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
@@ -1017,7 +1017,7 @@ static int do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1330,7 +1330,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
                                       void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
 
                ret = compat_table_info(private, &info);
index e8f6f9d86237635b26b37ef8d473a40a9f88c5b6..f15bc21d730164baf6cd2e8bf982c851685ee3c5 100644 (file)
@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
        WARN_ON(!(table->valid_hooks & (1 << hook)));
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ipt_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -964,7 +964,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET, name);
        if (!IS_ERR(t)) {
                struct ipt_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1018,7 +1018,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1173,7 +1173,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1543,7 +1543,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 02d81d79deeb1a3d8f103f87918d211019940085..bba150fdd265814d5a540be2b37c85d138eea930 100644 (file)
@@ -2687,44 +2687,15 @@ out:
        return rth;
 }
 
-static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
-{
-       return NULL;
-}
-
-static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
-{
-       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       return mtu ? : dst->dev->mtu;
-}
-
-static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                         struct sk_buff *skb, u32 mtu,
-                                         bool confirm_neigh)
-{
-}
-
-static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
-                                      struct sk_buff *skb)
-{
-}
-
-static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
-                                         unsigned long old)
-{
-       return NULL;
-}
-
 static struct dst_ops ipv4_dst_blackhole_ops = {
-       .family                 =       AF_INET,
-       .check                  =       ipv4_blackhole_dst_check,
-       .mtu                    =       ipv4_blackhole_mtu,
-       .default_advmss         =       ipv4_default_advmss,
-       .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
-       .redirect               =       ipv4_rt_blackhole_redirect,
-       .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
-       .neigh_lookup           =       ipv4_neigh_lookup,
+       .family                 = AF_INET,
+       .default_advmss         = ipv4_default_advmss,
+       .neigh_lookup           = ipv4_neigh_lookup,
+       .check                  = dst_blackhole_check,
+       .cow_metrics            = dst_blackhole_cow_metrics,
+       .update_pmtu            = dst_blackhole_update_pmtu,
+       .redirect               = dst_blackhole_redirect,
+       .mtu                    = dst_blackhole_mtu,
 };
 
 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
index 0055ae0a3bf840c737f5f61fb1ef683208387e32..7513ba45553dba4de8fc9b85b2c18874f1ecbe22 100644 (file)
@@ -804,8 +804,11 @@ embryonic_reset:
                tcp_reset(sk, skb);
        }
        if (!fastopen) {
-               inet_csk_reqsk_queue_drop(sk, req);
-               __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+               bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
+
+               if (unlinked)
+                       __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+               *req_stolen = !unlinked;
        }
        return NULL;
 }
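
This hunk pairs with the inet_connection_sock.c change above:
inet_csk_reqsk_queue_drop() now reports whether this caller actually
unlinked the request, and tcp_check_req() uses the result to set
*req_stolen. The contract, in short: only the CPU that wins the unlink may
account the embryonic reset; a loser must tell its caller the request was
taken by a concurrent path:

    bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

    if (unlinked)            /* we owned the unlink: safe to account it */
            __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
    *req_stolen = !unlinked; /* another CPU raced us to the request */
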
index ef9d022e693f047ef2e86e6ec370b1bfcfc28ad1..679699e953f1a4d8bcd455856e96f8d7601f1e6c 100644 (file)
@@ -2486,7 +2486,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
        const struct net_device *dev;
 
        if (rt->nh)
-               fib6_nh = nexthop_fib6_nh(rt->nh);
+               fib6_nh = nexthop_fib6_nh_bh(rt->nh);
 
        seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
 
index e9d2a4a409aabef2d4b76cc6a8ae9d3ce8da0183..80256717868e69766acb3b590680f34a4ed1c0e9 100644 (file)
@@ -245,16 +245,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;
 
-       /* While RFC4291 is not explicit about v4mapped addresses
-        * in IPv6 headers, it seems clear linux dual-stack
-        * model can not deal properly with these.
-        * Security models could be fooled by ::ffff:127.0.0.1 for example.
-        *
-        * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
-        */
-       if (ipv6_addr_v4mapped(&hdr->saddr))
-               goto err;
-
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
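
Dropping the blanket v4mapped-source check here (it rejected traffic that
other parts of the stack handle fine) and re-adding it in the DCCP hunk
above and the TCP/MPTCP subflow hunks below confines the defense to
connection requests, where a SYN from ::ffff:a.b.c.d would otherwise
produce a confusing v4-mapped session. For reference, ::ffff:0:0/96 is the
mapped prefix, and the in-tree helper's test amounts to this sketch:

    static bool is_v4mapped(const struct in6_addr *a)
    {
            /* first 64 bits all zero, next 32 bits 0000:ffff */
            return (a->s6_addr32[0] | a->s6_addr32[1]) == 0 &&
                    a->s6_addr32[2] == htonl(0x0000ffff);
    }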
 
index 0d453fa9e327bde73da046a941361ce8a0052d35..2e2119bfcf137348e1ee77dcd360267e2ef47d77 100644 (file)
@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
 
        local_bh_disable();
        addend = xt_write_recseq_begin();
-       private = rcu_access_pointer(table->private);
+       private = READ_ONCE(table->private); /* Address dependency. */
        cpu        = smp_processor_id();
        table_base = private->entries;
        jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 {
        unsigned int countersize;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
        unsigned int off, num;
        const struct ip6t_entry *e;
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        int ret = 0;
        const void *loc_cpu_entry;
 
@@ -980,7 +980,7 @@ static int get_info(struct net *net, void __user *user, const int *len)
        t = xt_request_find_table_lock(net, AF_INET6, name);
        if (!IS_ERR(t)) {
                struct ip6t_getinfo info;
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
 #ifdef CONFIG_COMPAT
                struct xt_table_info tmp;
 
@@ -1035,7 +1035,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
 
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               struct xt_table_info *private = xt_table_get_private_protected(t);
+               struct xt_table_info *private = t->private;
                if (get.size == private->size)
                        ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
@@ -1189,7 +1189,7 @@ do_add_counters(struct net *net, sockptr_t arg, unsigned int len)
        }
 
        local_bh_disable();
-       private = xt_table_get_private_protected(t);
+       private = t->private;
        if (private->number != tmp.num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
@@ -1552,7 +1552,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
                            void __user *userptr)
 {
        struct xt_counters *counters;
-       const struct xt_table_info *private = xt_table_get_private_protected(table);
+       const struct xt_table_info *private = table->private;
        void __user *pos;
        unsigned int size;
        int ret = 0;
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = xt_table_get_private_protected(t);
+               const struct xt_table_info *private = t->private;
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 1536f4948e86a50b8273573b10a5d8b4d3c5078a..1056b0229ffdc676012f7608eec52da0e752a698 100644 (file)
@@ -260,34 +260,16 @@ static struct dst_ops ip6_dst_ops_template = {
        .confirm_neigh          =       ip6_confirm_neigh,
 };
 
-static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
-{
-       unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
-
-       return mtu ? : dst->dev->mtu;
-}
-
-static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
-                                        struct sk_buff *skb, u32 mtu,
-                                        bool confirm_neigh)
-{
-}
-
-static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
-                                     struct sk_buff *skb)
-{
-}
-
 static struct dst_ops ip6_dst_blackhole_ops = {
-       .family                 =       AF_INET6,
-       .destroy                =       ip6_dst_destroy,
-       .check                  =       ip6_dst_check,
-       .mtu                    =       ip6_blackhole_mtu,
-       .default_advmss         =       ip6_default_advmss,
-       .update_pmtu            =       ip6_rt_blackhole_update_pmtu,
-       .redirect               =       ip6_rt_blackhole_redirect,
-       .cow_metrics            =       dst_cow_metrics_generic,
-       .neigh_lookup           =       ip6_dst_neigh_lookup,
+       .family                 = AF_INET6,
+       .default_advmss         = ip6_default_advmss,
+       .neigh_lookup           = ip6_dst_neigh_lookup,
+       .check                  = ip6_dst_check,
+       .destroy                = ip6_dst_destroy,
+       .cow_metrics            = dst_cow_metrics_generic,
+       .update_pmtu            = dst_blackhole_update_pmtu,
+       .redirect               = dst_blackhole_redirect,
+       .mtu                    = dst_blackhole_mtu,
 };
 
 static const u32 ip6_template_metrics[RTAX_MAX] = {
index bd44ded7e50c19d393433a9b7cd18384677f746f..d0f007741e8ed3638999823ca0637f870a3a7c2c 100644 (file)
@@ -1175,6 +1175,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);
 
index d7b3d905d5353425574230a1731acd15049b2c5d..b00d6f5b33f40ca4cc3ba6d237020f0515013676 100644 (file)
@@ -23,6 +23,7 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
        struct aead_request *aead_req;
        int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
        u8 *__aad;
+       int ret;
 
        aead_req = kzalloc(reqsize + aad_len, GFP_ATOMIC);
        if (!aead_req)
@@ -40,10 +41,10 @@ int aead_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
        aead_request_set_crypt(aead_req, sg, sg, data_len, b_0);
        aead_request_set_ad(aead_req, sg[0].length);
 
-       crypto_aead_encrypt(aead_req);
+       ret = crypto_aead_encrypt(aead_req);
        kfree_sensitive(aead_req);
 
-       return 0;
+       return ret;
 }
 
 int aead_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad, size_t aad_len,
index 6f3b3a0cc10a4eb88f17fc5811459722a80608f8..512cab073f2e8ed9ff7bbf5c0d339e5240c77770 100644 (file)
@@ -22,6 +22,7 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
        struct aead_request *aead_req;
        int reqsize = sizeof(*aead_req) + crypto_aead_reqsize(tfm);
        const __le16 *fc;
+       int ret;
 
        if (data_len < GMAC_MIC_LEN)
                return -EINVAL;
@@ -59,10 +60,10 @@ int ieee80211_aes_gmac(struct crypto_aead *tfm, const u8 *aad, u8 *nonce,
        aead_request_set_crypt(aead_req, sg, sg, 0, iv);
        aead_request_set_ad(aead_req, GMAC_AAD_LEN + data_len);
 
-       crypto_aead_encrypt(aead_req);
+       ret = crypto_aead_encrypt(aead_req);
        kfree_sensitive(aead_req);
 
-       return 0;
+       return ret;
 }
 
 struct crypto_aead *ieee80211_aes_gmac_key_setup(const u8 key[],
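
Both mac80211 crypto hunks fix the same omission: the return value of
crypto_aead_encrypt() was thrown away, so a failing transform still
reported success. These call sites are synchronous and simply propagate
`ret`; for a transform that may complete asynchronously, the kernel's
generic pattern is crypto_wait_req(), sketched here for contrast:

    DECLARE_CRYPTO_WAIT(wait);
    int err;

    aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                              crypto_req_done, &wait);
    /* folds -EINPROGRESS/-EBUSY into the final completion status */
    err = crypto_wait_req(crypto_aead_encrypt(aead_req), &wait);
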
index c4c70e30ad7f02644db03978a3f7ff5dc1687e05..68a0de02b56185f76d43ac549e4f5317f739020c 100644 (file)
@@ -2950,14 +2950,14 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
                        continue;
 
                for (j = 0; j < IEEE80211_HT_MCS_MASK_LEN; j++) {
-                       if (~sdata->rc_rateidx_mcs_mask[i][j]) {
+                       if (sdata->rc_rateidx_mcs_mask[i][j] != 0xff) {
                                sdata->rc_has_mcs_mask[i] = true;
                                break;
                        }
                }
 
                for (j = 0; j < NL80211_VHT_NSS_MAX; j++) {
-                       if (~sdata->rc_rateidx_vht_mcs_mask[i][j]) {
+                       if (sdata->rc_rateidx_vht_mcs_mask[i][j] != 0xffff) {
                                sdata->rc_has_vht_mcs_mask[i] = true;
                                break;
                        }
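
The bitrate-mask hunk is a textbook integer-promotion bug: for a u8
element, `~mask` is computed after promotion to int, so ~0xff yields
0xffffff00, which is non-zero, and the "has the user restricted the mask?"
test fired even for the default all-ones mask. Comparing against the
all-ones constant of the element's actual width is the correct test:

    u8 m8 = 0xff;
    u16 m16 = 0xffff;

    if (~m8)            /* WRONG: ~m8 == 0xffffff00 as int, always true */
            ;
    if (m8 != 0xff)     /* right for the u8 HT MCS mask bytes */
            ;
    if (m16 != 0xffff)  /* right for the u16 VHT MCS mask words */
            ;
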
index 1f552f374e97d2e581375dc07b73f2f5c9c30c8f..a7ac53a2f00d88bb874d69f3ae8db538116c1c2d 100644 (file)
@@ -1874,6 +1874,8 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        /* remove beacon */
        kfree(sdata->u.ibss.ie);
+       sdata->u.ibss.ie = NULL;
+       sdata->u.ibss.ie_len = 0;
 
        /* on the next join, re-program HT parameters */
        memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
index 4f3f8bb58e76cfa44d8545ac0e004c5e60af96cd..1b9c82616606b8851b2da8a43b4f2c6ec0c5b49d 100644 (file)
@@ -973,8 +973,19 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
                        continue;
 
                if (!dflt_chandef.chan) {
+                       /*
+                        * Assign the first enabled channel from this band's
+                        * channel list to dflt_chandef.
+                        */
+                       for (i = 0; i < sband->n_channels; i++)
+                               if (!(sband->channels[i].flags &
+                                               IEEE80211_CHAN_DISABLED))
+                                       break;
+                       /* if none found then use the first anyway */
+                       if (i == sband->n_channels)
+                               i = 0;
                        cfg80211_chandef_create(&dflt_chandef,
-                                               &sband->channels[0],
+                                               &sband->channels[i],
                                                NL80211_CHAN_NO_HT);
                        /* init channel we're on */
                        if (!local->use_chanctx && !local->_oper_chandef.chan) {
index 2e33a1263518d761dfa6af17a1fe2fd979fdd12c..ce4e3855fec18b801febaa0b23bf233397569196 100644 (file)
@@ -5071,7 +5071,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                he_oper_ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_OPERATION,
                                                  ies->data, ies->len);
                if (he_oper_ie &&
-                   he_oper_ie[1] == ieee80211_he_oper_size(&he_oper_ie[3]))
+                   he_oper_ie[1] >= ieee80211_he_oper_size(&he_oper_ie[3]))
                        he_oper = (void *)(he_oper_ie + 3);
                else
                        he_oper = NULL;
index 2f44f49197892d9d28e591037b55cea0c96399dd..ecad9b10984ffd4ea5c4d540647186aebbcb0c6a 100644 (file)
@@ -805,7 +805,6 @@ minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
 static u16
 minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
 {
-       struct minstrel_mcs_group_data *mg;
        u8 type = MINSTREL_SAMPLE_TYPE_INC;
        int i, index = 0;
        u8 group;
@@ -813,7 +812,6 @@ minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
        group = mi->sample[type].sample_group;
        for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
                group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
-               mg = &mi->groups[group];
 
                index = minstrel_ht_group_min_rate_offset(mi, group,
                                                          fast_rate_dur);
index f080fcf60e453549df52e151d5ceacc5e1bcd7ba..c0fa526a45b4d25a95a159bd88b2c6d4ef9dfc9e 100644 (file)
@@ -968,7 +968,7 @@ static void ieee80211_parse_extension_element(u32 *crc,
                break;
        case WLAN_EID_EXT_HE_OPERATION:
                if (len >= sizeof(*elems->he_operation) &&
-                   len == ieee80211_he_oper_size(data) - 1) {
+                   len >= ieee80211_he_oper_size(data) - 1) {
                        if (crc)
                                *crc = crc32_be(*crc, (void *)elem,
                                                elem->datalen + 2);
index 444a38681e93e4362b4f4fa600daf0e475d0faa0..89a4225ed3211f6c2a8b03ef3664c4413f5663cc 100644 (file)
@@ -567,15 +567,15 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
 }
 
 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
-                                 struct in_addr *addr)
+                                 struct in_addr *addr, u16 port)
 {
        u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[7];
 
        msg[0] = addr_id;
        memcpy(&msg[1], &addr->s_addr, 4);
-       msg[5] = 0;
-       msg[6] = 0;
+       msg[5] = port >> 8;
+       msg[6] = port & 0xFF;
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac);
 
@@ -584,15 +584,15 @@ static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id,
 
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
-                                  struct in6_addr *addr)
+                                  struct in6_addr *addr, u16 port)
 {
        u8 hmac[SHA256_DIGEST_SIZE];
        u8 msg[19];
 
        msg[0] = addr_id;
        memcpy(&msg[1], &addr->s6_addr, 16);
-       msg[17] = 0;
-       msg[18] = 0;
+       msg[17] = port >> 8;
+       msg[18] = port & 0xFF;
 
        mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac);
 
@@ -646,7 +646,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
                        opts->ahmac = add_addr_generate_hmac(msk->local_key,
                                                             msk->remote_key,
                                                             opts->addr_id,
-                                                            &opts->addr);
+                                                            &opts->addr,
+                                                            opts->port);
                }
        }
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -657,7 +658,8 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
                        opts->ahmac = add_addr6_generate_hmac(msk->local_key,
                                                              msk->remote_key,
                                                              opts->addr_id,
-                                                             &opts->addr6);
+                                                             &opts->addr6,
+                                                             opts->port);
                }
        }
 #endif
@@ -962,12 +964,14 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
        if (mp_opt->family == MPTCP_ADDR_IPVERSION_4)
                hmac = add_addr_generate_hmac(msk->remote_key,
                                              msk->local_key,
-                                             mp_opt->addr_id, &mp_opt->addr);
+                                             mp_opt->addr_id, &mp_opt->addr,
+                                             mp_opt->port);
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        else
                hmac = add_addr6_generate_hmac(msk->remote_key,
                                               msk->local_key,
-                                              mp_opt->addr_id, &mp_opt->addr6);
+                                              mp_opt->addr_id, &mp_opt->addr6,
+                                              mp_opt->port);
 #endif
 
        pr_debug("msk=%p, ahmac=%llu, mp_opt->ahmac=%llu\n",
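
The ADD_ADDR HMAC hunks bind the advertised port into the authenticated
message instead of hard-coding two zero bytes, so an on-path attacker can
no longer rewrite the port without invalidating the HMAC. The port is
serialized big-endian by hand; the unaligned helper expresses the same two
lines, as a sketch:

    #include <asm/unaligned.h>

    msg[5] = port >> 8;                 /* open-coded, as in the hunk */
    msg[6] = port & 0xff;

    put_unaligned_be16(port, &msg[5]);  /* equivalent helper form */
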
index 76958570ae7f258a0276e423bb7ddbbd0a200224..1590b9d4cde28f0e6acdd04d7bf2a09ad6a0da8a 100644 (file)
@@ -2968,7 +2968,7 @@ static void mptcp_release_cb(struct sock *sk)
        for (;;) {
                flags = 0;
                if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags))
-                       flags |= MPTCP_PUSH_PENDING;
+                       flags |= BIT(MPTCP_PUSH_PENDING);
                if (!flags)
                        break;
 
@@ -2981,7 +2981,7 @@ static void mptcp_release_cb(struct sock *sk)
                 */
 
                spin_unlock_bh(&sk->sk_lock.slock);
-               if (flags & MPTCP_PUSH_PENDING)
+               if (flags & BIT(MPTCP_PUSH_PENDING))
                        __mptcp_push_pending(sk, 0);
 
                cond_resched();
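
The mptcp_release_cb() fix is a bit-index-versus-bit-mask confusion:
MPTCP_PUSH_PENDING is a bit number, as test_and_clear_bit() expects, so
OR-ing the raw number into `flags` set the wrong bits and the later test
checked the wrong ones. BIT() turns an index into a mask (the value below
is illustrative, not the real constant):

    #include <linux/bits.h>

    #define MY_PUSH_PENDING 6           /* a bit *index* */

    unsigned long flags = 0;

    flags |= MY_PUSH_PENDING;           /* WRONG: 0x6 sets bits 1 and 2 */
    flags |= BIT(MY_PUSH_PENDING);      /* right: 0x40 sets bit 6 */
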
index 3d47d670e66546f2f4cd4be7588cead55687be06..d17d39ccdf34bbd14d7f79b2c9b90173ce25f06f 100644 (file)
@@ -477,6 +477,11 @@ static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
+       if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
+               __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
+               return 0;
+       }
+
        return tcp_conn_request(&mptcp_subflow_request_sock_ops,
                                &subflow_request_sock_ipv6_ops, sk, skb);
 
index 1469365bac7e4ea21d0df9307d2a2a1a17b1a4cf..1d519b0e51a5de33dec47c75f9b3e9b12f7830d8 100644 (file)
@@ -2962,6 +2962,7 @@ static int ctnetlink_exp_dump_mask(struct sk_buff *skb,
        memset(&m, 0xFF, sizeof(m));
        memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
        m.src.u.all = mask->src.u.all;
+       m.src.l3num = tuple->src.l3num;
        m.dst.protonum = tuple->dst.protonum;
 
        nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK);
index 5b05487a60d21cc019d0e9709e0aff9b67bf586a..db11e403d81874e4eff320a604ed6f7a18231a8b 100644 (file)
@@ -218,9 +218,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
                            enum ip_conntrack_info ctinfo,
                            const struct nf_hook_state *state)
 {
-       if (state->pf != NFPROTO_IPV4)
-               return -NF_ACCEPT;
-
        if (!nf_ct_is_confirmed(ct)) {
                unsigned int *timeouts = nf_ct_timeout_lookup(ct);
 
index 5fa657b8e03dff15a2fa828f22b09a364e7d5887..c77ba8690ed8d881211d713a604d95fc19f13209 100644 (file)
@@ -506,7 +506,7 @@ int nf_flow_table_init(struct nf_flowtable *flowtable)
 {
        int err;
 
-       INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
+       INIT_DELAYED_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
        flow_block_init(&flowtable->flow_block);
        init_rwsem(&flowtable->flow_block_lock);
 
index 224c8e537cb3357200da964ac29e336cf10798a4..f57f1a6ba96f6c6479ffa696ecd89d277223302f 100644 (file)
@@ -6783,6 +6783,9 @@ static int nft_register_flowtable_net_hooks(struct net *net,
 
        list_for_each_entry(hook, hook_list, list) {
                list_for_each_entry(ft, &table->flowtables, list) {
+                       if (!nft_is_active_next(net, ft))
+                               continue;
+
                        list_for_each_entry(hook2, &ft->hook_list, list) {
                                if (hook->ops.dev == hook2->ops.dev &&
                                    hook->ops.pf == hook2->ops.pf) {
@@ -6842,6 +6845,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
        struct nft_hook *hook, *next;
        struct nft_trans *trans;
        bool unregister = false;
+       u32 flags;
        int err;
 
        err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
@@ -6856,6 +6860,17 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
                }
        }
 
+       if (nla[NFTA_FLOWTABLE_FLAGS]) {
+               flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+               if (flags & ~NFT_FLOWTABLE_MASK)
+                       return -EOPNOTSUPP;
+               if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^
+                   (flags & NFT_FLOWTABLE_HW_OFFLOAD))
+                       return -EOPNOTSUPP;
+       } else {
+               flags = flowtable->data.flags;
+       }
+
        err = nft_register_flowtable_net_hooks(ctx->net, ctx->table,
                                               &flowtable_hook.list, flowtable);
        if (err < 0)
@@ -6869,6 +6884,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
                goto err_flowtable_update_hook;
        }
 
+       nft_trans_flowtable_flags(trans) = flags;
        nft_trans_flowtable(trans) = flowtable;
        nft_trans_flowtable_update(trans) = true;
        INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans));
@@ -6963,8 +6979,10 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
        if (nla[NFTA_FLOWTABLE_FLAGS]) {
                flowtable->data.flags =
                        ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
-               if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK)
+               if (flowtable->data.flags & ~NFT_FLOWTABLE_MASK) {
+                       err = -EOPNOTSUPP;
                        goto err3;
+               }
        }
 
        write_pnet(&flowtable->data.net, net);
@@ -8176,6 +8194,8 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWFLOWTABLE:
                        if (nft_trans_flowtable_update(trans)) {
+                               nft_trans_flowtable(trans)->data.flags =
+                                       nft_trans_flowtable_flags(trans);
                                nf_tables_flowtable_notify(&trans->ctx,
                                                           nft_trans_flowtable(trans),
                                                           &nft_trans_flowtable_hooks(trans),
index bce6ca203d46242aaa993ae5e485af1556e824de..6bd31a7a27fc5856271e8405d638a5aa33441c9a 100644 (file)
@@ -1351,14 +1351,6 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
 }
 EXPORT_SYMBOL(xt_counters_alloc);
 
-struct xt_table_info
-*xt_table_get_private_protected(const struct xt_table *table)
-{
-       return rcu_dereference_protected(table->private,
-                                        mutex_is_locked(&xt[table->af].mutex));
-}
-EXPORT_SYMBOL(xt_table_get_private_protected);
-
 struct xt_table_info *
 xt_replace_table(struct xt_table *table,
              unsigned int num_counters,
@@ -1366,6 +1358,7 @@ xt_replace_table(struct xt_table *table,
              int *error)
 {
        struct xt_table_info *private;
+       unsigned int cpu;
        int ret;
 
        ret = xt_jumpstack_alloc(newinfo);
@@ -1375,20 +1368,47 @@ xt_replace_table(struct xt_table *table,
        }
 
        /* Do the substitution. */
-       private = xt_table_get_private_protected(table);
+       local_bh_disable();
+       private = table->private;
 
        /* Check inside lock: is the old number correct? */
        if (num_counters != private->number) {
                pr_debug("num_counters != table->private->number (%u/%u)\n",
                         num_counters, private->number);
+               local_bh_enable();
                *error = -EAGAIN;
                return NULL;
        }
 
        newinfo->initial_entries = private->initial_entries;
+       /*
+        * Ensure contents of newinfo are visible before assigning to
+        * private.
+        */
+       smp_wmb();
+       table->private = newinfo;
+
+       /* make sure all cpus see new ->private value */
+       smp_mb();
 
-       rcu_assign_pointer(table->private, newinfo);
-       synchronize_rcu();
+       /*
+        * Even though table entries have now been swapped, other CPUs
+        * may still be using the old entries...
+        */
+       local_bh_enable();
+
+       /* ... so wait until xt_recseq is even on all cpus */
+       for_each_possible_cpu(cpu) {
+               seqcount_t *s = &per_cpu(xt_recseq, cpu);
+               u32 seq = raw_read_seqcount(s);
+
+               if (seq & 1) {
+                       do {
+                               cond_resched();
+                               cpu_relax();
+                       } while (seq == raw_read_seqcount(s));
+               }
+       }
 
        audit_log_nfcfg(table->name, table->af, private->number,
                        !private->number ? AUDIT_XT_OP_REGISTER :
@@ -1424,12 +1444,12 @@ struct xt_table *xt_register_table(struct net *net,
        }
 
        /* Simplifies replace_table code. */
-       rcu_assign_pointer(table->private, bootstrap);
+       table->private = bootstrap;
 
        if (!xt_replace_table(table, 0, newinfo, &ret))
                goto unlock;
 
-       private = xt_table_get_private_protected(table);
+       private = table->private;
        pr_debug("table->private->number = %u\n", private->number);
 
        /* save number of initial entries */
@@ -1452,8 +1472,7 @@ void *xt_unregister_table(struct xt_table *table)
        struct xt_table_info *private;
 
        mutex_lock(&xt[table->af].mutex);
-       private = xt_table_get_private_protected(table);
-       RCU_INIT_POINTER(table->private, NULL);
+       private = table->private;
        list_del(&table->list);
        mutex_unlock(&xt[table->af].mutex);
        audit_log_nfcfg(table->name, table->af, private->number,
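
The x_tables hunks revert the short-lived RCU management of table->private
and restore the original seqcount scheme: the packet path runs with bottom
halves disabled and holds the per-CPU xt_recseq counter odd for the length
of a traversal, while xt_replace_table() publishes the new table behind a
write barrier and then spins until every CPU's counter is even again,
proving no CPU can still be walking the old entries. The reader side of
the handshake, as a sketch:

    unsigned int addend;

    local_bh_disable();
    addend = xt_write_recseq_begin();   /* per-cpu seq becomes odd */

    private = READ_ONCE(table->private);        /* address dependency */
    /* ... walk private->entries and bump counters ... */

    xt_write_recseq_end(addend);        /* seq becomes even again */
    local_bh_enable();
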
index 5eddfe7bd3910a0f4cc47d3daa01bbabd6bbe32e..71cec03e861249209c1cb3f20ca09be2f300091b 100644 (file)
@@ -271,9 +271,11 @@ static void ovs_ct_update_key(const struct sk_buff *skb,
 /* This is called to initialize CT key fields possibly coming in from the local
  * stack.
  */
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key)
+void ovs_ct_fill_key(const struct sk_buff *skb,
+                    struct sw_flow_key *key,
+                    bool post_ct)
 {
-       ovs_ct_update_key(skb, NULL, key, false, false);
+       ovs_ct_update_key(skb, NULL, key, post_ct, false);
 }
 
 int ovs_ct_put_key(const struct sw_flow_key *swkey,
@@ -1332,7 +1334,7 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key)
        if (skb_nfct(skb)) {
                nf_conntrack_put(skb_nfct(skb));
                nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
-               ovs_ct_fill_key(skb, key);
+               ovs_ct_fill_key(skb, key, false);
        }
 
        return 0;
index 59dc32761b91fab84ea0bf3f21a172725bc42a48..317e525c8a113f65785580fd4c503faad330344e 100644 (file)
@@ -25,7 +25,8 @@ int ovs_ct_execute(struct net *, struct sk_buff *, struct sw_flow_key *,
                   const struct ovs_conntrack_info *);
 int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key);
 
-void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key);
+void ovs_ct_fill_key(const struct sk_buff *skb, struct sw_flow_key *key,
+                    bool post_ct);
 int ovs_ct_put_key(const struct sw_flow_key *swkey,
                   const struct sw_flow_key *output, struct sk_buff *skb);
 void ovs_ct_free_action(const struct nlattr *a);
@@ -74,7 +75,8 @@ static inline int ovs_ct_clear(struct sk_buff *skb,
 }
 
 static inline void ovs_ct_fill_key(const struct sk_buff *skb,
-                                  struct sw_flow_key *key)
+                                  struct sw_flow_key *key,
+                                  bool post_ct)
 {
        key->ct_state = 0;
        key->ct_zone = 0;
index c7f34d6a9934a8b756a9ab9e6feec1c0149b0050..e586424d8b04a377d7b1b3649f95168771fd6feb 100644 (file)
@@ -857,6 +857,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
        struct tc_skb_ext *tc_ext;
 #endif
+       bool post_ct = false;
        int res, err;
 
        /* Extract metadata from packet. */
@@ -895,6 +896,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
                tc_ext = skb_ext_find(skb, TC_SKB_EXT);
                key->recirc_id = tc_ext ? tc_ext->chain : 0;
                OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0;
+               post_ct = tc_ext ? tc_ext->post_ct : false;
        } else {
                key->recirc_id = 0;
        }
@@ -904,7 +906,7 @@ int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
 
        err = key_extract(skb, key);
        if (!err)
-               ovs_ct_fill_key(skb, key);   /* Must be after key_extract(). */
+               ovs_ct_fill_key(skb, key, post_ct);   /* Must be after key_extract(). */
        return err;
 }
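
The act_ct, cls_api and openvswitch hunks thread one flag end to end:
act_ct records in qdisc_skb_cb(skb)->post_ct whether conntrack already ran
(clearing it on a ct clear action), tcf_classify_ingress() copies it into
the tc_skb_ext, and ovs_flow_key_extract() reads it back so that
ovs_ct_fill_key() builds a ct_state reflecting the earlier lookup. The
retrieval side, condensed:

    struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);
    bool post_ct = ext ? ext->post_ct : false;

    err = key_extract(skb, key);
    if (!err)                           /* CT fields go in last */
            ovs_ct_fill_key(skb, key, post_ct);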
 
index edb6ac17cecabd94fe392eb4f589dbbbf7bfa2c0..dfc820ee553a0948cc64f25f5b8f9c5d0061cfd4 100644 (file)
@@ -1058,6 +1058,11 @@ static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
        rc = copied;
 
        if (addr) {
+               /* There is an anonymous 2-byte hole after sq_family,
+                * make sure to clear it.
+                */
+               memset(addr, 0, sizeof(*addr));
+
                addr->sq_family = AF_QIPCRTR;
                addr->sq_node = cb->src_node;
                addr->sq_port = cb->src_port;
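
The qrtr change plugs a kernel-to-user infoleak through struct padding:
sockaddr_qrtr opens with a 16-bit family followed by 32-bit members, so
the compiler inserts a two-byte hole that member-wise assignment never
writes, and recvmsg() copied whatever was on the stack there. Zeroing the
whole struct first guarantees the hole reads back as zeros. The layout:

    struct sockaddr_qrtr {
            __kernel_sa_family_t sq_family; /* 2 bytes             */
                                            /* 2 bytes of padding  */
            __u32 sq_node;                  /* 4-byte aligned      */
            __u32 sq_port;
    };
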
index f0a0aa125b00ad9e34725daf0ce4457d2d2ec32c..16e888a9601dd18c7b38c6ae72494d1aa975a37e 100644 (file)
@@ -945,13 +945,14 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
        tcf_lastuse_update(&c->tcf_tm);
 
        if (clear) {
+               qdisc_skb_cb(skb)->post_ct = false;
                ct = nf_ct_get(skb, &ctinfo);
                if (ct) {
                        nf_conntrack_put(&ct->ct_general);
                        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
                }
 
-               goto out;
+               goto out_clear;
        }
 
        family = tcf_ct_skb_nf_family(skb);
@@ -1030,8 +1031,9 @@ out_push:
        skb_push_rcsum(skb, nh_ofs);
 
 out:
-       tcf_action_update_bstats(&c->common, skb);
        qdisc_skb_cb(skb)->post_ct = true;
+out_clear:
+       tcf_action_update_bstats(&c->common, skb);
        if (defrag)
                qdisc_skb_cb(skb)->pkt_len = skb->len;
        return retval;
index e37556cc37ab6d1ad6e163b2579aabf3f2ece446..13341e7fb077c481d66cbb028fc026746e81aa3e 100644 (file)
@@ -1629,6 +1629,7 @@ int tcf_classify_ingress(struct sk_buff *skb,
                        return TC_ACT_SHOT;
                ext->chain = last_executed_chain;
                ext->mru = qdisc_skb_cb(skb)->mru;
+               ext->post_ct = qdisc_skb_cb(skb)->post_ct;
        }
 
        return ret;
index d097b5c15faa5da51e850281ebcc39f04c5c2788..c69a4ba9c33fdf416064ed3989a1b0424b515637 100644 (file)
@@ -1451,7 +1451,7 @@ static int fl_set_key_ct(struct nlattr **tb,
                               &mask->ct_state, TCA_FLOWER_KEY_CT_STATE_MASK,
                               sizeof(key->ct_state));
 
-               err = fl_validate_ct_state(mask->ct_state,
+               err = fl_validate_ct_state(key->ct_state & mask->ct_state,
                                           tb[TCA_FLOWER_KEY_CT_STATE_MASK],
                                           extack);
                if (err)
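
The one-line flower change is subtle: bits of the key that the mask does not cover can never influence matching, so validating the raw key would reject filters that are actually well-formed. A condensed sketch of the corrected check (the supported bitmap is a stand-in for the real per-flag validation):

static bool sketch_ct_state_ok(u16 key, u16 mask, u16 supported)
{
	/* Validate only the bits the filter actually matches on. */
	return ((key & mask) & ~supported) == 0;
}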
index 50f680f03a547ebe952eae9657b40d4de0fc5c9f..2adbd945bf15aee182bc1505ec54b9695d104d52 100644 (file)
@@ -345,6 +345,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
        struct sk_buff **old = NULL;
        unsigned int mask;
        u32 max_P;
+       u8 *stab;
 
        if (opt == NULL)
                return -EINVAL;
@@ -361,8 +362,8 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
        max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_CHOKE_PARMS]);
-
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+       stab = nla_data(tb[TCA_CHOKE_STAB]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
                return -EINVAL;
 
        if (ctl->limit > CHOKE_MAX_QUEUE)
@@ -412,7 +413,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
 
        red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_CHOKE_STAB]),
+                     stab,
                      max_P);
        red_set_vars(&q->vars);
 
index e0bc77533acc39d90dbbffc88241ea3063e27d3f..f4132dc25ac05bf276c22e8ab967541fbf898463 100644 (file)
@@ -480,7 +480,7 @@ static inline int gred_change_vq(struct Qdisc *sch, int dp,
        struct gred_sched *table = qdisc_priv(sch);
        struct gred_sched_data *q = table->tab[dp];
 
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log)) {
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
                NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
                return -EINVAL;
        }
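
choke, gred, red and sfq all funnel their RED parameters through red_check_params(), which now also receives the user-supplied Stab table. A hedged sketch of the kind of bounds check this enables (the exact in-kernel check may differ; 256 is RED_STAB_SIZE):

/* Each Stab entry is later used as a shift count, so any value >= 32
 * would be undefined behaviour when the qdisc computes averages.
 */
static bool sketch_check_stab(const u8 *stab)
{
	int i;

	if (!stab)			/* sfq has no Stab and passes NULL */
		return true;
	for (i = 0; i < 256; i++)	/* 256 == RED_STAB_SIZE */
		if (stab[i] >= 32)
			return false;
	return true;
}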
index dff3adf5a9156c2412c64a10ad1b2ce9e1367433..62e12cb41a3e1c0b2403552eb05128e4d3ac50b6 100644 (file)
@@ -1020,6 +1020,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        struct nlattr *tb[TCA_HTB_MAX + 1];
        struct tc_htb_glob *gopt;
        unsigned int ntx;
+       bool offload;
        int err;
 
        qdisc_watchdog_init(&q->watchdog, sch);
@@ -1044,9 +1045,9 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        if (gopt->version != HTB_VER >> 16)
                return -EINVAL;
 
-       q->offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
+       offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
 
-       if (q->offload) {
+       if (offload) {
                if (sch->parent != TC_H_ROOT)
                        return -EOPNOTSUPP;
 
@@ -1076,7 +1077,7 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
                q->rate2quantum = 1;
        q->defcls = gopt->defcls;
 
-       if (!q->offload)
+       if (!offload)
                return 0;
 
        for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
@@ -1107,12 +1108,14 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt,
        if (err)
                goto err_free_qdiscs;
 
+       /* Defer this assignment, so that htb_destroy skips offload-related
+        * parts (especially calling ndo_setup_tc) on errors.
+        */
+       q->offload = true;
+
        return 0;
 
 err_free_qdiscs:
-       /* TC_HTB_CREATE call failed, avoid any further calls to the driver. */
-       q->offload = false;
-
        for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
             ntx++)
                qdisc_put(q->direct_qdiscs[ntx]);
@@ -1340,8 +1343,12 @@ htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct tc_htb_qopt_offload offload_opt;
+       struct htb_sched *q = qdisc_priv(sch);
        int err;
 
+       if (!q->offload)
+               return sch->dev_queue;
+
        offload_opt = (struct tc_htb_qopt_offload) {
                .command = TC_HTB_LEAF_QUERY_QUEUE,
                .classid = TC_H_MIN(tcm->tcm_parent),
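
The htb change applies a common error-handling pattern: a "feature enabled" flag is latched only after every setup step has succeeded, so the teardown path (and here also htb_select_queue) can key off the flag without ever seeing half-initialized offload state. A generic sketch, with setup_offload() as a hypothetical helper:

static int sketch_init(struct sketch_priv *q, bool want_offload)
{
	int err;

	if (!want_offload)
		return 0;

	err = setup_offload(q);	/* hypothetical; may partially fail */
	if (err)
		return err;	/* q->offload stays false: destroy skips offload teardown */

	q->offload = true;	/* latch only after full success */
	return 0;
}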
index b4ae34d7aa965decf541660ee195df92f646f799..40adf1f07a82dfdb8f704eecfa9a14f000213a91 100644 (file)
@@ -242,6 +242,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        unsigned char flags;
        int err;
        u32 max_P;
+       u8 *stab;
 
        if (tb[TCA_RED_PARMS] == NULL ||
            tb[TCA_RED_STAB] == NULL)
@@ -250,7 +251,9 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
 
        ctl = nla_data(tb[TCA_RED_PARMS]);
-       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log))
+       stab = nla_data(tb[TCA_RED_STAB]);
+       if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
+                             ctl->Scell_log, stab))
                return -EINVAL;
 
        err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
@@ -288,7 +291,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
        red_set_parms(&q->parms,
                      ctl->qth_min, ctl->qth_max, ctl->Wlog,
                      ctl->Plog, ctl->Scell_log,
-                     nla_data(tb[TCA_RED_STAB]),
+                     stab,
                      max_P);
        red_set_vars(&q->vars);
 
index b25e51440623bce56628ff33142c63739c4ac239..066754a18569ba8f8a295f14049b06dfbeb4c7c6 100644 (file)
@@ -647,7 +647,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        }
 
        if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
-                                       ctl_v1->Wlog, ctl_v1->Scell_log))
+                                       ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
                return -EINVAL;
        if (ctl_v1 && ctl_v1->qth_min) {
                p = kmalloc(sizeof(*p), GFP_KERNEL);
index 6614c9fdc51e581d343f0d6e4adbbe8a1385b0cf..a6aa17df09efbe6c3e88652ae9f3c242398af389 100644 (file)
@@ -584,13 +584,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
                goto out;
        }
 
-       rcu_read_lock();
-       if (__sk_dst_get(sk) != tp->dst) {
-               dst_hold(tp->dst);
-               sk_setup_caps(sk, tp->dst);
-       }
-       rcu_read_unlock();
-
        /* pack up chunks */
        pkt_count = sctp_packet_pack(packet, head, gso, gfp);
        if (!pkt_count) {
index 3fd06a27105ddbfdaf9fc288558d57e471572341..5cb1aa5f067bcae7f4b74fd59927dd968c092785 100644 (file)
@@ -1135,6 +1135,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 
 static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
 {
+       struct sock *sk = ctx->asoc->base.sk;
        struct list_head *ltransport;
        struct sctp_packet *packet;
        struct sctp_transport *t;
@@ -1144,6 +1145,12 @@ static void sctp_outq_flush_transports(struct sctp_flush_ctx *ctx)
                t = list_entry(ltransport, struct sctp_transport, send_ready);
                packet = &t->packet;
                if (!sctp_packet_empty(packet)) {
+                       rcu_read_lock();
+                       if (t->dst && __sk_dst_get(sk) != t->dst) {
+                               dst_hold(t->dst);
+                               sk_setup_caps(sk, t->dst);
+                       }
+                       rcu_read_unlock();
                        error = sctp_packet_transmit(packet, ctx->gfp);
                        if (error < 0)
                                ctx->q->asoc->base.sk->sk_err = -error;
index 008670d1f43e1c2e9153a706cc18cc8e8ba62a6c..136338b85504bee716a48cd03c92eb88d016eadf 100644 (file)
@@ -2895,17 +2895,22 @@ int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
 
 #ifdef CONFIG_TIPC_CRYPTO
 static int tipc_nl_retrieve_key(struct nlattr **attrs,
-                               struct tipc_aead_key **key)
+                               struct tipc_aead_key **pkey)
 {
        struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
+       struct tipc_aead_key *key;
 
        if (!attr)
                return -ENODATA;
 
-       *key = (struct tipc_aead_key *)nla_data(attr);
-       if (nla_len(attr) < tipc_aead_key_size(*key))
+       if (nla_len(attr) < sizeof(*key))
+               return -EINVAL;
+       key = (struct tipc_aead_key *)nla_data(attr);
+       if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
+           nla_len(attr) < tipc_aead_key_size(key))
                return -EINVAL;
 
+       *pkey = key;
        return 0;
 }
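
The tipc hunk shows the canonical way to validate a variable-length netlink attribute: first check that the fixed header fits, then range-check the self-describing length field, and only then check the full size. Condensed from the hunk above:

static struct tipc_aead_key *sketch_get_key(struct nlattr *attr)
{
	struct tipc_aead_key *key;

	if (nla_len(attr) < (int)sizeof(*key))
		return NULL;		/* the fixed part must fit first */

	key = nla_data(attr);
	if (key->keylen > TIPC_AEAD_KEYLEN_MAX ||
	    nla_len(attr) < (int)tipc_aead_key_size(key))
		return NULL;		/* then validate the variable part */

	return key;
}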
 
index 5546710d8ac1a52fbabbac9e27d0ea687875ace8..bc7fb9bf3351ea2ee13f00e3c4ab646b8fd1b8ab 100644 (file)
@@ -755,6 +755,7 @@ static struct sock *__vsock_create(struct net *net,
                vsk->buffer_size = psk->buffer_size;
                vsk->buffer_min_size = psk->buffer_min_size;
                vsk->buffer_max_size = psk->buffer_max_size;
+               security_sk_clone(parent, sk);
        } else {
                vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
                vsk->owner = get_current_cred();
index 521d36bb0803667e5c55d52f8ef8af515f0b8da9..034af85f79d84117cd2e2505cfd0d4a391bacdb3 100644 (file)
@@ -70,7 +70,7 @@ __cfg80211_wdev_from_attrs(struct cfg80211_registered_device *rdev,
        struct wireless_dev *result = NULL;
        bool have_ifidx = attrs[NL80211_ATTR_IFINDEX];
        bool have_wdev_id = attrs[NL80211_ATTR_WDEV];
-       u64 wdev_id;
+       u64 wdev_id = 0;
        int wiphy_idx = -1;
        int ifidx = -1;
 
@@ -14789,6 +14789,7 @@ bad_tid_conf:
 #define NL80211_FLAG_NEED_WDEV_UP      (NL80211_FLAG_NEED_WDEV |\
                                         NL80211_FLAG_CHECK_NETDEV_UP)
 #define NL80211_FLAG_CLEAR_SKB         0x20
+#define NL80211_FLAG_NO_WIPHY_MTX      0x40
 
 static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                            struct genl_info *info)
@@ -14840,7 +14841,7 @@ static int nl80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
                info->user_ptr[0] = rdev;
        }
 
-       if (rdev) {
+       if (rdev && !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
                wiphy_lock(&rdev->wiphy);
                /* we keep the mutex locked until post_doit */
                __release(&rdev->wiphy.mtx);
@@ -14865,7 +14866,8 @@ static void nl80211_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
                }
        }
 
-       if (info->user_ptr[0]) {
+       if (info->user_ptr[0] &&
+           !(ops->internal_flags & NL80211_FLAG_NO_WIPHY_MTX)) {
                struct cfg80211_registered_device *rdev = info->user_ptr[0];
 
                /* we kept the mutex locked since pre_doit */
@@ -15329,7 +15331,9 @@ static const struct genl_small_ops nl80211_small_ops[] = {
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit = nl80211_wiphy_netns,
                .flags = GENL_UNS_ADMIN_PERM,
-               .internal_flags = NL80211_FLAG_NEED_WIPHY,
+               .internal_flags = NL80211_FLAG_NEED_WIPHY |
+                                 NL80211_FLAG_NEED_RTNL |
+                                 NL80211_FLAG_NO_WIPHY_MTX,
        },
        {
                .cmd = NL80211_CMD_GET_SURVEY,
index 168cd27e6122b65b16a76be4e68eb846e01668cd..2c52535f9b566d8a6b3ed0406937e8b392764322 100644 (file)
@@ -20,6 +20,7 @@ SECTIONS {
 
        __patchable_function_entries : { *(__patchable_function_entries) }
 
+#ifdef CONFIG_LTO_CLANG
        /*
         * With CONFIG_LTO_CLANG, LLD always enables -fdata-sections and
         * -ffunction-sections, which increases the size of the final module.
@@ -41,6 +42,7 @@ SECTIONS {
        }
 
        .text : { *(.text .text.[0-9a-zA-Z_]*) }
+#endif
 }
 
 /* bring in arch-specific sections */
index 1d20003243c3fb6f176dbefd76dea1e087c52458..0ba01847e836c326d5e880d34c18fd5a8157ed0a 100644 (file)
@@ -98,6 +98,14 @@ struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
        struct rb_node *node, *parent = NULL;
        struct integrity_iint_cache *iint, *test_iint;
 
+       /*
+        * The integrity's "iint_cache" is initialized at security_init()
+        * only if "integrity" is included in the ordered list of LSMs
+        * enabled on the boot command line.
+        */
+       if (!iint_cache)
+               panic("%s: lsm=integrity required.\n", __func__);
+
        iint = integrity_iint_find(inode);
        if (iint)
                return iint;
index 6fe25300b89dc5b7aa40e55c0a525d691fe3f364..7650de0485700d4dd32d83e9e02d884b316fdc2e 100644 (file)
@@ -219,14 +219,21 @@ static inline bool selinux_policycap_genfs_seclabel_symlinks(void)
        return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
 }
 
+struct selinux_policy_convert_data;
+
+struct selinux_load_state {
+       struct selinux_policy *policy;
+       struct selinux_policy_convert_data *convert_data;
+};
+
 int security_mls_enabled(struct selinux_state *state);
 int security_load_policy(struct selinux_state *state,
-                       void *data, size_t len,
-                       struct selinux_policy **newpolicyp);
+                        void *data, size_t len,
+                        struct selinux_load_state *load_state);
 void selinux_policy_commit(struct selinux_state *state,
-                       struct selinux_policy *newpolicy);
+                          struct selinux_load_state *load_state);
 void selinux_policy_cancel(struct selinux_state *state,
-                       struct selinux_policy *policy);
+                          struct selinux_load_state *load_state);
 int security_read_policy(struct selinux_state *state,
                         void **data, size_t *len);
 int security_read_state_kernel(struct selinux_state *state,
index 01a7d50ed39b8a29c96259b4bf2b123387c6877b..fff6babeeae669d6d40a8325d54c921d4c537706 100644 (file)
@@ -563,17 +563,13 @@ static int sel_make_policy_nodes(struct selinux_fs_info *fsi,
 
        ret = sel_make_bools(newpolicy, tmp_bool_dir, &tmp_bool_num,
                             &tmp_bool_names, &tmp_bool_values);
-       if (ret) {
-               pr_err("SELinux: failed to load policy booleans\n");
+       if (ret)
                goto out;
-       }
 
        ret = sel_make_classes(newpolicy, tmp_class_dir,
                               &fsi->last_class_ino);
-       if (ret) {
-               pr_err("SELinux: failed to load policy classes\n");
+       if (ret)
                goto out;
-       }
 
        /* booleans */
        old_dentry = fsi->bool_dir;
@@ -616,7 +612,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
 
 {
        struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
-       struct selinux_policy *newpolicy;
+       struct selinux_load_state load_state;
        ssize_t length;
        void *data = NULL;
 
@@ -642,23 +638,23 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
        if (copy_from_user(data, buf, count) != 0)
                goto out;
 
-       length = security_load_policy(fsi->state, data, count, &newpolicy);
+       length = security_load_policy(fsi->state, data, count, &load_state);
        if (length) {
                pr_warn_ratelimited("SELinux: failed to load policy\n");
                goto out;
        }
 
-       length = sel_make_policy_nodes(fsi, newpolicy);
+       length = sel_make_policy_nodes(fsi, load_state.policy);
        if (length) {
-               selinux_policy_cancel(fsi->state, newpolicy);
-               goto out1;
+               pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n");
+               selinux_policy_cancel(fsi->state, &load_state);
+               goto out;
        }
 
-       selinux_policy_commit(fsi->state, newpolicy);
+       selinux_policy_commit(fsi->state, &load_state);
 
        length = count;
 
-out1:
        audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
                "auid=%u ses=%u lsm=selinux res=1",
                from_kuid(&init_user_ns, audit_get_loginuid(current)),
index 3438d0130378610132edaf6a1c8e846076f6b8fb..d91e41d47777b26390b626fd38897548724c4b58 100644 (file)
 #include "policycap_names.h"
 #include "ima.h"
 
+struct convert_context_args {
+       struct selinux_state *state;
+       struct policydb *oldp;
+       struct policydb *newp;
+};
+
+struct selinux_policy_convert_data {
+       struct convert_context_args args;
+       struct sidtab_convert_params sidtab_params;
+};
+
 /* Forward declaration. */
 static int context_struct_to_string(struct policydb *policydb,
                                    struct context *context,
@@ -1974,12 +1985,6 @@ static inline int convert_context_handle_invalid_context(
        return 0;
 }
 
-struct convert_context_args {
-       struct selinux_state *state;
-       struct policydb *oldp;
-       struct policydb *newp;
-};
-
 /*
  * Convert the values in the security context
  * structure `oldc' from the values specified
@@ -2159,7 +2164,7 @@ static void selinux_policy_cond_free(struct selinux_policy *policy)
 }
 
 void selinux_policy_cancel(struct selinux_state *state,
-                       struct selinux_policy *policy)
+                          struct selinux_load_state *load_state)
 {
        struct selinux_policy *oldpolicy;
 
@@ -2167,7 +2172,8 @@ void selinux_policy_cancel(struct selinux_state *state,
                                        lockdep_is_held(&state->policy_mutex));
 
        sidtab_cancel_convert(oldpolicy->sidtab);
-       selinux_policy_free(policy);
+       selinux_policy_free(load_state->policy);
+       kfree(load_state->convert_data);
 }
 
 static void selinux_notify_policy_change(struct selinux_state *state,
@@ -2183,9 +2189,9 @@ static void selinux_notify_policy_change(struct selinux_state *state,
 }
 
 void selinux_policy_commit(struct selinux_state *state,
-                       struct selinux_policy *newpolicy)
+                          struct selinux_load_state *load_state)
 {
-       struct selinux_policy *oldpolicy;
+       struct selinux_policy *oldpolicy, *newpolicy = load_state->policy;
        u32 seqno;
 
        oldpolicy = rcu_dereference_protected(state->policy,
@@ -2225,6 +2231,7 @@ void selinux_policy_commit(struct selinux_state *state,
        /* Free the old policy */
        synchronize_rcu();
        selinux_policy_free(oldpolicy);
+       kfree(load_state->convert_data);
 
        /* Notify others of the policy change */
        selinux_notify_policy_change(state, seqno);
@@ -2241,11 +2248,10 @@ void selinux_policy_commit(struct selinux_state *state,
  * loading the new policy.
  */
 int security_load_policy(struct selinux_state *state, void *data, size_t len,
-                       struct selinux_policy **newpolicyp)
+                        struct selinux_load_state *load_state)
 {
        struct selinux_policy *newpolicy, *oldpolicy;
-       struct sidtab_convert_params convert_params;
-       struct convert_context_args args;
+       struct selinux_policy_convert_data *convert_data;
        int rc = 0;
        struct policy_file file = { data, len }, *fp = &file;
 
@@ -2275,10 +2281,10 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
                goto err_mapping;
        }
 
-
        if (!selinux_initialized(state)) {
                /* First policy load, so no need to preserve state from old policy */
-               *newpolicyp = newpolicy;
+               load_state->policy = newpolicy;
+               load_state->convert_data = NULL;
                return 0;
        }
 
@@ -2292,29 +2298,38 @@ int security_load_policy(struct selinux_state *state, void *data, size_t len,
                goto err_free_isids;
        }
 
+       convert_data = kmalloc(sizeof(*convert_data), GFP_KERNEL);
+       if (!convert_data) {
+               rc = -ENOMEM;
+               goto err_free_isids;
+       }
+
        /*
         * Convert the internal representations of contexts
         * in the new SID table.
         */
-       args.state = state;
-       args.oldp = &oldpolicy->policydb;
-       args.newp = &newpolicy->policydb;
+       convert_data->args.state = state;
+       convert_data->args.oldp = &oldpolicy->policydb;
+       convert_data->args.newp = &newpolicy->policydb;
 
-       convert_params.func = convert_context;
-       convert_params.args = &args;
-       convert_params.target = newpolicy->sidtab;
+       convert_data->sidtab_params.func = convert_context;
+       convert_data->sidtab_params.args = &convert_data->args;
+       convert_data->sidtab_params.target = newpolicy->sidtab;
 
-       rc = sidtab_convert(oldpolicy->sidtab, &convert_params);
+       rc = sidtab_convert(oldpolicy->sidtab, &convert_data->sidtab_params);
        if (rc) {
                pr_err("SELinux:  unable to convert the internal"
                        " representation of contexts in the new SID"
                        " table\n");
-               goto err_free_isids;
+               goto err_free_convert_data;
        }
 
-       *newpolicyp = newpolicy;
+       load_state->policy = newpolicy;
+       load_state->convert_data = convert_data;
        return 0;
 
+err_free_convert_data:
+       kfree(convert_data);
 err_free_isids:
        sidtab_destroy(newpolicy->sidtab);
 err_mapping:
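
After this refactor the selinuxfs caller drives a three-step transaction: security_load_policy() parses and stages everything (including convert_data), and then exactly one of selinux_policy_commit() or selinux_policy_cancel() consumes the staged state, both freeing convert_data. A sketch of the calling convention, mirroring sel_write_load(); sketch_make_fs_nodes() is a hypothetical stand-in for the selinuxfs node setup:

static ssize_t sketch_load(struct selinux_state *state, void *data, size_t len)
{
	struct selinux_load_state load_state;
	ssize_t ret;

	ret = security_load_policy(state, data, len, &load_state);
	if (ret)
		return ret;			/* nothing was staged */

	ret = sketch_make_fs_nodes(&load_state);	/* hypothetical */
	if (ret) {
		selinux_policy_cancel(state, &load_state);
		return ret;
	}

	selinux_policy_commit(state, &load_state);
	return len;
}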
index 478f757ff8435ea3d54b1c456f2d71646f537dd0..8dc61335f65e15387d0239d8e8e444ef2a5b3d77 100644 (file)
@@ -613,7 +613,7 @@ static int tomoyo_check_unix_address(struct sockaddr *addr,
 static bool tomoyo_kernel_service(void)
 {
        /* Nothing to do if I am a kernel service. */
-       return (current->flags & (PF_KTHREAD | PF_IO_WORKER)) == PF_KTHREAD;
+       return current->flags & PF_KTHREAD;
 }
 
 /**
index b59b0f323d4e0271598767bec73a1117bba650ce..79ade335c8a09494c1b7b5698c83ad674b8ec604 100644 (file)
@@ -989,8 +989,12 @@ static int azx_prepare(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return 0;
+
        chip = card->private_data;
        chip->pm_prepared = 1;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
 
        flush_work(&azx_bus(chip)->unsol_work);
 
@@ -1005,7 +1009,11 @@ static void azx_complete(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip;
 
+       if (!azx_is_pm_ready(card))
+               return;
+
        chip = card->private_data;
+       snd_power_change_state(card, SNDRV_CTL_POWER_D0);
        chip->pm_prepared = 0;
 }
 
index 316b9b4ccb32d179a00a2c87645d85b231a5ee1d..58946d069ee59e068bf8990fdbe3a9cab59632e8 100644 (file)
@@ -5256,7 +5256,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
        case 0x10ec0274:
        case 0x10ec0294:
                alc_process_coef_fw(codec, coef0274);
-               msleep(80);
+               msleep(850);
                val = alc_read_coef_idx(codec, 0x46);
                is_ctia = (val & 0x00f0) == 0x00f0;
                break;
@@ -5440,6 +5440,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
                                       struct hda_jack_callback *jack)
 {
        snd_hda_gen_hp_automute(codec, jack);
+       alc_update_headset_mode(codec);
 }
 
 static void alc_probe_headset_mode(struct hda_codec *codec)
@@ -8057,6 +8058,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
                      ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x87f2, "HP ProBook 640 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f4, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f5, "HP", ALC287_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x87f7, "HP Spectre x360 14", ALC245_FIXUP_HP_X360_AMP),
index d3001fb18141fe02317a3056dd35e9b9b541db91..176437a441e6c7052a0583018a78a806ce3c62b6 100644 (file)
@@ -1521,6 +1521,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
        case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
        case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
+       case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
                return true;
        }
 
index 8b281f722e5bd8f098a384dd9f00c5115ad497d7..f6afee209620d81c8b0314c8f03c1f05d1949fbb 100644 (file)
@@ -1154,6 +1154,7 @@ struct kvm_x86_mce {
 #define KVM_XEN_HVM_CONFIG_HYPERCALL_MSR       (1 << 0)
 #define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL     (1 << 1)
 #define KVM_XEN_HVM_CONFIG_SHARED_INFO         (1 << 2)
+#define KVM_XEN_HVM_CONFIG_RUNSTATE            (1 << 3)
 
 struct kvm_xen_hvm_config {
        __u32 flags;
@@ -1621,12 +1622,24 @@ struct kvm_xen_vcpu_attr {
        union {
                __u64 gpa;
                __u64 pad[8];
+               struct {
+                       __u64 state;
+                       __u64 state_entry_time;
+                       __u64 time_running;
+                       __u64 time_runnable;
+                       __u64 time_blocked;
+                       __u64 time_offline;
+               } runstate;
        } u;
 };
 
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO       0x0
 #define KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO  0x1
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR   0x2
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT        0x3
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA   0x4
+#define KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST 0x5
 
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
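
These runstate attributes are driven from userspace with the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls; the plumbing below is a hedged sketch based on the uapi fields above, not a verified snippet:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: point the vCPU's Xen runstate area at guest physical address
 * gpa; accumulated times can then be read back via _GET_ATTR with
 * KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA.
 */
static int sketch_set_runstate_addr(int vcpu_fd, __u64 gpa)
{
	struct kvm_xen_vcpu_attr attr = {
		.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
		.u.gpa = gpa,
	};

	return ioctl(vcpu_fd, KVM_XEN_VCPU_SET_ATTR, &attr);
}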
index 71aabaffe779122c9cf7d5b1391780605e21d00b..8f13b843d5b4e47bb9eaa9d17dfdbb81596fd335 100644 (file)
@@ -9,6 +9,7 @@ Type=simple
 ExecStart=/usr/bin/kvm_stat -dtcz -s 10 -L /var/log/kvm_stat.csv
 ExecReload=/bin/kill -HUP $MAINPID
 Restart=always
+RestartSec=60s
 SyslogIdentifier=kvm_stat
 SyslogLevel=debug
 
index 887a494ad5fca0215cf29af0ab8af504162b6dc9..e9eb6a6e80d25ace2bd0062c402448ad52153b90 100644 (file)
@@ -215,7 +215,7 @@ define do_install
        if [ ! -d '$(DESTDIR_SQ)$2' ]; then             \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
        fi;                                             \
-       $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
+       $(INSTALL) $(if $3,-m $3,) $1 '$(DESTDIR_SQ)$2'
 endef
 
 install_lib: all_cmd
index 2f9d685bd522c06819fd96e8b2790e4b6ab4984a..0911aea4cdbe5c68406b5bc9c15ec58c37058c94 100644 (file)
@@ -462,7 +462,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
                return err;
 
        case BTF_KIND_ARRAY:
-               return btf_dump_order_type(d, btf_array(t)->type, through_ptr);
+               return btf_dump_order_type(d, btf_array(t)->type, false);
 
        case BTF_KIND_STRUCT:
        case BTF_KIND_UNION: {
index d43cc3f29daee987c4dbde5a4ec3c018716d3fc4..4181d178ee7b10e0c7652f6ac5f0777257d22bd7 100644 (file)
@@ -1181,7 +1181,8 @@ static int bpf_object__elf_init(struct bpf_object *obj)
        if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
                pr_warn("elf: failed to get section names strings from %s: %s\n",
                        obj->path, elf_errmsg(-1));
-               return -LIBBPF_ERRNO__FORMAT;
+               err = -LIBBPF_ERRNO__FORMAT;
+               goto errout;
        }
 
        /* Old LLVM set e_machine to EM_NONE */
index 4dd73de00b6f1c0a413ff01c15c5352f824f4167..d2cb28e9ef52ec2af43c8497732a8c40916c920d 100644 (file)
@@ -40,7 +40,7 @@ static int libbpf_netlink_open(__u32 *nl_pid)
        memset(&sa, 0, sizeof(sa));
        sa.nl_family = AF_NETLINK;
 
-       sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+       sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_ROUTE);
        if (sock < 0)
                return -errno;
 
index ace8772a4f038d6b4f0730cd0d06d832f1d6c976..7c4a9d424a64a4052da32c09b3869b0af0e08421 100644 (file)
@@ -402,35 +402,42 @@ static pid_t handle_signalfd(struct daemon *daemon)
        int status;
        pid_t pid;
 
+       /*
+        * Treat the signal fd data as a pure notification and check the
+        * state of all sessions, because multiple signals can get coalesced
+        * in the kernel and we may receive only a single signal even if
+        * multiple SIGCHLD were generated.
+        */
        err = read(daemon->signal_fd, &si, sizeof(struct signalfd_siginfo));
-       if (err != sizeof(struct signalfd_siginfo))
+       if (err != sizeof(struct signalfd_siginfo)) {
+               pr_err("failed to read signal fd\n");
                return -1;
+       }
 
        list_for_each_entry(session, &daemon->sessions, list) {
+               if (session->pid == -1)
+                       continue;
 
-               if (session->pid != (int) si.ssi_pid)
+               pid = waitpid(session->pid, &status, WNOHANG);
+               if (pid <= 0)
                        continue;
 
-               pid = waitpid(session->pid, &status, 0);
-               if (pid == session->pid) {
-                       if (WIFEXITED(status)) {
-                               pr_info("session '%s' exited, status=%d\n",
-                                       session->name, WEXITSTATUS(status));
-                       } else if (WIFSIGNALED(status)) {
-                               pr_info("session '%s' killed (signal %d)\n",
-                                       session->name, WTERMSIG(status));
-                       } else if (WIFSTOPPED(status)) {
-                               pr_info("session '%s' stopped (signal %d)\n",
-                                       session->name, WSTOPSIG(status));
-                       } else {
-                               pr_info("session '%s' Unexpected status (0x%x)\n",
-                                       session->name, status);
-                       }
+               if (WIFEXITED(status)) {
+                       pr_info("session '%s' exited, status=%d\n",
+                               session->name, WEXITSTATUS(status));
+               } else if (WIFSIGNALED(status)) {
+                       pr_info("session '%s' killed (signal %d)\n",
+                               session->name, WTERMSIG(status));
+               } else if (WIFSTOPPED(status)) {
+                       pr_info("session '%s' stopped (signal %d)\n",
+                               session->name, WSTOPSIG(status));
+               } else {
+                       pr_info("session '%s' Unexpected status (0x%x)\n",
+                               session->name, status);
                }
 
                session->state = KILL;
                session->pid = -1;
-               return pid;
        }
 
        return 0;
@@ -443,7 +450,6 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
                .fd     = daemon->signal_fd,
                .events = POLLIN,
        };
-       pid_t wpid = 0, pid = session->pid;
        time_t start;
 
        start = time(NULL);
@@ -452,7 +458,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
                int err = poll(&pollfd, 1, 1000);
 
                if (err > 0) {
-                       wpid = handle_signalfd(daemon);
+                       handle_signalfd(daemon);
                } else if (err < 0) {
                        perror("failed: poll\n");
                        return -1;
@@ -460,7 +466,7 @@ static int daemon_session__wait(struct daemon_session *session, struct daemon *d
 
                if (start + secs < time(NULL))
                        return -1;
-       } while (wpid != pid);
+       } while (session->pid != -1);
 
        return 0;
 }
@@ -902,7 +908,9 @@ static void daemon_session__kill(struct daemon_session *session,
                        daemon_session__signal(session, SIGKILL);
                        break;
                default:
-                       break;
+                       pr_err("failed to wait for session %s\n",
+                              session->name);
+                       return;
                }
                how++;
 
@@ -955,7 +963,8 @@ static void daemon__kill(struct daemon *daemon)
                        daemon__signal(daemon, SIGKILL);
                        break;
                default:
-                       break;
+                       pr_err("failed to wait for sessions\n");
+                       return;
                }
                how++;
 
@@ -1344,7 +1353,7 @@ out:
                close(sock_fd);
        if (conf_fd != -1)
                close(conf_fd);
-       if (conf_fd != -1)
+       if (signal_fd != -1)
                close(signal_fd);
 
        pr_info("daemon exited\n");
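
The rewritten handler reaps with waitpid(..., WNOHANG) across every session instead of trusting si.ssi_pid, which is the standard defence against signalfd coalescing several SIGCHLDs into one readable event. The core loop, reduced to its essentials:

#include <sys/wait.h>

static void sketch_reap(pid_t *pids, int n)
{
	int i, status;

	for (i = 0; i < n; i++) {
		if (pids[i] == -1)
			continue;		/* already reaped */
		if (waitpid(pids[i], &status, WNOHANG) <= 0)
			continue;		/* this child has not exited */
		pids[i] = -1;			/* mark session as gone */
	}
}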
index f57e075b0ed2f9362f720cff5319fa1022863d8a..c72adbd67386e9df2daa410cc4c3b87fdc2bcdb6 100644 (file)
@@ -86,7 +86,7 @@ static struct {
                .msg_load_fail    = "check your vmlinux setting?",
                .target_func      = &epoll_pwait_loop,
                .expect_result    = (NR_ITERS + 1) / 2,
-               .pin              = true,
+               .pin              = true,
        },
 #ifdef HAVE_BPF_PROLOGUE
        {
@@ -99,13 +99,6 @@ static struct {
                .expect_result    = (NR_ITERS + 1) / 4,
        },
 #endif
-       {
-               .prog_id          = LLVM_TESTCASE_BPF_RELOCATION,
-               .desc             = "BPF relocation checker",
-               .name             = "[bpf_relocation_test]",
-               .msg_compile_fail = "fix 'perf test LLVM' first",
-               .msg_load_fail    = "libbpf error when dealing with relocation",
-       },
 };
 
 static int do_test(struct bpf_object *obj, int (*func)(void),
index 5ad3ca8d681b7d8a8d8156942c39dfe61d10836a..58984380b211c286d6ebb0c5c9690cc7cd23cd4f 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # daemon operations
 # SPDX-License-Identifier: GPL-2.0
 
index 953f4afacd3b5d2cdf9e53c5990cdf2a6bf42c76..5b6ccb90b39712916203d405019460caeac7812a 100644 (file)
@@ -298,10 +298,6 @@ static int auxtrace_queues__queue_buffer(struct auxtrace_queues *queues,
                queue->set = true;
                queue->tid = buffer->tid;
                queue->cpu = buffer->cpu;
-       } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
-               pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
-                      queue->cpu, queue->tid, buffer->cpu, buffer->tid);
-               return -EINVAL;
        }
 
        buffer->buffer_nr = queues->next_buffer_nr++;
index 57d58c81a5f8eb365b7749e90dd2cccf53a5a675..cdecda1ddd36e29eb5d04fbdf8c4bf2fa9c6b393 100644 (file)
@@ -196,25 +196,32 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
        }
 
        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
+               free(info_linear);
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                return -2;
        }
 
        info = &info_linear->info;
+       if (!info->jited_ksyms) {
+               free(info_linear);
+               return -1;
+       }
 
        /* number of ksyms, func_lengths, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
-           sub_prog_cnt != info->nr_jited_func_lens)
+           sub_prog_cnt != info->nr_jited_func_lens) {
+               free(info_linear);
                return -1;
+       }
 
        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
-                       err = -1;
-                       goto out;
+                       free(info_linear);
+                       return -1;
                }
                if (btf__get_from_id(info->btf_id, &btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
index 42c84adeb2fbe1e65d82b9b069e024b36aa2cb62..c0c0fab22cb869f0c2d1a01433473c17528d3a86 100644 (file)
@@ -356,6 +356,9 @@ __add_event(struct list_head *list, int *idx,
        struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
                               cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
 
+       if (pmu && attr->type == PERF_TYPE_RAW)
+               perf_pmu__warn_invalid_config(pmu, attr->config, name);
+
        if (init_attr)
                event_attr_init(attr);
 
index 44ef28302fc77cbdb681684701e60bb42fc96874..46fd0f9984842a72f7bc4c0f80f1ae7cc40ed720 100644 (file)
@@ -1812,3 +1812,36 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
 
        return nr_caps;
 }
+
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+                                  char *name)
+{
+       struct perf_pmu_format *format;
+       __u64 masks = 0, bits;
+       char buf[100];
+       unsigned int i;
+
+       list_for_each_entry(format, &pmu->format, list) {
+               if (format->value != PERF_PMU_FORMAT_VALUE_CONFIG)
+                       continue;
+
+               for_each_set_bit(i, format->bits, PERF_PMU_FORMAT_BITS)
+                       masks |= 1ULL << i;
+       }
+
+       /*
+        * Kernel doesn't export any valid format bits.
+        */
+       if (masks == 0)
+               return;
+
+       bits = config & ~masks;
+       if (bits == 0)
+               return;
+
+       bitmap_scnprintf((unsigned long *)&bits, sizeof(bits) * 8, buf, sizeof(buf));
+
+       pr_warning("WARNING: event '%s' not valid (bits %s of config "
+                  "'%llx' not supported by kernel)!\n",
+                  name ?: "N/A", buf, config);
+}
index 8164388478c658692cac4710e2d613d665fc5088..160b0f5617711b0f5cd73de92484f7b5c0114de6 100644 (file)
@@ -123,4 +123,7 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
 
 int perf_pmu__caps_parse(struct perf_pmu *pmu);
 
+void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+                                  char *name);
+
 #endif /* __PMU_H */
index b698046ec2db2ad58ddfa666f64af5dca8a095a3..dff178103ce5ae2e17f2a2f58e2eda63e88bc825 100644 (file)
@@ -424,7 +424,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
 
        while (!io.eof) {
                static const char anonstr[] = "//anon";
-               size_t size;
+               size_t size, aligned_size;
 
                /* ensure null termination since stack will be reused. */
                event->mmap2.filename[0] = '\0';
@@ -484,11 +484,12 @@ out:
                }
 
                size = strlen(event->mmap2.filename) + 1;
-               size = PERF_ALIGN(size, sizeof(u64));
+               aligned_size = PERF_ALIGN(size, sizeof(u64));
                event->mmap2.len -= event->mmap.start;
                event->mmap2.header.size = (sizeof(event->mmap2) -
-                                       (sizeof(event->mmap2.filename) - size));
-               memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+                                       (sizeof(event->mmap2.filename) - aligned_size));
+               memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
+                       (aligned_size - size));
                event->mmap2.header.size += machine->id_hdr_size;
                event->mmap2.pid = tgid;
                event->mmap2.tid = pid;
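
The synthesized-mmap fix keeps two lengths apart: the header size must use the u64-aligned filename length, while the zeroing must start right after the NUL and cover both the alignment padding and the id header area. Worked through with an illustrative name:

/* Example: filename "/lib/x.so" gives size = 10 (including the NUL) and
 * aligned_size = PERF_ALIGN(10, 8) = 16. The memset below clears the
 * 6 padding bytes plus id_hdr_size, so no stale stack bytes leak into
 * the synthesized event:
 *
 *   memset(event->mmap2.filename + size, 0,
 *          machine->id_hdr_size + (aligned_size - size));
 */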
@@ -758,7 +759,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
        for (i = 0; i < n; i++) {
                char *end;
                pid_t _pid;
-               bool kernel_thread;
+               bool kernel_thread = false;
 
                _pid = strtol(dirent[i]->d_name, &end, 10);
                if (*end)
index 3cc91ad048ea87a9b954711ffb2e901e754253c4..43beb169631d318a5d0fd6501e61060394fa3fcc 100644 (file)
@@ -133,6 +133,8 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s
        if (dso != NULL) {
                __dsos__add(&machine->dsos, dso);
                dso__set_long_name(dso, long_name, false);
+               /* Put the dso: __dsos__add() above already took its own reference */
+               dso__put(dso);
        }
 
        return dso;
index a7f0603d33f62b6632d7930579774a1a6e3351fc..690870043ac0e3a389f8ceb52530aebc62dcb7f8 100644 (file)
@@ -40,3 +40,5 @@
 # CONFIG_RESET_BRCMSTB_RESCAL is not set
 # CONFIG_RESET_INTEL_GW is not set
 # CONFIG_ADI_AXI_ADC is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_PAGE_POISONING is not set
index 0b550cbd667d1ea36aaa16c3c2d4ff1ba4a6679f..1e2683dcc0e7a987b90f295eb4342b5731ff3bf9 100644 (file)
@@ -13,7 +13,7 @@ from typing import List, Set
 CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
 CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
 
-KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value'])
+KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
 
 class KconfigEntry(KconfigEntryBase):
 
index 3b796dd5e5772e828f43f6aee46752c8b6b2e3da..ca24f6839d50c49f585414620ec93af6d19422dd 100644 (file)
@@ -296,21 +296,34 @@ static void *idr_throbber(void *arg)
        return NULL;
 }
 
+/*
+ * There are always either 1 or 2 objects in the IDR.  If we find nothing,
+ * or we find something at an ID we didn't expect, that's a bug.
+ */
 void idr_find_test_1(int anchor_id, int throbber_id)
 {
        pthread_t throbber;
        time_t start = time(NULL);
 
-       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
-
        BUG_ON(idr_alloc(&find_idr, xa_mk_value(anchor_id), anchor_id,
                                anchor_id + 1, GFP_KERNEL) != anchor_id);
 
+       pthread_create(&throbber, NULL, idr_throbber, &throbber_id);
+
+       rcu_read_lock();
        do {
                int id = 0;
                void *entry = idr_get_next(&find_idr, &id);
-               BUG_ON(entry != xa_mk_value(id));
+               rcu_read_unlock();
+               if ((id != anchor_id && id != throbber_id) ||
+                   entry != xa_mk_value(id)) {
+                       printf("%s(%d, %d): %p at %d\n", __func__, anchor_id,
+                               throbber_id, entry, id);
+                       abort();
+               }
+               rcu_read_lock();
        } while (time(NULL) < start + 11);
+       rcu_read_unlock();
 
        pthread_join(throbber, NULL);
 
@@ -577,6 +590,7 @@ void ida_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        idr_checks();
        ida_tests();
@@ -584,5 +598,6 @@ int __weak main(void)
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
diff --git a/tools/testing/radix-tree/linux/compiler_types.h b/tools/testing/radix-tree/linux/compiler_types.h
deleted file mode 100644 (file)
index e69de29..0000000
index 9eae0fb5a67d1ef746e859958eb247df18a01f21..e00520cc63498d3f670dd8a5677d728f9ca19760 100644 (file)
@@ -224,7 +224,9 @@ void multiorder_checks(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        multiorder_checks();
+       rcu_unregister_thread();
        return 0;
 }
index e61e43efe463cdb0aae7d6efb08066a5d93f59be..f20e12cbbfd407dfaa7ff81ef6e735cb3ae93fbe 100644 (file)
@@ -25,11 +25,13 @@ void xarray_tests(void)
 
 int __weak main(void)
 {
+       rcu_register_thread();
        radix_tree_init();
        xarray_tests();
        radix_tree_cpu_dead(1);
        rcu_barrier();
        if (nr_allocated)
                printf("nr_allocated = %d\n", nr_allocated);
+       rcu_unregister_thread();
        return 0;
 }
index 9210691aa9985601995eef4b8af58da33cd993b9..e3e08d9c7020e4ecab3833f292caf9a06c1fd894 100644 (file)
@@ -284,16 +284,28 @@ endfunction
 // Set up test pattern in the FFR
 // x0: pid
 // x2: generation
+//
+// We need to generate a canonical FFR value, which consists of a number of
+// low "1" bits, followed by a number of zeros. This gives us 17 unique values
+// per 16 bits of FFR, so we create a 4 bit signature out of the PID and
+// generation, and use that as the initial number of ones in the pattern.
+// We fill the upper lanes of FFR with zeros.
 // Beware: corrupts P0.
 function setup_ffr
        mov     x4, x30
 
-       bl      pattern
+       and     w0, w0, #0x3
+       bfi     w0, w2, #2, #2
+       mov     w1, #1
+       lsl     w1, w1, w0
+       sub     w1, w1, #1
+
        ldr     x0, =ffrref
-       ldr     x1, =scratch
-       rdvl    x2, #1
-       lsr     x2, x2, #3
-       bl      memcpy
+       strh    w1, [x0], 2
+       rdvl    x1, #1
+       lsr     x1, x1, #3
+       sub     x1, x1, #2
+       bl      memclr
 
        mov     x0, #0
        ldr     x1, =ffrref
index 36af1c138faf04eafad22be651dc6a310eb0c5fd..b62a393153363eebf01cc225a8856cfbdcf7d34f 100644 (file)
@@ -128,6 +128,8 @@ static void test_check_mtu_xdp(__u32 mtu, __u32 ifindex)
        test_check_mtu_run_xdp(skel, skel->progs.xdp_use_helper, mtu);
        test_check_mtu_run_xdp(skel, skel->progs.xdp_exceed_mtu, mtu);
        test_check_mtu_run_xdp(skel, skel->progs.xdp_minus_delta, mtu);
+       test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len, mtu);
+       test_check_mtu_run_xdp(skel, skel->progs.xdp_input_len_exceed, mtu);
 
 cleanup:
        test_check_mtu__destroy(skel);
@@ -187,6 +189,8 @@ static void test_check_mtu_tc(__u32 mtu, __u32 ifindex)
        test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu, mtu);
        test_check_mtu_run_tc(skel, skel->progs.tc_exceed_mtu_da, mtu);
        test_check_mtu_run_tc(skel, skel->progs.tc_minus_delta, mtu);
+       test_check_mtu_run_tc(skel, skel->progs.tc_input_len, mtu);
+       test_check_mtu_run_tc(skel, skel->progs.tc_input_len_exceed, mtu);
 cleanup:
        test_check_mtu__destroy(skel);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c b/tools/testing/selftests/bpf/prog_tests/fexit_sleep.c
new file mode 100644 (file)
index 0000000..6c4d42a
--- /dev/null
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <test_progs.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include "fexit_sleep.skel.h"
+
+static int do_sleep(void *skel)
+{
+       struct fexit_sleep *fexit_skel = skel;
+       struct timespec ts1 = { .tv_nsec = 1 };
+       struct timespec ts2 = { .tv_sec = 10 };
+
+       fexit_skel->bss->pid = getpid();
+       (void)syscall(__NR_nanosleep, &ts1, NULL);
+       (void)syscall(__NR_nanosleep, &ts2, NULL);
+       return 0;
+}
+
+#define STACK_SIZE (1024 * 1024)
+static char child_stack[STACK_SIZE];
+
+void test_fexit_sleep(void)
+{
+       struct fexit_sleep *fexit_skel = NULL;
+       int wstatus, duration = 0;
+       pid_t cpid;
+       int err, fexit_cnt;
+
+       fexit_skel = fexit_sleep__open_and_load();
+       if (CHECK(!fexit_skel, "fexit_skel_load", "fexit skeleton failed\n"))
+               goto cleanup;
+
+       err = fexit_sleep__attach(fexit_skel);
+       if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
+               goto cleanup;
+
+       cpid = clone(do_sleep, child_stack + STACK_SIZE, CLONE_FILES | SIGCHLD, fexit_skel);
+       if (CHECK(cpid == -1, "clone", "%s\n", strerror(errno)))
+               goto cleanup;
+
+       /* wait until first sys_nanosleep ends and second sys_nanosleep starts */
+       while (READ_ONCE(fexit_skel->bss->fentry_cnt) != 2);
+       fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+       if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+               goto cleanup;
+
+       /* Close progs and detach them. That will trigger two nop5->jmp5 rewrites
+        * in the trampolines to skip the nanosleep_fexit prog.
+        * The nanosleep_fentry prog will get detached first.
+        * The nanosleep_fexit prog will get detached second.
+        * Detaching will trigger freeing of both progs' JITed images.
+        * There will be two dying bpf_tramp_image-s, but only the initial
+        * bpf_tramp_image (the one with both _fentry and _fexit progs) will
+        * be stuck waiting for percpu_ref_kill to confirm. The other one
+        * will be freed quickly.
+        */
+       close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
+       close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
+       fexit_sleep__detach(fexit_skel);
+
+       /* kill the thread to unwind sys_nanosleep stack through the trampoline */
+       kill(cpid, 9);
+
+       if (CHECK(waitpid(cpid, &wstatus, 0) == -1, "waitpid", "%s\n", strerror(errno)))
+               goto cleanup;
+       if (CHECK(WEXITSTATUS(wstatus) != 0, "exitstatus", "failed"))
+               goto cleanup;
+
+       /* The bypassed nanosleep_fexit prog shouldn't have executed.
+        * Unlike the progs, the maps were not freed and are directly accessible.
+        */
+       fexit_cnt = READ_ONCE(fexit_skel->bss->fexit_cnt);
+       if (CHECK(fexit_cnt != 1, "fexit_cnt", "%d", fexit_cnt))
+               goto cleanup;
+
+cleanup:
+       fexit_sleep__destroy(fexit_skel);
+}
index 31975c96e2c9cfb97dc72bc85c87bde0cfc4d7e6..3ac0c9afc35a3a7187d7251a940bf7a58c50441d 100644 (file)
@@ -174,6 +174,12 @@ struct struct_in_struct {
        };
 };
 
+struct struct_in_array {};
+
+struct struct_in_array_typed {};
+
+typedef struct struct_in_array_typed struct_in_array_t[2];
+
 struct struct_with_embedded_stuff {
        int a;
        struct {
@@ -203,6 +209,8 @@ struct struct_with_embedded_stuff {
        } r[5];
        struct struct_in_struct s[10];
        int t[11];
+       struct struct_in_array (*u)[2];
+       struct_in_array_t *v;
 };
 
 struct root_struct {
diff --git a/tools/testing/selftests/bpf/progs/fexit_sleep.c b/tools/testing/selftests/bpf/progs/fexit_sleep.c
new file mode 100644 (file)
index 0000000..03a672d
--- /dev/null
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char LICENSE[] SEC("license") = "GPL";
+
+int pid = 0;
+int fentry_cnt = 0;
+int fexit_cnt = 0;
+
+SEC("fentry/__x64_sys_nanosleep")
+int BPF_PROG(nanosleep_fentry, const struct pt_regs *regs)
+{
+       if ((int)bpf_get_current_pid_tgid() != pid)
+               return 0;
+
+       fentry_cnt++;
+       return 0;
+}
+
+SEC("fexit/__x64_sys_nanosleep")
+int BPF_PROG(nanosleep_fexit, const struct pt_regs *regs, int ret)
+{
+       if ((int)bpf_get_current_pid_tgid() != pid)
+               return 0;
+
+       fexit_cnt++;
+       return 0;
+}
index b7787b43f9dba605051fd036b7e2405c1f42924c..c4a9bae96e75b7ba4aaf9087bab0a1a4fadac9eb 100644 (file)
@@ -105,6 +105,54 @@ int xdp_minus_delta(struct xdp_md *ctx)
        return retval;
 }
 
+SEC("xdp")
+int xdp_input_len(struct xdp_md *ctx)
+{
+       int retval = XDP_PASS; /* Expected retval on successful test */
+       void *data_end = (void *)(long)ctx->data_end;
+       void *data = (void *)(long)ctx->data;
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       __u32 data_len = data_end - data;
+
+       /* The API allows the user to pass a length to check as input via the
+        * mtu_len param; the resulting MTU value is still output in mtu_len
+        * after the call.
+        *
+        * The input len is L3, like the MTU and iph->tot_len.
+        * Remember that the XDP data_len is L2.
+        */
+       __u32 mtu_len = data_len - ETH_HLEN;
+
+       if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+               retval = XDP_ABORTED;
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
+SEC("xdp")
+int xdp_input_len_exceed(struct xdp_md *ctx)
+{
+       int retval = XDP_ABORTED; /* Fail */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       int err;
+
+       /* The API allows the user to pass a length to check as input via the
+        * mtu_len param; the resulting MTU value is still output in mtu_len
+        * after the call.
+        *
+        * The input length value is the L3 size, like the MTU.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       mtu_len += 1; /* Exceed with 1 */
+
+       err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+       if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+               retval = XDP_PASS; /* Success in exceeding MTU check */
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
 SEC("classifier")
 int tc_use_helper(struct __sk_buff *ctx)
 {
@@ -196,3 +244,47 @@ int tc_minus_delta(struct __sk_buff *ctx)
        global_bpf_mtu_xdp = mtu_len;
        return retval;
 }
+
+SEC("classifier")
+int tc_input_len(struct __sk_buff *ctx)
+{
+       int retval = BPF_OK; /* Expected retval on successful test */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+
+       /* The API allows the user to pass a length to check as input via the
+        * mtu_len param; the resulting MTU value is still output in mtu_len
+        * after the call.
+        *
+        * The input length value is the L3 size.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       if (bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0))
+               retval = BPF_DROP;
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
+
+SEC("classifier")
+int tc_input_len_exceed(struct __sk_buff *ctx)
+{
+       int retval = BPF_DROP; /* Fail */
+       __u32 ifindex = GLOBAL_USER_IFINDEX;
+       int err;
+
+       /* The API allows the user to pass a length to check as input via the
+        * mtu_len param; the resulting MTU value is still output in mtu_len
+        * after the call.
+        *
+        * The input length value is the L3 size, like the MTU.
+        */
+       __u32 mtu_len = GLOBAL_USER_MTU;
+
+       mtu_len += 1; /* Exceed with 1 */
+
+       err = bpf_check_mtu(ctx, ifindex, &mtu_len, 0, 0);
+       if (err == BPF_MTU_CHK_RET_FRAG_NEEDED)
+               retval = BPF_OK; /* Success in exceeding MTU check */
+
+       global_bpf_mtu_xdp = mtu_len;
+       return retval;
+}
index 9afe947cfae95302937b36264791475acce22fc3..ba6eadfec56531134e532d8bfdb516cb12960198 100644 (file)
@@ -508,10 +508,8 @@ int _ip6geneve_get_tunnel(struct __sk_buff *skb)
        }
 
        ret = bpf_skb_get_tunnel_opt(skb, &gopt, sizeof(gopt));
-       if (ret < 0) {
-               ERROR(ret);
-               return TC_ACT_SHOT;
-       }
+       if (ret < 0)
+               gopt.opt_class = 0;
 
        bpf_trace_printk(fmt, sizeof(fmt),
                        key.tunnel_id, key.remote_ipv4, gopt.opt_class);
index 1fd07a4f27ac215a5fd8f258c13079793b2a0e0e..c162498a64fc6a0644903bde7255304690af66b7 100644 (file)
@@ -6,8 +6,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 2",
@@ -20,6 +21,8 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
        .retval = 1,
 },
@@ -31,8 +34,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 4",
@@ -45,6 +49,8 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
                BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
        .result = ACCEPT,
 },
 {
@@ -55,8 +61,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 6",
@@ -67,8 +74,9 @@
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 7",
@@ -80,8 +88,9 @@
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types",
        .errstr = "dereference of modified ctx ptr",
+       .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
                            offsetof(struct __sk_buff, mark)),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .errstr = "dereference of modified ctx ptr",
+       .result = REJECT,
        .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 },
 {
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
+       .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types",
        .errstr = "R0 tried to subtract pointer from scalar",
+       .result = REJECT,
 },
 {
        "check deducing bounds from const, 10",
                BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                BPF_EXIT_INSN(),
        },
-       .result = REJECT,
        .errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+       .result = REJECT,
 },
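
The pattern in the hunks above is uniform: each case now pins down the unprivileged verdict and its expected verifier message alongside the privileged one. Annotated, the "unpriv: adding of fp, reg" case added below has this shape (reconstructed from the kernel test; annotations ours):

{
	"unpriv: adding of fp, reg",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_10),	/* scalar += fp */
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
	.result_unpriv = REJECT,	/* unprivileged: rejected, log must contain errstr_unpriv */
	.result = ACCEPT,		/* privileged: accepted */
},
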
index b117bdd3806d8789c2d15c4729b4c5c2504f5477..6f610cfddae53b1099c905e9ff7b345e3485cb09 100644 (file)
@@ -75,6 +75,8 @@
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_16b = { 4 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .result = ACCEPT,
 },
 {
@@ -91,5 +93,7 @@
        BPF_EXIT_INSN(),
        },
        .fixup_map_hash_16b = { 4 },
+       .result_unpriv = REJECT,
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
        .result = ACCEPT,
 },
index b018ad71e0a82554ea7924f9e1116536260baec9..3e32400c4b44b5590166f69a073b9276c89e1ed4 100644 (file)
        .result = ACCEPT,
 },
 {
-       "unpriv: adding of fp",
+       "unpriv: adding of fp, reg",
        .insns = {
        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_MOV64_IMM(BPF_REG_1, 0),
        BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
        BPF_EXIT_INSN(),
        },
+       .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types",
+       .result_unpriv = REJECT,
+       .result = ACCEPT,
+},
+{
+       "unpriv: adding of fp, imm",
+       .insns = {
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+       BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+       BPF_EXIT_INSN(),
+       },
        .errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
        .result_unpriv = REJECT,
        .result = ACCEPT,
index ed4e76b246499a3c8f96d88aa52eca377a335360..feb91266db39a09cf78f21d34666fd129b0684fb 100644 (file)
        .fixup_map_array_48b = { 1 },
        .result = ACCEPT,
        .result_unpriv = REJECT,
-       .errstr_unpriv = "R2 tried to add from different maps or paths",
+       .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types",
        .retval = 0,
 },
 {
        .result = ACCEPT,
        .retval = 0xabcdef12,
 },
+{
+       "map access: value_ptr += N, value_ptr -= N known scalar",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+       BPF_MOV32_IMM(BPF_REG_1, 0x12345678),
+       BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+       BPF_MOV64_IMM(BPF_REG_1, 2),
+       BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .fixup_map_array_48b = { 3 },
+       .result = ACCEPT,
+       .retval = 0x12345678,
+},
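
Roughly, the new case above is this C (a sketch with our own names; val is the looked-up array-map value):

	__u32 *val = bpf_map_lookup_elem(&array_map, &key);	/* fd patched in via fixup_map_array_48b */
	if (val) {
		*val = 0x12345678;
		val = (void *)val + 2;	/* value_ptr += 2, a known scalar */
		val = (void *)val - 2;	/* value_ptr -= 2, back to the original offset */
		return *val;		/* loads 0x12345678, matching .retval */
	}
	return 0;

The point being tested: subtracting a known scalar that brings a map value pointer back within bounds must remain verifiable.
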
 {
        "map access: unknown scalar += value_ptr, 1",
        .insns = {
index 2f2eeb8a1d86f52df11a8ae715ca428df3f030b5..5aadf84c91c04911d6b574f8f7f4e9e8e8d3b91c 100644 (file)
@@ -108,7 +108,7 @@ static void run_test(uint32_t run)
        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
        vm_create_irqchip(vm);
 
-       fprintf(stderr, "%s: [%d] start vcpus\n", __func__, run);
+       pr_debug("%s: [%d] start vcpus\n", __func__, run);
        for (i = 0; i < VCPU_NUM; ++i) {
                vm_vcpu_add_default(vm, i, guest_code);
                payloads[i].vm = vm;
@@ -124,7 +124,7 @@ static void run_test(uint32_t run)
                        check_set_affinity(throw_away, &cpu_set);
                }
        }
-       fprintf(stderr, "%s: [%d] all threads launched\n", __func__, run);
+       pr_debug("%s: [%d] all threads launched\n", __func__, run);
        sem_post(sem);
        for (i = 0; i < VCPU_NUM; ++i)
                check_join(threads[i], &b);
@@ -147,16 +147,16 @@ int main(int argc, char **argv)
                if (pid == 0)
                        run_test(i); /* This function always exits */
 
-               fprintf(stderr, "%s: [%d] waiting semaphore\n", __func__, i);
+               pr_debug("%s: [%d] waiting semaphore\n", __func__, i);
                sem_wait(sem);
                r = (rand() % DELAY_US_MAX) + 1;
-               fprintf(stderr, "%s: [%d] waiting %dus\n", __func__, i, r);
+               pr_debug("%s: [%d] waiting %dus\n", __func__, i, r);
                usleep(r);
                r = waitpid(pid, &s, WNOHANG);
                TEST_ASSERT(r != pid,
                            "%s: [%d] child exited unexpectedly status: [%d]",
                            __func__, i, s);
-               fprintf(stderr, "%s: [%d] killing child\n", __func__, i);
+               pr_debug("%s: [%d] killing child\n", __func__, i);
                kill(pid, SIGKILL);
        }
 
index ffbc4555c6e285eae2bb78f09726bde5c170098a..7f1d2765572c381812e7d91b388baeed48901e5c 100644 (file)
@@ -80,19 +80,24 @@ static inline void check_tsc_msr_rdtsc(void)
        GUEST_ASSERT(delta_ns * 100 < (t2 - t1) * 100);
 }
 
+static inline u64 get_tscpage_ts(struct ms_hyperv_tsc_page *tsc_page)
+{
+       return mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+}
+
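
The helper factored out here is the Hyper-V reference TSC page conversion; per the TLFS, the reference time in 100 ns units is

	ReferenceTime = ((VirtualTsc * TscScale) >> 64) + TscOffset

which is also why the "10 ms tolerance" assertions below compare deltas against 100000 (100000 * 100 ns = 10 ms).
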
 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page *tsc_page)
 {
        u64 r1, r2, t1, t2;
 
        /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
-       t1 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t1 = get_tscpage_ts(tsc_page);
        r1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
 
        /* 10 ms tolerance */
        GUEST_ASSERT(r1 >= t1 && r1 - t1 < 100000);
        nop_loop();
 
-       t2 = mul_u64_u64_shr64(rdtsc(), tsc_page->tsc_scale) + tsc_page->tsc_offset;
+       t2 = get_tscpage_ts(tsc_page);
        r2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
        GUEST_ASSERT(r2 >= t1 && r2 - t2 < 100000);
 }
@@ -130,7 +135,11 @@ static void guest_main(struct ms_hyperv_tsc_page *tsc_page, vm_paddr_t tsc_page_
 
        tsc_offset = tsc_page->tsc_offset;
        /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
+
        GUEST_SYNC(7);
+       /* Sanity-check the TSC page timestamp; it should be close to 0 */
+       GUEST_ASSERT(get_tscpage_ts(tsc_page) < 100000);
+
        GUEST_ASSERT(tsc_page->tsc_offset != tsc_offset);
 
        nop_loop();
index ce6bea9675c074587ca1ebc51a3402cdcf782ee0..0ccb1dda099ae9558d7cb8b2579092f1128f0a5d 100755 (executable)
@@ -658,7 +658,7 @@ test_ecn_decap()
        # In accordance with INET_ECN_decapsulate()
        __test_ecn_decap 00 00 0x00
        __test_ecn_decap 01 01 0x01
-       __test_ecn_decap 02 01 0x02
+       __test_ecn_decap 02 01 0x01
        __test_ecn_decap 01 03 0x03
        __test_ecn_decap 02 03 0x03
        test_ecn_decap_error
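
The corrected vector follows RFC 6040 as implemented by INET_ECN_decapsulate(): the arguments are inner then outer, and with inner ECT(0) (02) and outer ECT(1) (01) the outer ECT(1) is propagated, so the expected field is 0x01 rather than the preserved inner value. A condensed sketch of the rule (ours, not the kernel code; it omits the drop case for outer CE with a not-ECT inner):

	/* ECN field: 0x00 Not-ECT, 0x01 ECT(1), 0x02 ECT(0), 0x03 CE */
	static unsigned char ecn_decap(unsigned char inner, unsigned char outer)
	{
		if (outer == 0x03 && inner != 0x00)
			return 0x03;	/* outer CE propagates to an ECT inner */
		if (outer == 0x01 && inner == 0x02)
			return 0x01;	/* outer ECT(1) overrides an ECT(0) inner */
		return inner;		/* otherwise the inner field is kept */
	}
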
index 964db9ed544f9b0a9a3fbd99c5e2beeb88f2a7b2..ad32240fbfdad54361eafbc79f03480936b04241 100755 (executable)
@@ -11,6 +11,7 @@ ksft_skip=4
 timeout=30
 mptcp_connect=""
 capture=0
+do_all_tests=1
 
 TEST_COUNT=0
 
@@ -121,12 +122,6 @@ reset_with_add_addr_timeout()
                -j DROP
 }
 
-for arg in "$@"; do
-       if [ "$arg" = "-c" ]; then
-               capture=1
-       fi
-done
-
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
@@ -1221,7 +1216,8 @@ usage()
        echo "  -4 v4mapped_tests"
        echo "  -b backup_tests"
        echo "  -p add_addr_ports_tests"
-       echo "  -c syncookies_tests"
+       echo "  -k syncookies_tests"
+       echo "  -c capture pcap files"
        echo "  -h help"
 }
 
@@ -1235,12 +1231,24 @@ make_file "$cin" "client" 1
 make_file "$sin" "server" 1
 trap cleanup EXIT
 
-if [ -z $1 ]; then
+for arg in "$@"; do
+       # look for the capture flag ("c", possibly combined with other options) before launching tests
+       if [[ "${arg}" =~ ^"-"[0-9a-zA-Z]*"c"[0-9a-zA-Z]*$ ]]; then
+               capture=1
+       fi
+
+       # apart from the capture option, any other argument means: run only a subset of the tests
+       if [ "${arg}" != "-c" ]; then
+               do_all_tests=0
+       fi
+done
+
+if [ $do_all_tests -eq 1 ]; then
        all_tests
        exit $ret
 fi
 
-while getopts 'fsltra64bpch' opt; do
+while getopts 'fsltra64bpkch' opt; do
        case $opt in
                f)
                        subflows_tests
@@ -1272,9 +1280,11 @@ while getopts 'fsltra64bpch' opt; do
                p)
                        add_addr_ports_tests
                        ;;
-               c)
+               k)
                        syncookies_tests
                        ;;
+               c)
+                       ;;
                h | *)
                        usage
                        ;;
index 7b01b7c2ec104aaf1f5e15419904516070b8ea3c..066efd30e29460f81956ad9023f21200221e86cb 100644 (file)
@@ -30,25 +30,25 @@ struct reuse_opts {
 };
 
 struct reuse_opts unreusable_opts[12] = {
-       {0, 0, 0, 0},
-       {0, 0, 0, 1},
-       {0, 0, 1, 0},
-       {0, 0, 1, 1},
-       {0, 1, 0, 0},
-       {0, 1, 0, 1},
-       {0, 1, 1, 0},
-       {0, 1, 1, 1},
-       {1, 0, 0, 0},
-       {1, 0, 0, 1},
-       {1, 0, 1, 0},
-       {1, 0, 1, 1},
+       {{0, 0}, {0, 0}},
+       {{0, 0}, {0, 1}},
+       {{0, 0}, {1, 0}},
+       {{0, 0}, {1, 1}},
+       {{0, 1}, {0, 0}},
+       {{0, 1}, {0, 1}},
+       {{0, 1}, {1, 0}},
+       {{0, 1}, {1, 1}},
+       {{1, 0}, {0, 0}},
+       {{1, 0}, {0, 1}},
+       {{1, 0}, {1, 0}},
+       {{1, 0}, {1, 1}},
 };
 
 struct reuse_opts reusable_opts[4] = {
-       {1, 1, 0, 0},
-       {1, 1, 0, 1},
-       {1, 1, 1, 0},
-       {1, 1, 1, 1},
+       {{1, 1}, {0, 0}},
+       {{1, 1}, {0, 1}},
+       {{1, 1}, {1, 0}},
+       {{1, 1}, {1, 1}},
 };
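
The nested initializers now match the shape of struct reuse_opts, which, as the hunk header above suggests, wraps two two-element arrays; the implied layout (a reconstruction, field names assumed from the test's reuseaddr/reuseport pairing):

	struct reuse_opts {
		int reuseaddr[2];	/* SO_REUSEADDR for the first and second socket */
		int reuseport[2];	/* SO_REUSEPORT for the first and second socket */
	};
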
 
 int bind_port(struct __test_metadata *_metadata, int reuseaddr, int reuseport)
index d42115e4284d75fea30850c19b3c172ff52f4a49..8b0cd421ebd38b570266f53c297c74d8aafc38aa 100644 (file)
@@ -101,7 +101,7 @@ endef
 ifeq ($(CAN_BUILD_I386),1)
 $(BINARIES_32): CFLAGS += -m32
 $(BINARIES_32): LDLIBS += -lrt -ldl -lm
-$(BINARIES_32): %_32: %.c
+$(BINARIES_32): $(OUTPUT)/%_32: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-32,$(t))))
 endif
@@ -109,7 +109,7 @@ endif
 ifeq ($(CAN_BUILD_X86_64),1)
 $(BINARIES_64): CFLAGS += -m64
 $(BINARIES_64): LDLIBS += -lrt -ldl
-$(BINARIES_64): %_64: %.c
+$(BINARIES_64): $(OUTPUT)/%_64: %.c
        $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(notdir $^) $(LDLIBS) -o $@
 $(foreach t,$(TARGETS),$(eval $(call gen-target-rule-64,$(t))))
 endif