Merge tag 'vfs/v6.4-rc3/misc.fixes' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 25 May 2023 18:03:58 +0000 (11:03 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 25 May 2023 18:03:58 +0000 (11:03 -0700)
Pull vfs fixes from Christian Brauner:

 - During the ACL rework merged this cycle, the generic_listxattr()
   helper had to be modified in a way that would, in principle, allow
   POSIX ACLs to be reported. At least that was our initial impression,
   because before the ACL rework POSIX ACLs were reported if the
   filesystem had POSIX ACL xattr handlers in sb->s_xattr. That logic
   changed: now we simply check whether the superblock has SB_POSIXACL
   set and, if the inode has inode->i_{default_}acl set, report the
   appropriate POSIX ACL name.

   However, we didn't realize that generic_listxattr() was only ever
   used by two filesystems. Neither of them supports POSIX ACLs via
   sb->s_xattr handlers, so they never reported POSIX ACLs via
   generic_listxattr(), even if they raised SB_POSIXACL and contained
   inodes with ACLs set. The example here is nfs4.

   As a result, generic_listxattr() suddenly started reporting POSIX
   ACLs when it wouldn't have before. Since SB_POSIXACL implies that
   the umask isn't stripped in the VFS, nfs4 can't just drop
   SB_POSIXACL from the superblock, as that would also alter its umask
   handling.

   So just have generic_listxattr() not report POSIX ACLs, as it never
   did anyway; it's documented as such. A sketch of the reporting
   logic being dropped follows below.
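
   Roughly, that check boils down to the following sketch. It reuses
   real kernel names (IS_POSIXACL(), inode->i_{default_}acl, the
   XATTR_NAME_POSIX_ACL_* constants), but xattr_list_one() is assumed
   here as a helper that appends one name to the listing buffer; this
   is an illustration, not the merged diff:

     static int listxattr_posix_acls(struct inode *inode, char **buffer,
                                     ssize_t *remaining_size)
     {
             int err = 0;

             /* The superblock must have opted in via SB_POSIXACL ... */
             if (!IS_POSIXACL(inode))
                     return 0;

             /* ... and the inode must actually carry an ACL. */
             if (inode->i_acl)
                     err = xattr_list_one(buffer, remaining_size,
                                          XATTR_NAME_POSIX_ACL_ACCESS);
             if (!err && inode->i_default_acl)
                     err = xattr_list_one(buffer, remaining_size,
                                          XATTR_NAME_POSIX_ACL_DEFAULT);
             return err;
     }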

 - Our SB_* flags currently use a signed integer and we shift the last
   bit, causing UBSAN to complain about undefined behavior. Switch to
   using unsigned. While the original patch used an explicit unsigned
   bit shift, it's now pretty common to rely on the BIT() macro in a
   lot of headers, so the patch has been adjusted to use that. A
   before/after sketch follows below.
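
   As a hedged before/after illustration of that change (SB_NOUSER is
   the flag named in the patch; BIT() comes from <linux/bits.h>):

     /* Before: 1 is a signed int, so shifting it into bit 31 is
      * undefined behavior, which UBSAN reports.
      */
     #define SB_NOUSER       (1 << 31)

     /* After: BIT(n) expands to an unsigned shift, making the
      * operation well defined.
      */
     #define SB_NOUSER       BIT(31)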

 - Add Namjae as ntfs reviewer. They're already active this cycle, so
   let's make it explicit right now.

* tag 'vfs/v6.4-rc3/misc.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  ntfs: Add myself as a reviewer
  fs: don't call posix_acl_listxattr in generic_listxattr
  fs: fix undefined behavior in bit shift for SB_NOUSER

591 files changed:
.mailmap
CREDITS
Documentation/admin-guide/quickly-build-trimmed-linux.rst
Documentation/cdrom/index.rst
Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml
Documentation/devicetree/bindings/sound/tas2562.yaml
Documentation/devicetree/bindings/sound/tas2770.yaml
Documentation/devicetree/bindings/sound/tas27xx.yaml
Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
Documentation/filesystems/ramfs-rootfs-initramfs.rst
Documentation/filesystems/sharedsubtree.rst
Documentation/fpga/index.rst
Documentation/locking/index.rst
Documentation/netlink/specs/handshake.yaml
Documentation/networking/tls-handshake.rst
Documentation/pcmcia/index.rst
Documentation/process/maintainer-netdev.rst
Documentation/s390/vfio-ap.rst
Documentation/staging/crc32.rst
Documentation/timers/index.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/stm32f429.dtsi
arch/arm/boot/dts/stm32f7-pinctrl.dtsi
arch/arm/include/asm/arm_pmuv3.h
arch/arm64/include/asm/arm_pmuv3.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/kernel/mte.c
arch/arm64/kernel/vdso.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/kvm/vmid.c
arch/arm64/mm/copypage.c
arch/arm64/mm/fault.c
arch/m68k/kernel/signal.c
arch/powerpc/boot/Makefile
arch/powerpc/crypto/Kconfig
arch/powerpc/include/asm/iommu.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/isa-bridge.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/iommu.c
arch/riscv/kernel/probes/Makefile
arch/s390/Kconfig
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/crypto/chacha-glue.c
arch/s390/include/asm/compat.h
arch/s390/include/uapi/asm/statfs.h
arch/s390/kernel/Makefile
arch/s390/kernel/ipl.c
arch/s390/kernel/topology.c
arch/um/drivers/Makefile
arch/um/drivers/harddog.h [new file with mode: 0644]
arch/um/drivers/harddog_kern.c
arch/um/drivers/harddog_user.c
arch/um/drivers/harddog_user_exp.c [new file with mode: 0644]
arch/x86/include/asm/vmx.h
arch/x86/kernel/Makefile
arch/x86/kvm/cpuid.c
arch/x86/kvm/vmx/sgx.c
arch/x86/kvm/x86.c
arch/x86/mm/init.c
arch/xtensa/kernel/signal.c
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/lib/Makefile
arch/xtensa/lib/bswapdi2.S [new file with mode: 0644]
arch/xtensa/lib/bswapsi2.S [new file with mode: 0644]
block/fops.c
drivers/acpi/resource.c
drivers/base/class.c
drivers/block/ublk_drv.c
drivers/bluetooth/btnxpuart.c
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/tpm_tis.c
drivers/char/tpm/tpm_tis_core.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/pcc-cpufreq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/exynos/exynos_drm_g2d.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
drivers/gpu/drm/msm/dp/dp_audio.c
drivers/gpu/drm/msm/dp/dp_audio.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/iommu/Kconfig
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/media/dvb-core/dvb_ca_en50221.c
drivers/media/dvb-core/dvb_demux.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/mn88443x.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/usb/dvb-usb-v2/ce6230.c
drivers/media/usb/dvb-usb-v2/ec168.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/dvb-usb/az6027.c
drivers/media/usb/dvb-usb/digitv.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/pvrusb2/Kconfig
drivers/media/usb/ttusb-dec/ttusb_dec.c
drivers/mmc/core/block.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/net/bonding/bond_main.c
drivers/net/can/Kconfig
drivers/net/can/bxcan.c
drivers/net/can/dev/skb.c
drivers/net/can/kvaser_pciefd.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dsa/rzn1_a5psw.c
drivers/net/dsa/rzn1_a5psw.h
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/8390/smc-ultra.c
drivers/net/ethernet/8390/wd.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/intel/ice/ice_vf_lib.h
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/netronome/nfp/nic/main.h
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/sfc/ef100_netdev.c
drivers/net/ethernet/sfc/efx_devlink.c
drivers/net/ethernet/sun/cassini.c
drivers/net/mdio/mdio-i2c.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/dp83867.c
drivers/net/phy/mscc/mscc.h
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/phylink.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/virtio_net.c
drivers/net/wireless/broadcom/b43/b43.h
drivers/net/wireless/broadcom/b43legacy/b43legacy.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/link.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/sdio.c
drivers/net/wireless/realtek/rtw88/usb.h
drivers/net/wireless/realtek/rtw89/mac.c
drivers/net/wireless/realtek/rtw89/mac.h
drivers/net/wireless/realtek/rtw89/rtw8852b.c
drivers/net/wireless/virtual/mac80211_hwsim.c
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
drivers/net/wwan/t7xx/t7xx_pci.c
drivers/net/wwan/t7xx/t7xx_pci.h
drivers/nvme/host/core.c
drivers/nvme/host/hwmon.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/x86/amd/pmf/core.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/intel/ifs/load.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/bq25890_charger.c
drivers/power/supply/bq27xxx_battery.c
drivers/power/supply/bq27xxx_battery_i2c.c
drivers/power/supply/mt6360_charger.c
drivers/power/supply/power_supply_core.c
drivers/power/supply/power_supply_leds.c
drivers/power/supply/power_supply_sysfs.c
drivers/power/supply/rt9467-charger.c
drivers/power/supply/sbs-charger.c
drivers/power/supply/sc27xx_fuel_gauge.c
drivers/regulator/core.c
drivers/regulator/mt6359-regulator.c
drivers/regulator/pca9450-regulator.c
drivers/s390/block/dasd_eckd.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio.h
drivers/s390/crypto/pkey_api.c
drivers/scsi/scsi_lib.c
drivers/scsi/storvsc_drv.c
drivers/spi/spi-cadence.c
drivers/spi/spi-dw-mmio.c
drivers/spi/spi-geni-qcom.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/nhi_regs.h
drivers/tty/serial/8250/8250_bcm7271.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/arc_uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/vt/vc_screen.c
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd.c
drivers/usb/class/usbtmc.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/udc/core.c
drivers/usb/host/uhci-pci.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/storage/scsiglue.c
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tipd/core.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/i810/i810_dvt.c
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/udlfb.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/cifs/cifsglob.h
fs/cifs/file.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/erofs/Kconfig
fs/erofs/Makefile
fs/erofs/internal.h
fs/erofs/xattr.c
fs/erofs/zdata.c
fs/ksmbd/connection.c
fs/ksmbd/oplock.c
fs/ksmbd/oplock.h
fs/ksmbd/smb2misc.c
fs/ksmbd/smb2pdu.c
fs/lockd/svc.c
fs/nfs/dir.c
fs/nfs/nfs4proc.c
fs/nfsd/nfsctl.c
fs/nfsd/trace.h
fs/nilfs2/inode.c
fs/statfs.c
include/linux/blkdev.h
include/linux/compiler.h
include/linux/device/class.h
include/linux/if_team.h
include/linux/mlx5/mlx5_ifc.h
include/linux/phy.h
include/linux/power/bq27xxx_battery.h
include/linux/shrinker.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/svc_xprt.h
include/linux/tpm.h
include/linux/usb/composite.h
include/media/dvb_frontend.h
include/media/dvb_net.h
include/media/dvbdev.h
include/net/bluetooth/hci_core.h
include/net/bonding.h
include/net/handshake.h
include/net/ip.h
include/net/nexthop.h
include/net/page_pool.h
include/net/tcp.h
include/net/tls.h
include/sound/hda-mlink.h
include/sound/soc-acpi.h
include/sound/soc-dpcm.h
include/uapi/linux/handshake.h
include/uapi/linux/in.h
include/uapi/sound/skl-tplg-interface.h
include/uapi/sound/sof/tokens.h
include/ufs/ufshcd.h
kernel/bpf/hashtab.c
kernel/bpf/offload.c
kernel/bpf/verifier.c
kernel/module/stats.c
kernel/trace/fprobe.c
kernel/trace/rethook.c
lib/maple_tree.c
mm/kfence/kfence.h
mm/shrinker_debug.c
mm/vmscan.c
mm/zsmalloc.c
mm/zswap.c
net/8021q/vlan_dev.c
net/atm/resources.c
net/bluetooth/hci_conn.c
net/bridge/br_private_tunnel.h
net/can/isotp.c
net/can/j1939/socket.c
net/core/page_pool.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock_map.c
net/devlink/core.c
net/devlink/devl_internal.h
net/devlink/leftover.c
net/handshake/handshake-test.c
net/handshake/handshake.h
net/handshake/netlink.c
net/handshake/request.c
net/handshake/tlshd.c
net/ipv4/ip_sockglue.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_ipv4.c
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv6/exthdrs_core.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/raw.c
net/ipv6/udplite.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/ieee80211_i.h
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_set_rbtree.c
net/nsh/nsh.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_core.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/sched.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/udp_media.c
net/tls/tls.h
net/tls/tls_device.c
net/tls/tls_strp.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/scan.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface_core.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/hbm.c
sound/core/oss/pcm_plugin.h
sound/firewire/digi00x/digi00x-stream.c
sound/hda/hdac_device.c
sound/pci/cs46xx/cs46xx_lib.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs35l41-lib.c
sound/soc/codecs/cs35l56.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/rt5682-i2c.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682.h
sound/soc/codecs/ssm2602.c
sound/soc/dwc/dwc-i2s.c
sound/soc/fsl/fsl_micfil.c
sound/soc/intel/avs/apl.c
sound/soc/intel/avs/avs.h
sound/soc/intel/avs/board_selection.c
sound/soc/intel/avs/control.c
sound/soc/intel/avs/dsp.c
sound/soc/intel/avs/messages.h
sound/soc/intel/avs/path.h
sound/soc/intel/avs/pcm.c
sound/soc/intel/avs/probes.c
sound/soc/jz4740/jz4740-i2s.c
sound/soc/mediatek/mt8186/mt8186-afe-clk.c
sound/soc/mediatek/mt8186/mt8186-afe-clk.h
sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
sound/soc/soc-pcm.c
sound/soc/sof/amd/acp-ipc.c
sound/soc/sof/debug.c
sound/soc/sof/intel/hda-mlink.c
sound/soc/sof/ipc3-topology.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/pcm.c
sound/soc/sof/pm.c
sound/soc/sof/sof-client-probes.c
sound/soc/sof/topology.c
sound/usb/format.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/prctl.h
tools/arch/x86/include/uapi/asm/unistd_32.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/include/asm/alternative.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/const.h
tools/include/uapi/linux/in.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/prctl.h
tools/include/uapi/sound/asound.h
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm64/util/header.c
tools/perf/arch/arm64/util/pmu.c
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/bench/mem-memcpy-x86-64-asm-def.h
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memset-x86-64-asm-def.h
tools/perf/bench/mem-memset-x86-64-asm.S
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
tools/perf/pmu-events/jevents.py
tools/perf/pmu-events/pmu-events.h
tools/perf/tests/attr.py
tools/perf/tests/attr/base-stat
tools/perf/tests/attr/test-stat-default
tools/perf/tests/attr/test-stat-detailed-1
tools/perf/tests/attr/test-stat-detailed-2
tools/perf/tests/attr/test-stat-detailed-3
tools/perf/tests/expr.c
tools/perf/tests/parse-metric.c
tools/perf/tests/shell/stat.sh
tools/perf/tests/shell/test_intel_pt.sh
tools/perf/tests/shell/test_java_symbol.sh
tools/perf/trace/beauty/arch_prctl.c
tools/perf/trace/beauty/x86_arch_prctl.sh
tools/perf/util/bpf_skel/lock_contention.bpf.c
tools/perf/util/bpf_skel/vmlinux.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/expr.y
tools/perf/util/metricgroup.c
tools/perf/util/parse-events.c
tools/perf/util/stat-display.c
tools/perf/util/stat-shadow.c
tools/power/cpupower/lib/powercap.c
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c [new file with mode: 0644]
tools/testing/selftests/ftrace/Makefile
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/ftracetest-ktap [new file with mode: 0755]
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
tools/testing/selftests/sgx/Makefile
virt/kvm/kvm_main.c

index 71127b2..bf076bb 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -364,6 +364,11 @@ Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.de>
 Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.com>
 Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Nikolay Aleksandrov <razor@blackwall.org> <naleksan@redhat.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com>
 Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 2d9da9a..de7e4db 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1706,6 +1706,10 @@ S: Panoramastrasse 18
 S: D-69126 Heidelberg
 S: Germany
 
+N: Neil Horman
+M: nhorman@tuxdriver.com
+D: SCTP protocol maintainer.
+
 N: Simon Horman
 M: horms@verge.net.au
 D: Renesas ARM/ARM64 SoC maintainer
index ff4f4cc..f08149b 100644 (file)
@@ -215,12 +215,14 @@ again.
    reduce the compile time enormously, especially if you are running a
    universal kernel from a commodity Linux distribution.
 
-   There is a catch: the make target 'localmodconfig' will disable kernel
-   features you have not directly or indirectly through some program utilized
-   since you booted the system. You can reduce or nearly eliminate that risk by
-   using tricks outlined in the reference section; for quick testing purposes
-   that risk is often negligible, but it is an aspect you want to keep in mind
-   in case your kernel behaves oddly.
+   There is a catch: 'localmodconfig' is likely to disable kernel features you
+   did not use since you booted your Linux -- like drivers for currently
+   disconnected peripherals or virtualization software you have not used yet.
+   You can reduce or nearly eliminate that risk with tricks the reference
+   section outlines; but when building a kernel just for quick testing purposes
+   it is often negligible if such features are missing. But you should keep that
+   aspect in mind when using a kernel built with this make target, as it might
+   be the reason why something you only use occasionally stopped working.
 
    [:ref:`details<configuration>`]
 
@@ -271,6 +273,9 @@ again.
    does nothing at all; in that case you have to manually install your kernel,
    as outlined in the reference section.
 
+   If you are running an immutable Linux distribution, check its documentation
+   and the web to find out how to install your own kernel there.
+
    [:ref:`details<install>`]
 
 .. _another_sbs:
@@ -291,29 +296,29 @@ again.
    version you care about, as git otherwise might retrieve the entire commit
    history::
 
-     git fetch --shallow-exclude=v6.1 origin
-
-   If you modified the sources (for example by applying a patch), you now need
-   to discard those modifications; that's because git otherwise will not be able
-   to switch to the sources of another version due to potential conflicting
-   changes::
-
-     git reset --hard
+     git fetch --shallow-exclude=v6.0 origin
 
-   Now checkout the version you are interested in, as explained above::
+   Now switch to the version you are interested in -- but be aware the command
+   used here will discard any modifications you performed, as they would
+   conflict with the sources you want to checkout::
 
-     git checkout --detach origin/master
+     git checkout --force --detach origin/master
 
    At this point you might want to patch the sources again or set/modify a build
-   tag, as explained earlier; afterwards adjust the build configuration to the
-   new codebase and build your next kernel::
+   tag, as explained earlier. Afterwards adjust the build configuration to the
+   new codebase using olddefconfig, which will now adjust the configuration file
+   you prepared earlier using localmodconfig (~/linux/.config) for your next
+   kernel::
 
      # reminder: if you want to apply patches, do it at this point
      # reminder: you might want to update your build tag at this point
      make olddefconfig
+
+   Now build your kernel::
+
      make -j $(nproc --all)
 
-   Install the kernel as outlined above::
+   Afterwards install the kernel as outlined above::
 
      command -v installkernel && sudo make modules_install install
 
@@ -584,11 +589,11 @@ versions and individual commits at hand at any time::
     curl -L \
       https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/clone.bundle \
       -o linux-stable.git.bundle
-    git clone clone.bundle ~/linux/
+    git clone linux-stable.git.bundle ~/linux/
     rm linux-stable.git.bundle
     cd ~/linux/
-    git remote set-url origin
-    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+    git remote set-url origin \
+      https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
     git fetch origin
     git checkout --detach origin/master
 
index e87a878..e9b022d 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=====
-cdrom
-=====
+======
+CD-ROM
+======
 
 .. toctree::
     :maxdepth: 1
index 9b31f86..71364c6 100644 (file)
@@ -32,7 +32,7 @@ properties:
     maxItems: 1
 
   iommus:
-    maxItems: 1
+    maxItems: 4
 
   power-domains:
     maxItems: 1
index e6c1ebf..130e16d 100644 (file)
@@ -82,6 +82,18 @@ properties:
       Indicates if the DSI controller is driving a panel which needs
       2 DSI links.
 
+  qcom,master-dsi:
+    type: boolean
+    description: |
+      Indicates if the DSI controller is the master DSI controller when
+      qcom,dual-dsi-mode is enabled.
+
+  qcom,sync-dual-dsi:
+    type: boolean
+    description: |
+      Indicates if the DSI controller needs to sync the other DSI controller
+      with MIPI DCS commands when qcom,dual-dsi-mode is enabled.
+
   assigned-clocks:
     minItems: 2
     maxItems: 4
index 769fa5c..de1d429 100644 (file)
@@ -21,11 +21,22 @@ properties:
 
   st,can-primary:
     description:
-      Primary and secondary mode of the bxCAN peripheral is only relevant
-      if the chip has two CAN peripherals. In that case they share some
-      of the required logic.
+      Primary mode of the bxCAN peripheral is only relevant if the chip has
+      two CAN peripherals in dual CAN configuration. In that case they share
+      some of the required logic.
+      Not to be used if the peripheral is in single CAN configuration.
       To avoid misunderstandings, it should be noted that ST documentation
-      uses the terms master/slave instead of primary/secondary.
+      uses the term master instead of primary.
+    type: boolean
+
+  st,can-secondary:
+    description:
+      Secondary mode of the bxCAN peripheral is only relevant if the chip
+      has two CAN peripherals in dual CAN configuration. In that case they
+      share some of the required logic.
+      Not to be used if the peripheral is in single CAN configuration.
+      To avoid misunderstandings, it should be noted that ST documentation
+      uses the term slave instead of secondary.
     type: boolean
 
   reg:
index a5bb561..31a3024 100644 (file)
@@ -55,7 +55,9 @@ properties:
     description: TDM TX current sense time slot.
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI; the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -72,7 +74,7 @@ examples:
      codec: codec@4c {
        compatible = "ti,tas2562";
        reg = <0x4c>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        shutdown-gpios = <&gpio1 15 0>;
index 26088ad..8908bf1 100644 (file)
@@ -57,7 +57,9 @@ properties:
       - 1 # Falling edge
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI; the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -74,7 +76,7 @@ examples:
      codec: codec@41 {
        compatible = "ti,tas2770";
        reg = <0x41>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        reset-gpio = <&gpio1 15 0>;
index 8cba013..a876545 100644 (file)
@@ -50,7 +50,9 @@ properties:
     description: TDM TX voltage sense time slot.
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI; the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -67,7 +69,7 @@ examples:
      codec: codec@38 {
        compatible = "ti,tas2764";
        reg = <0x38>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        reset-gpios = <&gpio1 15 0>;
index f59125b..0b4e21b 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
        "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256
        "ti,tas2505" TAS2505, TAS2521
  - reg: I2C slave address
- - supply-*: Required supply regulators are:
+ - *-supply: Required supply regulators are:
     "iov" - digital IO power supply
     "ldoin" - LDO power supply
     "dv" - Digital core power supply
index 1649606..447f767 100644 (file)
@@ -6,8 +6,7 @@ Ramfs, rootfs and initramfs
 
 October 17, 2005
 
-Rob Landley <rob@landley.net>
-=============================
+:Author: Rob Landley <rob@landley.net>
 
 What is ramfs?
 --------------
index d833953..1cf5648 100644 (file)
@@ -147,6 +147,7 @@ replicas continue to be exactly same.
 
 
 3) Setting mount states
+-----------------------
 
        The mount command (util-linux package) can be used to set mount
        states::
@@ -612,6 +613,7 @@ replicas continue to be exactly same.
 
 
 6) Quiz
+-------
 
        A. What is the result of the following command sequence?
 
@@ -673,6 +675,7 @@ replicas continue to be exactly same.
                /mnt/1/test be?
 
 7) FAQ
+------
 
        Q1. Why is bind mount needed? How is it different from symbolic links?
                symbolic links can get stale if the destination mount gets
@@ -841,6 +844,7 @@ replicas continue to be exactly same.
                             tmp  usr tmp usr tmp usr
 
 8) Implementation
+-----------------
 
 8A) Datastructure
 
index f80f956..43c9688 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ====
-fpga
+FPGA
 ====
 
 .. toctree::
index 7003bd5..6a9ea96 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 =======
-locking
+Locking
 =======
 
 .. toctree::
index 614f1a5..6d89e30 100644 (file)
@@ -68,6 +68,9 @@ attribute-sets:
         type: nest
         nested-attributes: x509
         multi-attr: true
+      -
+        name: peername
+        type: string
   -
     name: done
     attributes:
@@ -105,6 +108,7 @@ operations:
             - auth-mode
             - peer-identity
             - certificate
+            - peername
     -
       name: done
       doc: Handler reports handshake completion
index a2817a8..6f5ea16 100644 (file)
@@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request:
         struct socket   *ta_sock;
         tls_done_func_t ta_done;
         void            *ta_data;
+        const char      *ta_peername;
         unsigned int    ta_timeout_ms;
         key_serial_t    ta_keyring;
         key_serial_t    ta_my_cert;
@@ -71,6 +72,10 @@ instantiated a struct file in sock->file.
 has completed. Further explanation of this function is in the "Handshake
 Completion" sesction below.
 
+The consumer can provide a NUL-terminated hostname in the @ta_peername
+field that is sent as part of ClientHello. If no peername is provided,
+the DNS hostname associated with the server's IP address is used instead.
+
 The consumer can fill in the @ta_timeout_ms field to force the servicing
 handshake agent to exit after a number of milliseconds. This enables the
 socket to be fully closed once both the kernel and the handshake agent
index 7ae1f62..8067236 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ======
-pcmcia
+PCMCIA
 ======
 
 .. toctree::
index f73ac9e..83614ce 100644 (file)
@@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above.
 Updating patch status
 ~~~~~~~~~~~~~~~~~~~~~
 
-It may be tempting to help the maintainers and update the state of your
-own patches when you post a new version or spot a bug. Please **do not**
-do that.
-Interfering with the patch status on patchwork will only cause confusion. Leave
-it to the maintainer to figure out what is the most recent and current
-version that should be applied. If there is any doubt, the maintainer
-will reply and ask what should be done.
+Contributors and reviewers do not have the permissions to update patch
+state directly in patchwork. Patchwork doesn't expose much information
+about the history of the state of patches, so having multiple
+people update the state leads to confusion.
+
+Instead of delegating patchwork permissions, netdev uses a simple mail
+bot which looks for special commands/lines within the emails sent to
+the mailing list. For example, to mark a series as Changes Requested
+one needs to send the following line anywhere in the email thread::
+
+  pw-bot: changes-requested
+
+As a result, the bot will set the entire series to Changes Requested.
+This may be useful when the author discovers a bug in their own series
+and wants to prevent it from getting applied.
+
+The use of the bot is entirely optional; if in doubt, ignore its existence
+completely. Maintainers will classify and update the state of the patches
+themselves. No email should ever be sent to the list with the main purpose
+of communicating with the bot; the bot commands should be seen as metadata.
+
+The use of the bot is restricted to authors of the patches (the ``From:``
+header on patch submission and command must match!), maintainers themselves,
+and a handful of senior reviewers. The bot records its activity here:
+
+  https://patchwork.hopto.org/pw-bot.html
 
 Review timelines
 ~~~~~~~~~~~~~~~~
index d46e98c..bb3f4c4 100644 (file)
@@ -551,7 +551,6 @@ These are the steps:
    * IOMMU_SUPPORT
    * S390
    * ZCRYPT
-   * S390_AP_IOMMU
    * VFIO
    * KVM
 
index 8a6860f..7542220 100644 (file)
@@ -1,5 +1,5 @@
 =================================
-brief tutorial on CRC computation
+Brief tutorial on CRC computation
 =================================
 
 A CRC is a long-division remainder.  You add the CRC to the message,
index df510ad..983f91f 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ======
-timers
+Timers
 ======
 
 .. toctree::
index 83888aa..c812189 100644 (file)
@@ -1677,10 +1677,7 @@ F:       drivers/power/reset/arm-versatile-reboot.c
 F:     drivers/soc/versatile/
 
 ARM KOMEDA DRM-KMS DRIVER
-M:     James (Qian) Wang <james.qian.wang@arm.com>
 M:     Liviu Dudau <liviu.dudau@arm.com>
-M:     Mihail Atanassov <mihail.atanassov@arm.com>
-L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/arm,komeda.yaml
@@ -1701,8 +1698,6 @@ F:        include/uapi/drm/panfrost_drm.h
 
 ARM MALI-DP DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
-M:     Brian Starkey <brian.starkey@arm.com>
-L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/arm,malidp.yaml
@@ -4914,7 +4909,6 @@ F:        drivers/media/cec/i2c/ch7322.c
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:     James Schulman <james.schulman@cirrus.com>
 M:     David Rhodes <david.rhodes@cirrus.com>
-M:     Lucas Tanure <tanureal@opensource.cirrus.com>
 M:     Richard Fitzgerald <rf@opensource.cirrus.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     patches@opensource.cirrus.com
@@ -6012,7 +6006,7 @@ W:        http://www.dialog-semiconductor.com/products
 F:     Documentation/devicetree/bindings/input/da90??-onkey.txt
 F:     Documentation/devicetree/bindings/input/dlg,da72??.txt
 F:     Documentation/devicetree/bindings/mfd/da90*.txt
-F:     Documentation/devicetree/bindings/mfd/da90*.yaml
+F:     Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
 F:     Documentation/devicetree/bindings/regulator/da92*.txt
 F:     Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
 F:     Documentation/devicetree/bindings/regulator/slg51000.txt
@@ -6211,6 +6205,7 @@ X:        Documentation/devicetree/
 X:     Documentation/driver-api/media/
 X:     Documentation/firmware-guide/acpi/
 X:     Documentation/i2c/
+X:     Documentation/netlink/
 X:     Documentation/power/
 X:     Documentation/spi/
 X:     Documentation/userspace-api/media/
@@ -8158,6 +8153,7 @@ F:        include/linux/spi/spi-fsl-dspi.h
 
 FREESCALE ENETC ETHERNET DRIVERS
 M:     Claudiu Manoil <claudiu.manoil@nxp.com>
+M:     Vladimir Oltean <vladimir.oltean@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/freescale/enetc/
@@ -14566,6 +14562,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/devicetree/bindings/net/
 F:     drivers/connector/
 F:     drivers/net/
+X:     drivers/net/wireless/
 F:     include/dt-bindings/net/
 F:     include/linux/etherdevice.h
 F:     include/linux/fcdevice.h
@@ -14615,6 +14612,7 @@ B:      mailto:netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/core-api/netlink.rst
+F:     Documentation/netlink/
 F:     Documentation/networking/
 F:     Documentation/process/maintainer-netdev.rst
 F:     Documentation/userspace-api/netlink/
@@ -14629,6 +14627,7 @@ F:      include/uapi/linux/netdevice.h
 F:     lib/net_utils.c
 F:     lib/random32.c
 F:     net/
+X:     net/bluetooth/
 F:     tools/net/
 F:     tools/testing/selftests/net/
 
@@ -18576,10 +18575,9 @@ F:     Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 B:     mailto:linux-samsung-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
@@ -18707,7 +18705,6 @@ F:      include/dt-bindings/clock/samsung,*.h
 F:     include/linux/clk/samsung.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Andi Shyti <andi.shyti@kernel.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -18843,12 +18840,11 @@ F:    drivers/target/
 F:     include/target/
 
 SCTP PROTOCOL
-M:     Neil Horman <nhorman@tuxdriver.com>
 M:     Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 M:     Xin Long <lucien.xin@gmail.com>
 L:     linux-sctp@vger.kernel.org
 S:     Maintained
-W:     http://lksctp.sourceforge.net
+W:     https://github.com/sctp/lksctp-tools/wiki
 F:     Documentation/networking/sctp.rst
 F:     include/linux/sctp.h
 F:     include/net/sctp/
index f836936..3cd9bc2 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index c9e05e3..00bf53f 100644 (file)
                        interrupt-names = "tx", "rx0", "rx1", "sce";
                        resets = <&rcc STM32F4_APB1_RESET(CAN2)>;
                        clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>;
+                       st,can-secondary;
                        st,gcan = <&gcan>;
                        status = "disabled";
                };
index c8e6c52..9f65403 100644 (file)
                                        slew-rate = <2>;
                                };
                        };
+
+                       can1_pins_a: can1-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can1_pins_b: can1-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can1_pins_c: can1-2 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+
+                               };
+                       };
+
+                       can1_pins_d: can1-3 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+
+                               };
+                       };
+
+                       can2_pins_a: can2-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can2_pins_b: can2-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can3_pins_a: can3-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can3_pins_b: can3-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 4, AF11)>;  /* CAN3_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
+                                       bias-pull-up;
+                               };
+                       };
                };
        };
 };
index 78d3d4b..f4db3e7 100644 (file)
@@ -92,7 +92,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(PMEVCNTR##n)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
@@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, PMEVCNTR##n)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, PMEVTYPER##n)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
index d6b51de..18dc2fb 100644 (file)
@@ -13,7 +13,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
@@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
index 683ca3a..5f6f848 100644 (file)
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
 #define APPLE_CPU_PART_M2_BLIZZARD     0x032
 #define APPLE_CPU_PART_M2_AVALANCHE    0x033
+#define APPLE_CPU_PART_M2_BLIZZARD_PRO 0x034
+#define APPLE_CPU_PART_M2_AVALANCHE_PRO        0x035
+#define APPLE_CPU_PART_M2_BLIZZARD_MAX 0x038
+#define APPLE_CPU_PART_M2_AVALANCHE_MAX        0x039
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
 #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO)
+#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO)
+#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 4cd6762..dc3c072 100644 (file)
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
        kvm_pte_t                               old;
        void                                    *arg;
        struct kvm_pgtable_mm_ops               *mm_ops;
+       u64                                     start;
        u64                                     addr;
        u64                                     end;
        u32                                     level;
index f5bcb0d..7e89968 100644 (file)
@@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
                return;
 
        /* if PG_mte_tagged is set, tags have already been initialised */
-       for (i = 0; i < nr_pages; i++, page++) {
-               if (!page_mte_tagged(page)) {
+       for (i = 0; i < nr_pages; i++, page++)
+               if (!page_mte_tagged(page))
                        mte_sync_page_tags(page, old_pte, check_swap,
                                           pte_is_tagged);
-                       set_page_mte_tagged(page);
-               }
-       }
 
        /* ensure the tags are visible before the PTE is set */
        smp_wmb();
index 0119dc9..d9e1355 100644 (file)
@@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
 
        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
-       aarch32_vectors_page = virt_to_page(vdso_page);
+       aarch32_vectors_page = virt_to_page((void *)vdso_page);
        return 0;
 }
 
index 1279949..4c9dcd8 100644 (file)
@@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 
        fpsimd_kvm_prepare();
 
+       /*
+        * We will check TIF_FOREIGN_FPSTATE just before entering the
+        * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+        * FP_STATE_FREE if the flag is set.
+        */
        vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
 
        vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
                vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
 
-       /*
-        * We don't currently support SME guests but if we leave
-        * things in streaming mode then when the guest starts running
-        * FPSIMD or SVE code it may generate SME traps so as a
-        * special case if we are in streaming mode we force the host
-        * state to be saved now and exit streaming mode so that we
-        * don't have to handle any SME traps for valid guest
-        * operations. Do this for ZA as well for now for simplicity.
-        */
        if (system_supports_sme()) {
                vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
                if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
                        vcpu_set_flag(vcpu, HOST_SME_ENABLED);
 
+               /*
+                * If PSTATE.SM is enabled then save any pending FP
+                * state and disable PSTATE.SM. If we leave PSTATE.SM
+                * enabled and the guest does not enable SME via
+                * CPACR_EL1.SMEN then operations that should be valid
+                * may generate SME traps from EL1 to EL1 which we
+                * can't intercept and which would confuse the guest.
+                *
+                * Do the same for PSTATE.ZA in the case where there
+                * is state in the registers which has not already
+                * been saved, this is very unlikely to happen.
+                */
                if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
                        vcpu->arch.fp_state = FP_STATE_FREE;
                        fpsimd_save_and_flush_cpu_state();
index c41166f..e78a08a 100644 (file)
@@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
        sve_guest = vcpu_has_sve(vcpu);
        esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-       /* Don't handle SVE traps for non-SVE vcpus here: */
-       if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+       /* Only handle traps the vCPU can support here: */
+       switch (esr_ec) {
+       case ESR_ELx_EC_FP_ASIMD:
+               break;
+       case ESR_ELx_EC_SVE:
+               if (!sve_guest)
+                       return false;
+               break;
+       default:
                return false;
+       }
 
        /* Valid trap.  Switch the context: */
 
index 3d61bd3..5282cb9 100644 (file)
@@ -58,8 +58,9 @@
 struct kvm_pgtable_walk_data {
        struct kvm_pgtable_walker       *walker;
 
+       const u64                       start;
        u64                             addr;
-       u64                             end;
+       const u64                       end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,6 +202,7 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
                .old    = READ_ONCE(*ptep),
                .arg    = data->walker->arg,
                .mm_ops = mm_ops,
+               .start  = data->start,
                .addr   = data->addr,
                .end    = data->end,
                .level  = level,
@@ -293,6 +295,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker)
 {
        struct kvm_pgtable_walk_data walk_data = {
+               .start  = ALIGN_DOWN(addr, PAGE_SIZE),
                .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
                .end    = PAGE_ALIGN(walk_data.addr + size),
                .walker = walker,
@@ -349,7 +352,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-       u64                             phys;
+       const u64                       phys;
        kvm_pte_t                       attr;
 };
 
@@ -407,13 +410,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                    struct hyp_map_data *data)
 {
+       u64 phys = data->phys + (ctx->addr - ctx->start);
        kvm_pte_t new;
-       u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
        if (!kvm_block_mapping_supported(ctx, phys))
                return false;
 
-       data->phys += granule;
        new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        if (ctx->old == new)
                return true;
@@ -576,7 +578,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-       u64                             phys;
+       const u64                       phys;
        kvm_pte_t                       attr;
        u8                              owner_id;
 
@@ -794,20 +796,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
        return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+                                      const struct stage2_map_data *data)
+{
+       u64 phys = data->phys;
+
+       /*
+        * Stage-2 walks to update ownership data are communicated to the map
+        * walker using an invalid PA. Avoid offsetting an already invalid PA,
+        * which could overflow and make the address valid again.
+        */
+       if (!kvm_phys_is_valid(phys))
+               return phys;
+
+       /*
+        * Otherwise, work out the correct PA based on how far the walk has
+        * gotten.
+        */
+       return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
                                        struct stage2_map_data *data)
 {
+       u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
        if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
                return false;
 
-       return kvm_block_mapping_supported(ctx, data->phys);
+       return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                      struct stage2_map_data *data)
 {
        kvm_pte_t new;
-       u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+       u64 phys = stage2_map_walker_phys_addr(ctx, data);
+       u64 granule = kvm_granule_size(ctx->level);
        struct kvm_pgtable *pgt = data->mmu->pgt;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
 
@@ -841,8 +866,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
        stage2_make_pte(ctx, new);
 
-       if (kvm_phys_is_valid(phys))
-               data->phys += granule;
        return 0;
 }
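
For illustration, a minimal sketch of the offset-based PA derivation the
hunks above switch to (identifiers and numbers here are illustrative):

    /*
     * The walk covers IPA range [start, end) backed by a PA range that
     * begins at phys. A visit at addr is (addr - start) bytes into the
     * walk, so the matching PA is that same offset into the PA range.
     */
    static u64 walk_phys(u64 phys, u64 start, u64 addr)
    {
            return phys + (addr - start);
    }

    /*
     * e.g. phys = 0x80000000, start = 0x40000000, addr = 0x40200000
     * yields PA 0x80200000. The computation is stateless, so it cannot
     * drift out of sync with the walk the way the removed
     * "data->phys += granule" bookkeeping could.
     */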
 
index 64c3aec..0bd93a5 100644 (file)
@@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
         * Size Fault at level 0, as if exceeding PARange.
         *
         * Non-LPAE guests will only get the external abort, as there
-        * is no way to to describe the ASF.
+        * is no way to describe the ASF.
         */
        if (vcpu_el1_is_32bit(vcpu) &&
            !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
index 469d816..93a47a5 100644 (file)
@@ -616,6 +616,10 @@ static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
        {},
 };
 
index 08978d0..7fe8ba1 100644 (file)
@@ -47,7 +47,7 @@ static void flush_context(void)
        int cpu;
        u64 vmid;
 
-       bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+       bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
        for_each_possible_cpu(cpu) {
                vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void)
         */
        WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
        atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
-       vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
-                          sizeof(*vmid_map), GFP_KERNEL);
+       vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
        if (!vmid_map)
                return -ENOMEM;
 
@@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void)
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
-       kfree(vmid_map);
+       bitmap_free(vmid_map);
 }
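
The open-coded kcalloc()/bitmap_clear()/kfree() combination becomes the
dedicated bitmap API; a minimal usage sketch (the bit count is a stand-in):

    #include <linux/bitmap.h>

    unsigned long *map;

    map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL); /* allocate zeroed */
    if (!map)
            return -ENOMEM;
    bitmap_zero(map, NUM_USER_VMIDS);   /* clear every bit in place */
    bitmap_free(map);                   /* pairs with bitmap_zalloc() */
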
index 4aadcfb..a7bb200 100644 (file)
@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
 
        copy_page(kto, kfrom);
 
+       if (kasan_hw_tags_enabled())
+               page_kasan_tag_reset(to);
+
        if (system_supports_mte() && page_mte_tagged(from)) {
-               if (kasan_hw_tags_enabled())
-                       page_kasan_tag_reset(to);
                /* It's a new page, shouldn't have been tagged yet */
                WARN_ON_ONCE(!try_page_mte_tagging(to));
                mte_copy_page_tags(kto, kfrom);
index 9e0db5c..cb21ccd 100644 (file)
@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
        }
 }
 
-#define VM_FAULT_BADMAP                0x010000
-#define VM_FAULT_BADACCESS     0x020000
+#define VM_FAULT_BADMAP                ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS     ((__force vm_fault_t)0x020000)
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
                                  unsigned int mm_flags, unsigned long vm_flags,
index b9f6908..ba468b5 100644 (file)
@@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
 }
 
 static inline void __user *
-get_sigframe(struct ksignal *ksig, size_t frame_size)
+get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
 {
        unsigned long usp = sigsp(rdusp(), ksig);
+       unsigned long gap = 0;
 
-       return (void __user *)((usp - frame_size) & -8UL);
+       if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
+               /* USP is unreliable so use worst-case value */
+               gap = 256;
+       }
+
+       return (void __user *)((usp - gap - frame_size) & -8UL);
 }
 
 static int setup_frame(struct ksignal *ksig, sigset_t *set,
@@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
                return -EFAULT;
        }
 
-       frame = get_sigframe(ksig, sizeof(*frame) + fsize);
+       frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
 
        if (fsize)
                err |= copy_to_user (frame + 1, regs + 1, fsize);
@@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                return -EFAULT;
        }
 
-       frame = get_sigframe(ksig, sizeof(*frame));
+       frame = get_sigframe(ksig, tregs, sizeof(*frame));
 
        if (fsize)
                err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
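
A worked example of the new frame placement (all values illustrative):

    /*
     * usp        = 0x0ffff00c
     * gap        = 256 (0x100), for a 68020/030 format 0xb frame
     * frame_size = 0x3c0
     *
     * usp - gap - frame_size = 0x0fffeb4c
     * ... & -8UL (i.e. & ~7) = 0x0fffeb48, an 8-byte-aligned address
     * safely below the worst-case stack usage that makes USP unreliable.
     */
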
index 85cde5b..771b794 100644 (file)
@@ -34,8 +34,6 @@ endif
 
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-                $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
-                $(call cc-option,-mno-mma) \
                 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
                 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
                 $(LINUXINCLUDE)
@@ -71,6 +69,10 @@ BOOTAFLAGS   := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
 
 BOOTARFLAGS    := -crD
 
+BOOTCFLAGS     += $(call cc-option,-mno-prefixed) \
+                  $(call cc-option,-mno-pcrel) \
+                  $(call cc-option,-mno-mma)
+
 ifdef CONFIG_CC_IS_CLANG
 BOOTCFLAGS += $(CLANG_FLAGS)
 BOOTAFLAGS += $(CLANG_FLAGS)
index 7113f93..ad18725 100644 (file)
@@ -96,7 +96,7 @@ config CRYPTO_AES_PPC_SPE
 
 config CRYPTO_AES_GCM_P10
        tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
-       depends on PPC64 && CPU_LITTLE_ENDIAN
+       depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
        select CRYPTO_LIB_AES
        select CRYPTO_ALGAPI
        select CRYPTO_AEAD
index 678b5bd..34e14df 100644 (file)
@@ -205,7 +205,6 @@ extern void iommu_register_group(struct iommu_table_group *table_group,
                                 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct iommu_table_group *table_group,
                struct device *dev);
-extern void iommu_del_device(struct device *dev);
 extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction);
@@ -229,10 +228,6 @@ static inline int iommu_add_device(struct iommu_table_group *table_group,
 {
        return 0;
 }
-
-static inline void iommu_del_device(struct device *dev)
-{
-}
 #endif /* !CONFIG_IOMMU_API */
 
 u64 dma_iommu_get_required_mask(struct device *dev);
index 038ce8d..8920862 100644 (file)
@@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 /* We support DMA to/from any memory page via the iommu */
 int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-       struct iommu_table *tbl = get_iommu_table_base(dev);
+       struct iommu_table *tbl;
 
        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
                /*
@@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
                return 1;
        }
 
+       tbl = get_iommu_table_base(dev);
+
        if (!tbl) {
                dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
                return 0;
index 0089dd4..67f0b01 100644 (file)
@@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << tbl->it_page_shift;
-               dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
+               dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
 
                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);
@@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
+       int tcesize = (1 << tbl->it_page_shift);
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       nio_pages = size >> tbl->it_page_shift;
+       nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
+
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
@@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                free_pages((unsigned long)ret, order);
                return NULL;
        }
-       *dma_handle = mapping;
+
+       *dma_handle = mapping | ((u64)ret & (tcesize - 1));
        return ret;
 }
 
@@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                unsigned int nio_pages;
 
                size = PAGE_ALIGN(size);
-               nio_pages = size >> tbl->it_page_shift;
+               nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
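
A sketch of why the handle must keep the intra-TCE-page offset (values
illustrative: 64K TCE pages on a kernel with 4K PAGE_SIZE):

    u64 tcesize = 1ULL << tbl->it_page_shift;   /* e.g. 0x10000 */

    /*
     * "mapping" from iommu_alloc() is TCE-page aligned, but the CPU
     * buffer "ret" may only be aligned to the smaller PAGE_SIZE and so
     * can start part-way into its TCE page; OR the low bits back in so
     * DMA lands on the buffer rather than on the start of the TCE page:
     */
    *dma_handle = mapping | ((u64)ret & (tcesize - 1));
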
@@ -1168,23 +1171,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_add_device);
 
-void iommu_del_device(struct device *dev)
-{
-       /*
-        * Some devices might not have IOMMU table and group
-        * and we needn't detach them from the associated
-        * IOMMU groups
-        */
-       if (!device_iommu_mapped(dev)) {
-               pr_debug("iommu_tce: skipping device %s with no tbl\n",
-                        dev_name(dev));
-               return;
-       }
-
-       iommu_group_remove_device(dev);
-}
-EXPORT_SYMBOL_GPL(iommu_del_device);
-
 /*
  * A simple iommu_table_group_ops which only allows reusing the existing
  * iommu_table. This handles VFIO for POWER7 or the nested KVM.
index 85bdd7d..48e0eaf 100644 (file)
@@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node,
        }
 
 inval_range:
-       if (!phb_io_base_phys) {
+       if (phb_io_base_phys) {
                pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n");
                remap_isa_base(phb_io_base_phys, 0x10000);
+               return 0;
        }
-       return 0;
+       return -EINVAL;
 }
 
 
index 26245aa..2297aa7 100644 (file)
@@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
-                                             _PAGE_RW | _PAGE_EXEC);
+       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
+                                             _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
index e93aefc..37043df 100644 (file)
@@ -101,6 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                bpf_hdr = jit_data->header;
                proglen = jit_data->proglen;
                extra_pass = true;
+               /* During extra pass, ensure index is reset before repopulating extable entries */
+               cgctx.exentry_idx = 0;
                goto skip_init_ctx;
        }
 
index 0d9b760..3e2e252 100644 (file)
@@ -265,6 +265,7 @@ config CPM2
 config FSL_ULI1575
        bool "ULI1575 PCIe south bridge support"
        depends on FSL_SOC_BOOKE || PPC_86xx
+       depends on PCI
        select FSL_PCI
        select GENERIC_ISA_DMA
        help
index 233a50e..7725492 100644 (file)
@@ -865,28 +865,3 @@ void __init pnv_pci_init(void)
        /* Configure IOMMU DMA hooks */
        set_pci_dma_ops(&dma_iommu_ops);
 }
-
-static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_DEL_DEVICE:
-               iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block pnv_tce_iommu_bus_nb = {
-       .notifier_call = pnv_tce_iommu_bus_notifier,
-};
-
-static int __init pnv_tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
-       return 0;
-}
-machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
index 7464fa6..918f511 100644 (file)
@@ -91,19 +91,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
 static void iommu_pseries_free_group(struct iommu_table_group *table_group,
                const char *node_name)
 {
-       struct iommu_table *tbl;
-
        if (!table_group)
                return;
 
-       tbl = table_group->tables[0];
 #ifdef CONFIG_IOMMU_API
        if (table_group->group) {
                iommu_group_put(table_group->group);
                BUG_ON(table_group->group);
        }
 #endif
-       iommu_tce_table_put(tbl);
+
+       /* The default DMA window table is at index 0, the DDW table at
+        * index 1; SR-IOV adapters only have a table at index 1.
+        */
+       if (table_group->tables[0])
+               iommu_tce_table_put(table_group->tables[0]);
+
+       if (table_group->tables[1])
+               iommu_tce_table_put(table_group->tables[1]);
 
        kfree(table_group);
 }
@@ -1695,31 +1700,6 @@ static int __init disable_multitce(char *str)
 
 __setup("multitce=", disable_multitce);
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_DEL_DEVICE:
-               iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-       .notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-       return 0;
-}
-machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
-
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
                                             struct pci_dev *pdev)
index c40139e..8265ff4 100644 (file)
@@ -4,3 +4,5 @@ obj-$(CONFIG_RETHOOK)           += rethook.o rethook_trampoline.o
 obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
 obj-$(CONFIG_UPROBES)          += uprobes.o decode-insn.o simulate-insn.o
 CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
index db20c15..6dab9c1 100644 (file)
@@ -469,19 +469,11 @@ config SCHED_SMT
 config SCHED_MC
        def_bool n
 
-config SCHED_BOOK
-       def_bool n
-
-config SCHED_DRAWER
-       def_bool n
-
 config SCHED_TOPOLOGY
        def_bool y
        prompt "Topology scheduler support"
        select SCHED_SMT
        select SCHED_MC
-       select SCHED_BOOK
-       select SCHED_DRAWER
        help
          Topology scheduler support improves the CPU scheduler's decision
          making when dealing with machines that have multi-threading,
@@ -716,7 +708,6 @@ config EADM_SCH
 config VFIO_CCW
        def_tristate n
        prompt "Support for VFIO-CCW subchannels"
-       depends on S390_CCW_IOMMU
        depends on VFIO
        select VFIO_MDEV
        help
@@ -728,7 +719,7 @@ config VFIO_CCW
 config VFIO_AP
        def_tristate n
        prompt "VFIO support for AP devices"
-       depends on S390_AP_IOMMU && KVM
+       depends on KVM
        depends on VFIO
        depends on ZCRYPT
        select VFIO_MDEV
index 4ccf66d..be3bf03 100644 (file)
@@ -591,8 +591,6 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-CONFIG_S390_CCW_IOMMU=y
-CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -703,6 +701,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
+CONFIG_INIT_STACK_NONE=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
index 693297a..769c7ee 100644 (file)
@@ -580,8 +580,6 @@ CONFIG_VIRTIO_BALLOON=m
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VHOST_NET=m
 CONFIG_VHOST_VSOCK=m
-CONFIG_S390_CCW_IOMMU=y
-CONFIG_S390_AP_IOMMU=y
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
@@ -686,6 +684,7 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
 CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
+CONFIG_INIT_STACK_NONE=y
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
index 33a232b..6f68b39 100644 (file)
@@ -67,6 +67,7 @@ CONFIG_ZFCP=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_LSM="yama,loadpin,safesetid,integrity"
+CONFIG_INIT_STACK_NONE=y
 # CONFIG_ZLIB_DFLTCC is not set
 CONFIG_XZ_DEC_MICROLZMA=y
 CONFIG_PRINTK_TIME=y
index 7752bd3..5fae187 100644 (file)
@@ -82,7 +82,7 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
         * it cannot handle a block of data or less, but otherwise
         * it can handle data of arbitrary size
         */
-       if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20)
+       if (bytes <= CHACHA_BLOCK_SIZE || nrounds != 20 || !MACHINE_HAS_VX)
                chacha_crypt_generic(state, dst, src, bytes, nrounds);
        else
                chacha20_crypt_s390(state, dst, src, bytes,
index a386070..3cb9d81 100644 (file)
@@ -112,7 +112,7 @@ struct compat_statfs64 {
        u32             f_namelen;
        u32             f_frsize;
        u32             f_flags;
-       u32             f_spare[4];
+       u32             f_spare[5];
 };
 
 /*
index 72604f7..f85b507 100644 (file)
@@ -30,7 +30,7 @@ struct statfs {
        unsigned int    f_namelen;
        unsigned int    f_frsize;
        unsigned int    f_flags;
-       unsigned int    f_spare[4];
+       unsigned int    f_spare[5];
 };
 
 struct statfs64 {
@@ -45,7 +45,7 @@ struct statfs64 {
        unsigned int    f_namelen;
        unsigned int    f_frsize;
        unsigned int    f_flags;
-       unsigned int    f_spare[4];
+       unsigned int    f_spare[5];
 };
 
 #endif
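
Growing f_spare from 4 to 5 entries makes the reserved tail cover what was
previously unnamed trailing padding. A compile-time guard in the same
spirit (EXPECTED_SIZE is a placeholder, not a value from the patch):

    #include <linux/build_bug.h>

    /* Pin the UAPI layout so a padding change like this one is caught
     * at build time rather than by userspace. */
    static_assert(sizeof(struct statfs64) == EXPECTED_SIZE,
                  "statfs64 ABI size changed");
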
index 8983837..6b2a051 100644 (file)
@@ -10,6 +10,7 @@ CFLAGS_REMOVE_ftrace.o                = $(CC_FLAGS_FTRACE)
 
 # Do not trace early setup code
 CFLAGS_REMOVE_early.o          = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook.o                = $(CC_FLAGS_FTRACE)
 
 endif
 
index 43de939..f44f70d 100644 (file)
@@ -1935,14 +1935,13 @@ static struct shutdown_action __refdata dump_action = {
 
 static void dump_reipl_run(struct shutdown_trigger *trigger)
 {
-       unsigned long ipib = (unsigned long) reipl_block_actual;
        struct lowcore *abs_lc;
        unsigned int csum;
 
        csum = (__force unsigned int)
               csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
        abs_lc = get_abs_lowcore();
-       abs_lc->ipib = ipib;
+       abs_lc->ipib = __pa(reipl_block_actual);
        abs_lc->ipib_checksum = csum;
        put_abs_lowcore(abs_lc);
        dump_run(trigger);
index 9fd1953..68adf1d 100644 (file)
@@ -95,7 +95,7 @@ out:
 static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
 {
        static cpumask_t mask;
-       int i;
+       unsigned int max_cpu;
 
        cpumask_clear(&mask);
        if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
@@ -104,9 +104,10 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
        if (topology_mode != TOPOLOGY_MODE_HW)
                goto out;
        cpu -= cpu % (smp_cpu_mtid + 1);
-       for (i = 0; i <= smp_cpu_mtid; i++) {
-               if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
-                       cpumask_set_cpu(cpu + i, &mask);
+       max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
+       for (; cpu <= max_cpu; cpu++) {
+               if (cpumask_test_cpu(cpu, &cpu_setup_mask))
+                       cpumask_set_cpu(cpu, &mask);
        }
 out:
        cpumask_copy(dst, &mask);
@@ -123,25 +124,26 @@ static void add_cpus_to_mask(struct topology_core *tl_core,
        unsigned int core;
 
        for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
-               unsigned int rcore;
-               int lcpu, i;
+               unsigned int max_cpu, rcore;
+               int cpu;
 
                rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
-               lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
-               if (lcpu < 0)
+               cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
+               if (cpu < 0)
                        continue;
-               for (i = 0; i <= smp_cpu_mtid; i++) {
-                       topo = &cpu_topology[lcpu + i];
+               max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
+               for (; cpu <= max_cpu; cpu++) {
+                       topo = &cpu_topology[cpu];
                        topo->drawer_id = drawer->id;
                        topo->book_id = book->id;
                        topo->socket_id = socket->id;
                        topo->core_id = rcore;
-                       topo->thread_id = lcpu + i;
+                       topo->thread_id = cpu;
                        topo->dedicated = tl_core->d;
-                       cpumask_set_cpu(lcpu + i, &drawer->mask);
-                       cpumask_set_cpu(lcpu + i, &book->mask);
-                       cpumask_set_cpu(lcpu + i, &socket->mask);
-                       smp_cpu_set_polarization(lcpu + i, tl_core->pp);
+                       cpumask_set_cpu(cpu, &drawer->mask);
+                       cpumask_set_cpu(cpu, &book->mask);
+                       cpumask_set_cpu(cpu, &socket->mask);
+                       smp_cpu_set_polarization(cpu, tl_core->pp);
                }
        }
 }
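
The recurring pattern in both hunks, reduced to its core (illustrative
numbers):

    /*
     * nr_cpu_ids = 8, smp_cpu_mtid = 3 (four threads per core), cpu = 6:
     * the old loop visited cpu + 0 .. cpu + 3 = 6..9 and indexed past
     * the end; clamping yields max_cpu = min(6 + 3, 8 - 1) = 7.
     */
    unsigned int max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);

    for (; cpu <= max_cpu; cpu++)
            /* ... per-thread setup ... */;
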
index dee6f66..a461a95 100644 (file)
@@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o
 hostaudio-objs := hostaudio_kern.o
 ubd-objs := ubd_kern.o ubd_user.o
 port-objs := port_kern.o port_user.o
-harddog-objs := harddog_kern.o harddog_user.o
+harddog-objs := harddog_kern.o
+harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o
 rtc-objs := rtc_kern.o rtc_user.o
 
 LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
@@ -60,6 +61,7 @@ obj-$(CONFIG_PTY_CHAN) += pty.o
 obj-$(CONFIG_TTY_CHAN) += tty.o 
 obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o
 obj-$(CONFIG_UML_WATCHDOG) += harddog.o
+obj-y += $(harddog-builtin-y) $(harddog-builtin-m)
 obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
 obj-$(CONFIG_UML_RANDOM) += random.o
 obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h
new file mode 100644 (file)
index 0000000..6d9ea60
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UM_WATCHDOG_H
+#define UM_WATCHDOG_H
+
+int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
+void stop_watchdog(int in_fd, int out_fd);
+int ping_watchdog(int fd);
+
+#endif /* UM_WATCHDOG_H */
index e6d4f43..60d1c6c 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include "mconsole.h"
+#include "harddog.h"
 
 MODULE_LICENSE("GPL");
 
@@ -60,8 +61,6 @@ static int harddog_out_fd = -1;
  *     Allow only one person to hold it open
  */
 
-extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
-
 static int harddog_open(struct inode *inode, struct file *file)
 {
        int err = -EBUSY;
@@ -92,8 +91,6 @@ err:
        return err;
 }
 
-extern void stop_watchdog(int in_fd, int out_fd);
-
 static int harddog_release(struct inode *inode, struct file *file)
 {
        /*
@@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-extern int ping_watchdog(int fd);
-
 static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
                             loff_t *ppos)
 {
index 070468d..9ed8930 100644 (file)
@@ -7,6 +7,7 @@
 #include <unistd.h>
 #include <errno.h>
 #include <os.h>
+#include "harddog.h"
 
 struct dog_data {
        int stdin_fd;
diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c
new file mode 100644 (file)
index 0000000..c74d4b8
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include "harddog.h"
+
+#if IS_MODULE(CONFIG_UML_WATCHDOG)
+EXPORT_SYMBOL(start_watchdog);
+EXPORT_SYMBOL(stop_watchdog);
+EXPORT_SYMBOL(ping_watchdog);
+#endif
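
The general shape of this arrangement, with hypothetical names (CONFIG_FOO
and foo_helper are not from the patch): the helper object is always built
in, and its symbols are exported only when the consumer is modular and
therefore needs them:

    /* foo_user_exp.c -- built in alongside foo_user.c */
    #include <linux/export.h>
    #include "foo.h"

    #if IS_MODULE(CONFIG_FOO)
    EXPORT_SYMBOL(foo_helper);  /* only foo.ko needs the export */
    #endif
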
index 498dc60..0d02c4a 100644 (file)
@@ -13,7 +13,9 @@
 
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/types.h>
+
 #include <uapi/asm/vmx.h>
 #include <asm/vmxfeatures.h>
 
index dd61752..4070a01 100644 (file)
@@ -17,6 +17,7 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
 CFLAGS_REMOVE_sev.o = -pg
+CFLAGS_REMOVE_rethook.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o                           := n
index 123bf8b..0c9660a 100644 (file)
@@ -253,7 +253,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                                       int nent)
 {
        struct kvm_cpuid_entry2 *best;
-       u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
 
        best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
@@ -292,21 +291,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
        }
-
-       /*
-        * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
-        * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
-        * requested XCR0 value.  The enclave's XFRM must be a subset of XCRO
-        * at the time of EENTER, thus adjust the allowed XFRM by the guest's
-        * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
-        * '1' even on CPUs that don't support XSAVE.
-        */
-       best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
-       if (best) {
-               best->ecx &= guest_supported_xcr0 & 0xffffffff;
-               best->edx &= guest_supported_xcr0 >> 32;
-               best->ecx |= XFEATURE_MASK_FPSSE;
-       }
 }
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
index 0574030..2261b68 100644 (file)
@@ -170,12 +170,19 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
                return 1;
        }
 
-       /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */
+       /*
+        * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM.  Note
+        * that the allowed XFRM (XFeature Request Mask) isn't strictly bound
+        * by the supported XCR0.  FP+SSE *must* be set in XFRM, even if XSAVE
+        * is unsupported, i.e. even if XCR0 itself is completely unsupported.
+        */
        if ((u32)miscselect & ~sgx_12_0->ebx ||
            (u32)attributes & ~sgx_12_1->eax ||
            (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
            (u32)xfrm & ~sgx_12_1->ecx ||
-           (u32)(xfrm >> 32) & ~sgx_12_1->edx) {
+           (u32)(xfrm >> 32) & ~sgx_12_1->edx ||
+           xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) ||
+           (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
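
The two added XFRM rules, pulled out as a standalone predicate for clarity
(guest_xcr0 stands for vcpu->arch.guest_supported_xcr0):

    static bool xfrm_allowed(u64 xfrm, u64 guest_xcr0)
    {
            /*
             * XFRM may only request features the guest's XCR0 could
             * enable, except FP/SSE, which are always permitted...
             */
            if (xfrm & ~(guest_xcr0 | XFEATURE_MASK_FPSSE))
                    return false;

            /* ...and FP+SSE must both be set, even without XSAVE. */
            return (xfrm & XFEATURE_MASK_FPSSE) == XFEATURE_MASK_FPSSE;
    }
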
index ceb7c5e..c0778ca 100644 (file)
@@ -1446,7 +1446,7 @@ static const u32 msrs_to_save_base[] = {
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
        MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-       MSR_IA32_SPEC_CTRL,
+       MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
        MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
        MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
        MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -7155,6 +7155,10 @@ static void kvm_probe_msr_to_save(u32 msr_index)
                if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
                        return;
                break;
+       case MSR_IA32_TSX_CTRL:
+               if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
+                       return;
+               break;
        default:
                break;
        }
index 3cdac0f..8192452 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/sched/task.h>
 
 #include <asm/set_memory.h>
+#include <asm/cpu_device_id.h>
 #include <asm/e820/api.h>
 #include <asm/init.h>
 #include <asm/page.h>
@@ -261,6 +262,24 @@ static void __init probe_page_size_mask(void)
        }
 }
 
+#define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,     \
+                             .family  = 6,                     \
+                             .model = _model,                  \
+                           }
+/*
+ * INVLPG may not properly flush Global entries
+ * on these CPUs when PCIDs are enabled.
+ */
+static const struct x86_cpu_id invlpg_miss_ids[] = {
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
+       {}
+};
+
 static void setup_pcid(void)
 {
        if (!IS_ENABLED(CONFIG_X86_64))
@@ -269,6 +288,12 @@ static void setup_pcid(void)
        if (!boot_cpu_has(X86_FEATURE_PCID))
                return;
 
+       if (x86_match_cpu(invlpg_miss_ids)) {
+               pr_info("Incomplete global flushes, disabling PCID");
+               setup_clear_cpu_cap(X86_FEATURE_PCID);
+               return;
+       }
+
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                /*
                 * This can't be cr4_set_bits_and_update_boot() -- the
index 876d5df..5c01d7e 100644 (file)
@@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        struct rt_sigframe *frame;
        int err = 0, sig = ksig->sig;
        unsigned long sp, ra, tp, ps;
+       unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+       unsigned long handler_fdpic_GOT = 0;
        unsigned int base;
+       bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
+               (current->personality & FDPIC_FUNCPTRS);
+
+       if (fdpic) {
+               unsigned long __user *fdpic_func_desc =
+                       (unsigned long __user *)handler;
+               if (__get_user(handler, &fdpic_func_desc[0]) ||
+                   __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
+                       return -EFAULT;
+       }
 
        sp = regs->areg[1];
 
@@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-               ra = (unsigned long)ksig->ka.sa.sa_restorer;
+               if (fdpic) {
+                       unsigned long __user *fdpic_func_desc =
+                               (unsigned long __user *)ksig->ka.sa.sa_restorer;
+
+                       err |= __get_user(ra, fdpic_func_desc);
+               } else {
+                       ra = (unsigned long)ksig->ka.sa.sa_restorer;
+               }
        } else {
 
                /* Create sys_rt_sigreturn syscall in stack frame */
 
                err |= gen_return_code(frame->retcode);
-
-               if (err) {
-                       return -EFAULT;
-               }
                ra = (unsigned long) frame->retcode;
        }
 
-       /* 
+       if (err)
+               return -EFAULT;
+
+       /*
         * Create signal handler execution context.
         * Return context not modified until this point.
         */
@@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        /* Set up registers for signal handler; preserve the threadptr */
        tp = regs->threadptr;
        ps = regs->ps;
-       start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
-                    (unsigned long) frame);
+       start_thread(regs, handler, (unsigned long)frame);
 
        /* Set up a stack frame for a call4 if userspace uses windowed ABI */
        if (ps & PS_WOE_MASK) {
@@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        regs->areg[base + 4] = (unsigned long) &frame->uc;
        regs->threadptr = tp;
        regs->ps = ps;
+       if (fdpic)
+               regs->areg[base + 11] = handler_fdpic_GOT;
 
        pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
                 current->comm, current->pid, sig, frame, regs->pc);
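
For reference, the user-memory layout the fdpic branches above read with
__get_user() (the struct name is illustrative; the kernel uses a bare
two-element access):

    /* Under FDPIC, a function "pointer" addresses a two-word descriptor: */
    struct fdpic_funcdesc {
            unsigned long entry; /* becomes the new pc via start_thread() */
            unsigned long got;   /* stashed for the handler in areg[base + 11] */
    };
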
index 2a31b1a..17a7ef8 100644 (file)
@@ -56,6 +56,8 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 extern long long __ashrdi3(long long, int);
 extern long long __ashldi3(long long, int);
+extern long long __bswapdi2(long long);
+extern int __bswapsi2(int);
 extern long long __lshrdi3(long long, int);
 extern int __divsi3(int, int);
 extern int __modsi3(int, int);
@@ -66,6 +68,8 @@ extern unsigned long long __umulsidi3(unsigned int, unsigned int);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__bswapdi2);
+EXPORT_SYMBOL(__bswapsi2);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__divsi3);
 EXPORT_SYMBOL(__modsi3);
index 7ecef05..c9c2614 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 lib-y  += memcopy.o memset.o checksum.o \
-          ashldi3.o ashrdi3.o lshrdi3.o \
+          ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \
           divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \
           usercopy.o strncpy_user.o strnlen_user.o
 lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/bswapdi2.S b/arch/xtensa/lib/bswapdi2.S
new file mode 100644 (file)
index 0000000..d8e52e0
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__bswapdi2)
+
+       abi_entry_default
+       ssai    8
+       srli    a4, a2, 16
+       src     a4, a4, a2
+       src     a4, a4, a4
+       src     a4, a2, a4
+       srli    a2, a3, 16
+       src     a2, a2, a3
+       src     a2, a2, a2
+       src     a2, a3, a2
+       mov     a3, a4
+       abi_ret_default
+
+ENDPROC(__bswapdi2)
diff --git a/arch/xtensa/lib/bswapsi2.S b/arch/xtensa/lib/bswapsi2.S
new file mode 100644 (file)
index 0000000..9c1de13
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__bswapsi2)
+
+       abi_entry_default
+       ssai    8
+       srli    a3, a2, 16
+       src     a3, a3, a2
+       src     a3, a3, a3
+       src     a2, a2, a3
+       abi_ret_default
+
+ENDPROC(__bswapsi2)
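
The C semantics the two routines implement, as a reference sketch (not the
in-tree code):

    unsigned int ref_bswapsi2(unsigned int x)
    {
            return ((x & 0x000000ffu) << 24) | ((x & 0x0000ff00u) << 8) |
                   ((x & 0x00ff0000u) >> 8)  | ((x & 0xff000000u) >> 24);
    }

    unsigned long long ref_bswapdi2(unsigned long long x)
    {
            /* swap each 32-bit half, then swap the halves */
            return ((unsigned long long)ref_bswapsi2((unsigned int)x) << 32) |
                   ref_bswapsi2((unsigned int)(x >> 32));
    }
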
index d2e6be4..58d0aeb 100644 (file)
@@ -678,6 +678,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
        return error;
 }
 
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct inode *bd_inode = bdev_file_inode(file);
+
+       if (bdev_read_only(I_BDEV(bd_inode)))
+               return generic_file_readonly_mmap(file, vma);
+
+       return generic_file_mmap(file, vma);
+}
+
 const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
@@ -685,7 +695,7 @@ const struct file_operations def_blk_fops = {
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
-       .mmap           = generic_file_mmap,
+       .mmap           = blkdev_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
 #ifdef CONFIG_COMPAT
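
Roughly, the read-only fallback refuses shared writable mappings up front;
a sketch of generic_file_readonly_mmap()'s effect (not its exact body):

    if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
            return -EINVAL;     /* no shared-writable maps of a RO bdev */
    return generic_file_mmap(file, vma);
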
index e8492b3..0800a9d 100644 (file)
@@ -516,6 +516,17 @@ static const struct dmi_system_id maingear_laptop[] = {
        { }
 };
 
+static const struct dmi_system_id lg_laptop[] = {
+       {
+               .ident = "LG Electronics 17U70P",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
+               },
+       },
+       { }
+};
+
 struct irq_override_cmp {
        const struct dmi_system_id *system;
        unsigned char irq;
@@ -532,6 +543,7 @@ static const struct irq_override_cmp override_table[] = {
        { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
        { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
        { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+       { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
 };
 
 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
index ac1808d..05d9df9 100644 (file)
@@ -320,6 +320,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
                start_knode = &start->p->knode_class;
        klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
        iter->type = type;
+       iter->sp = sp;
 }
 EXPORT_SYMBOL_GPL(class_dev_iter_init);
 
@@ -361,6 +362,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_next);
 void class_dev_iter_exit(struct class_dev_iter *iter)
 {
        klist_iter_exit(&iter->ki);
+       subsys_put(iter->sp);
 }
 EXPORT_SYMBOL_GPL(class_dev_iter_exit);
 
index c7ed5d6..33d3298 100644 (file)
@@ -1120,6 +1120,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
        return ubq->nr_io_ready == ubq->q_depth;
 }
 
+static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+{
+       io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+}
+
 static void ublk_cancel_queue(struct ublk_queue *ubq)
 {
        int i;
@@ -1131,8 +1136,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
                struct ublk_io *io = &ubq->ios[i];
 
                if (io->flags & UBLK_IO_FLAG_ACTIVE)
-                       io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
-                                               IO_URING_F_UNLOCKED);
+                       io_uring_cmd_complete_in_task(io->cmd,
+                                                     ublk_cmd_cancel_cb);
        }
 
        /* all io commands are canceled */
index 3a34d7c..52ef446 100644 (file)
@@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
        hci_free_dev(hdev);
 }
 
-static struct btnxpuart_data w8987_data = {
+static struct btnxpuart_data w8987_data __maybe_unused = {
        .helper_fw_name = NULL,
        .fw_name = FIRMWARE_W8987,
 };
 
-static struct btnxpuart_data w8997_data = {
+static struct btnxpuart_data w8997_data __maybe_unused = {
        .helper_fw_name = FIRMWARE_HELPER,
        .fw_name = FIRMWARE_W8997,
 };
 
-static const struct of_device_id nxpuart_of_match_table[] = {
+static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
        { .compatible = "nxp,88w8987-bt", .data = &w8987_data },
        { .compatible = "nxp,88w8997-bt", .data = &w8997_data },
        { }
index c10a4aa..cd48033 100644 (file)
@@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
        struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
 
+       /* Give back zero bytes, as TPM chip has not yet fully resumed: */
+       if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+               return 0;
+
        return tpm_get_random(chip, data, max);
 }
 
index 4463d00..586ca10 100644 (file)
@@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev)
        }
 
 suspended:
+       chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
        if (rc)
                dev_err(dev, "Ignoring error %d while suspending\n", rc);
        return 0;
@@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev)
        if (chip == NULL)
                return -ENODEV;
 
+       chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+       /*
+        * Guarantee that SUSPENDED is written last, so that hwrng does not
+        * activate before the chip has been fully resumed.
+        */
+       wmb();
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_resume);
index 7af3898..7db3593 100644 (file)
@@ -122,6 +122,29 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
                },
        },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "ThinkStation P360 Tiny",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
+               },
+       },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "ThinkPad L490",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
+               },
+       },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "UPX-TGL",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+               },
+       },
        {}
 };
 
index 02945d5..558144f 100644 (file)
@@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
        u32 intmask;
        int rc;
 
-       if (chip->ops->clk_enable != NULL)
-               chip->ops->clk_enable(chip, true);
-
-       /* reenable interrupts that device may have lost or
-        * BIOS/firmware may have disabled
+       /*
+        * Re-enable interrupts that device may have lost or BIOS/firmware may
+        * have disabled.
         */
        rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
-       if (rc < 0)
-               goto out;
+       if (rc < 0) {
+               dev_err(&chip->dev, "Setting IRQ failed.\n");
+               return;
+       }
 
        intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
-
-       tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
-
-out:
-       if (chip->ops->clk_enable != NULL)
-               chip->ops->clk_enable(chip, false);
-
-       return;
+       rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+       if (rc < 0)
+               dev_err(&chip->dev, "Enabling interrupts failed.\n");
 }
 
 int tpm_tis_resume(struct device *dev)
@@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev)
        struct tpm_chip *chip = dev_get_drvdata(dev);
        int ret;
 
-       ret = tpm_tis_request_locality(chip, 0);
-       if (ret < 0)
+       ret = tpm_chip_start(chip);
+       if (ret)
                return ret;
 
        if (chip->flags & TPM_CHIP_FLAG_IRQ)
                tpm_tis_reenable_interrupts(chip);
 
-       ret = tpm_pm_resume(dev);
-       if (ret)
-               goto out;
-
        /*
         * TPM 1.2 requires self-test on resume. This function actually returns
         * an error code but for unknown reason it isn't handled.
         */
        if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
                tpm1_do_selftest(chip);
-out:
-       tpm_tis_relinquish_locality(chip, 0);
 
-       return ret;
+       tpm_chip_stop(chip);
+
+       ret = tpm_pm_resume(dev);
+       if (ret)
+               return ret;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_tis_resume);
 #endif
index 2990439..b2f05d2 100644 (file)
@@ -975,7 +975,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
 
        /* don't keep reloading if cpufreq_driver exists */
        if (cpufreq_get_current_driver())
-               return -EEXIST;
+               return -ENODEV;
 
        pr_debug("%s\n", __func__);
 
index 1d2cfea..73efbcf 100644 (file)
@@ -583,7 +583,7 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev)
 
        /* Skip initialization if another cpufreq driver is there. */
        if (cpufreq_get_current_driver())
-               return -EEXIST;
+               return -ENODEV;
 
        if (acpi_disabled)
                return -ENODEV;
index f52d0ba..a7d2508 100644 (file)
@@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
                if (r)
                        amdgpu_fence_driver_force_completion(ring);
 
-               if (ring->fence_drv.irq_src)
+               if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+                   ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
 
index f5b5ce1..1ec0765 100644 (file)
@@ -8152,8 +8152,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
        case IP_VERSION(10, 3, 3):
        case IP_VERSION(10, 3, 6):
        case IP_VERSION(10, 3, 7):
+               if (!enable)
+                       amdgpu_gfx_off_ctrl(adev, false);
+
                gfx_v10_cntl_pg(adev, enable);
-               amdgpu_gfx_off_ctrl(adev, enable);
+
+               if (enable)
+                       amdgpu_gfx_off_ctrl(adev, true);
+
                break;
        default:
                break;
index f5c3762..c4940b6 100644 (file)
@@ -4667,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
        uint64_t clock;
        uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
 
-       amdgpu_gfx_off_ctrl(adev, false);
-       mutex_lock(&adev->gfx.gpu_clock_mutex);
        if (amdgpu_sriov_vf(adev)) {
+               amdgpu_gfx_off_ctrl(adev, false);
+               mutex_lock(&adev->gfx.gpu_clock_mutex);
                clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
                clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
                clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
                if (clock_counter_hi_pre != clock_counter_hi_after)
                        clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+               mutex_unlock(&adev->gfx.gpu_clock_mutex);
+               amdgpu_gfx_off_ctrl(adev, true);
        } else {
+               preempt_disable();
                clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
                clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
                clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
                if (clock_counter_hi_pre != clock_counter_hi_after)
                        clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+               preempt_enable();
        }
        clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
-       mutex_unlock(&adev->gfx.gpu_clock_mutex);
-       amdgpu_gfx_off_ctrl(adev, true);
+
        return clock;
 }
 
@@ -5150,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle,
                break;
        case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 4):
+               if (!enable)
+                       amdgpu_gfx_off_ctrl(adev, false);
+
                gfx_v11_cntl_pg(adev, enable);
-               amdgpu_gfx_off_ctrl(adev, enable);
+
+               if (enable)
+                       amdgpu_gfx_off_ctrl(adev, true);
+
                break;
        default:
                break;
index f46d4b1..9818743 100644 (file)
@@ -4003,30 +4003,25 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
                clock = clock_lo | (clock_hi << 32ULL);
                break;
        case IP_VERSION(9, 1, 0):
+       case IP_VERSION(9, 2, 2):
                preempt_disable();
-               clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-               hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-               /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
-                * roughly every 42 seconds.
-                */
-               if (hi_check != clock_hi) {
+               if (adev->rev_id >= 0x8) {
+                       clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
+                       clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
+                       hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
+               } else {
+                       clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
                        clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-                       clock_hi = hi_check;
+                       hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
                }
-               preempt_enable();
-               clock = clock_lo | (clock_hi << 32ULL);
-               break;
-       case IP_VERSION(9, 2, 2):
-               preempt_disable();
-               clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
-               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
-               hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
                /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
-                * roughly every 42 seconds.
-                */
+                * roughly every 42 seconds.
+                */
                if (hi_check != clock_hi) {
-                       clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
+                       if (adev->rev_id >= 0x8)
+                               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
+                       else
+                               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
                        clock_hi = hi_check;
                }
                preempt_enable();
index d95f9fe..4116c11 100644 (file)
@@ -31,6 +31,8 @@
 #include "umc_v8_10.h"
 #include "athub/athub_3_0_0_sh_mask.h"
 #include "athub/athub_3_0_0_offset.h"
+#include "dcn/dcn_3_2_0_offset.h"
+#include "dcn/dcn_3_2_0_sh_mask.h"
 #include "oss/osssys_6_0_0_offset.h"
 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 #include "navi10_enum.h"
@@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
 
 static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
-       return 0;
+       u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
+       unsigned size;
+
+       if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+               size = AMDGPU_VBIOS_VGA_ALLOCATION;
+       } else {
+               u32 viewport;
+               u32 pitch;
+
+               viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+               pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
+               size = (REG_GET_FIELD(viewport,
+                                       HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+                               REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+                               4);
+       }
+
+       return size;
 }
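
A worked example of the size computation (scanout values illustrative; the
factor 4 is the bytes-per-pixel assumption hardcoded above):

    /*
     * PRI_VIEWPORT_HEIGHT = 1080, PITCH = 1920:
     * size = 1080 * 1920 * 4 = 8294400 bytes (~7.9 MiB) of vBIOS
     * framebuffer to reserve, instead of the former unconditional 0.
     */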
 
 static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
index d4b7da5..e8b2fc4 100644 (file)
@@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
                link[i] = stream[i].link;
                bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
        }
+
+       ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+
        return ret;
 }
index 5633c57..2ddf519 100644 (file)
@@ -733,6 +733,24 @@ static int smu_late_init(void *handle)
                return ret;
        }
 
+       /*
+        * Explicitly notify the PMFW of the power mode the system is
+        * in, since the PMFW may boot the ASIC in a different mode.
+        * For ASICs supporting AC/DC switching via GPIO, the PMFW
+        * handles the switch automatically; driver involvement is
+        * unnecessary.
+        */
+       if (!smu->dc_controlled_by_gpio) {
+               ret = smu_set_power_source(smu,
+                                          adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+                                          SMU_POWER_SOURCE_DC);
+               if (ret) {
+                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
+                               adev->pm.ac_power ? "AC" : "DC");
+                       return ret;
+               }
+       }
+
        if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
            (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
                return 0;
index c400051..275f708 100644 (file)
@@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
                return 0;
 
        ret = navi10_run_umc_cdr_workaround(smu);
-       if (ret) {
+       if (ret)
                dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
-               return ret;
-       }
-
-       if (!smu->dc_controlled_by_gpio) {
-               /*
-                * For Navi1X, manually switch it to AC mode as PMFW
-                * may boot it with DC mode.
-                */
-               ret = smu_v11_0_set_power_source(smu,
-                                                adev->pm.ac_power ?
-                                                SMU_POWER_SOURCE_AC :
-                                                SMU_POWER_SOURCE_DC);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
-                                       adev->pm.ac_power ? "AC" : "DC");
-                       return ret;
-               }
-       }
 
        return ret;
 }
index 3d9ff46..98a33f8 100644 (file)
@@ -1770,6 +1770,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_7_get_power_limit,
        .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
        .set_tool_table_location = smu_v13_0_set_tool_table_location,
index 74ea3c2..1a5ae78 100644 (file)
@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
        return -ENODEV;
 }
 
-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
 {
        return 0;
 }
 
-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
 { }
 #endif
index 650232c..b183efa 100644 (file)
@@ -204,8 +204,6 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       struct intel_gt *gt = dev_priv->media_gt;
-       struct intel_gsc_uc *gsc = &gt->uc.gsc;
        bool capable = false;
 
        /* I915 support for HDCP2.2 */
@@ -213,9 +211,13 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
                return false;
 
        /* If MTL+ make sure gsc is loaded and proxy is setup */
-       if (intel_hdcp_gsc_cs_required(dev_priv))
-               if (!intel_uc_fw_is_running(&gsc->fw))
+       if (intel_hdcp_gsc_cs_required(dev_priv)) {
+               struct intel_gt *gt = dev_priv->media_gt;
+               struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
+
+               if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
                        return false;
+       }
 
        /* MEI/GSC interface is solid depending on which is used */
        mutex_lock(&dev_priv->display.hdcp.comp_mutex);
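
The i915 hunk defers the media_gt/gsc dereference until the MTL+ branch and NULL-checks both, since dev_priv->media_gt may be absent on platforms that don't need the GSC path. A sketch of the guard pattern with hypothetical stand-in structs:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the i915 structures involved. */
struct gsc_uc { bool fw_running; };
struct gt { struct gsc_uc gsc; };

static bool hdcp2_capable(struct gt *media_gt, bool needs_gsc)
{
	if (needs_gsc) {
		/* Only dereference once we know the pointer chain is valid. */
		struct gsc_uc *gsc = media_gt ? &media_gt->gsc : NULL;

		if (!gsc || !gsc->fw_running)
			return false;
	}
	return true;
}

int main(void)
{
	struct gt gt = { .gsc = { .fw_running = true } };

	printf("no media_gt, gsc required -> %d\n", hdcp2_capable(NULL, true));
	printf("gsc firmware running     -> %d\n", hdcp2_capable(&gt, true));
	return 0;
}
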
index 2b3ae84..bdcd554 100644 (file)
@@ -98,17 +98,17 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
 
 static const struct dpu_lm_cfg msm8998_lm[] = {
        LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0),
+               &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
        LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1),
+               &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
        LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_2, LM_0, 0),
+               &msm8998_lm_sblk, PINGPONG_2, LM_5, 0),
        LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
                &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
        LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
                &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
        LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_3, LM_1, 0),
+               &msm8998_lm_sblk, PINGPONG_3, LM_2, 0),
 };
 
 static const struct dpu_pingpong_cfg msm8998_pp[] = {
@@ -134,10 +134,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
 };
 
 static const struct dpu_intf_cfg msm8998_intf[] = {
-       INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
-       INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
-       INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
-       INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+       INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+       INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+       INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+       INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
 };
 
 static const struct dpu_perf_cfg msm8998_perf_data = {
index 282d410..42b0e58 100644 (file)
@@ -128,10 +128,10 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8150_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index c574002..e3bdfe7 100644 (file)
@@ -116,10 +116,10 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
 };
 
 static const struct dpu_pingpong_cfg sc8180x_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index 2c40229..ed13058 100644 (file)
@@ -129,10 +129,10 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8250_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index 8799ed7..a46b117 100644 (file)
@@ -80,8 +80,8 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7180_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1),
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1),
 };
 
 static const struct dpu_intf_cfg sc7180_intf[] = {
index 6f04d8f..988d820 100644 (file)
@@ -122,7 +122,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = {
        .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
                     BIT(MDP_SSPP_TOP0_INTR2) | \
                     BIT(MDP_SSPP_TOP0_HIST_INTR) | \
-                    BIT(MDP_INTF0_INTR) | \
                     BIT(MDP_INTF1_INTR),
 };
 
index 303492d..c9003dc 100644 (file)
@@ -112,7 +112,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
        .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
                     BIT(MDP_SSPP_TOP0_INTR2) | \
                     BIT(MDP_SSPP_TOP0_HIST_INTR) | \
-                    BIT(MDP_INTF0_INTR) | \
                     BIT(MDP_INTF1_INTR),
 };
 
index ca107ca..4f6a965 100644 (file)
@@ -127,22 +127,22 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8350_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
-       PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
 };
index 5957de1..6b2c7ea 100644 (file)
@@ -87,10 +87,10 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7280_pp[] = {
-       PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
 };
 
 static const struct dpu_intf_cfg sc7280_intf[] = {
index 9aab110..706d0f1 100644 (file)
@@ -121,18 +121,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
-       PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
-       PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
-       PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
-       PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
 };
 
 static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
index 02a259b..4ecb3df 100644 (file)
@@ -128,28 +128,28 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
 };
 /* FIXME: interrupts */
 static const struct dpu_pingpong_cfg sm8450_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
-       PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
-       PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
-       PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
 };
index 9e40303..d0ab351 100644 (file)
@@ -132,28 +132,28 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
                 &sm8150_dspp_sblk),
 };
 static const struct dpu_pingpong_cfg sm8550_pp[] = {
-       PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        -1),
-       PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        -1),
-       PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        -1),
-       PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        -1),
-       PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
-       PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
-       PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
 };
index 03f162a..5d994bc 100644 (file)
@@ -491,7 +491,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
        .len = 0x20, .version = 0x20000},
 };
 
-#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
        {\
        .name = _name, .id = _id, \
        .base = _base, .len = 0, \
@@ -587,12 +587,12 @@ static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
 
 static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
        {
-               .pps = 1088 * 1920 * 30,
+               .pps = 1920 * 1080 * 30,
                .ot_limit = 2,
        },
        {
-               .pps = 1088 * 1920 * 60,
-               .ot_limit = 6,
+               .pps = 1920 * 1080 * 60,
+               .ot_limit = 4,
        },
        {
                .pps = 3840 * 2160 * 30,
@@ -705,10 +705,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
        {.fl = 10, .lut = 0x1555b},
        {.fl = 11, .lut = 0x5555b},
        {.fl = 12, .lut = 0x15555b},
-       {.fl = 13, .lut = 0x55555b},
-       {.fl = 14, .lut = 0},
-       {.fl = 1,  .lut = 0x1b},
-       {.fl = 0,  .lut = 0}
+       {.fl = 0,  .lut = 0x55555b}
 };
 
 static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
@@ -730,9 +727,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
        {.fl = 10, .lut = 0x1aaff},
        {.fl = 11, .lut = 0x5aaff},
        {.fl = 12, .lut = 0x15aaff},
-       {.fl = 13, .lut = 0x55aaff},
-       {.fl = 1,  .lut = 0x1aaff},
-       {.fl = 0,  .lut = 0},
+       {.fl = 0,  .lut = 0x55aaff},
 };
 
 static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
index 53326f2..17f3e7e 100644 (file)
@@ -15,7 +15,7 @@
 
 /*
  * Register offsets in MDSS register file for the interrupt registers
- * w.r.t. to the MDP base
+ * w.r.t. the MDP base
  */
 #define MDP_SSPP_TOP0_OFF              0x0
 #define MDP_INTF_0_OFF                 0x6A000
 #define MDP_INTF_3_OFF                 0x6B800
 #define MDP_INTF_4_OFF                 0x6C000
 #define MDP_INTF_5_OFF                 0x6C800
+#define INTF_INTR_EN                   0x1c0
+#define INTF_INTR_STATUS               0x1c4
+#define INTF_INTR_CLEAR                        0x1c8
 #define MDP_AD4_0_OFF                  0x7C000
 #define MDP_AD4_1_OFF                  0x7D000
 #define MDP_AD4_INTR_EN_OFF            0x41c
 #define MDP_AD4_INTR_CLEAR_OFF         0x424
 #define MDP_AD4_INTR_STATUS_OFF                0x420
-#define MDP_INTF_0_OFF_REV_7xxx             0x34000
-#define MDP_INTF_1_OFF_REV_7xxx             0x35000
-#define MDP_INTF_2_OFF_REV_7xxx             0x36000
-#define MDP_INTF_3_OFF_REV_7xxx             0x37000
-#define MDP_INTF_4_OFF_REV_7xxx             0x38000
-#define MDP_INTF_5_OFF_REV_7xxx             0x39000
-#define MDP_INTF_6_OFF_REV_7xxx             0x3a000
-#define MDP_INTF_7_OFF_REV_7xxx             0x3b000
-#define MDP_INTF_8_OFF_REV_7xxx             0x3c000
+#define MDP_INTF_0_OFF_REV_7xxx                0x34000
+#define MDP_INTF_1_OFF_REV_7xxx                0x35000
+#define MDP_INTF_2_OFF_REV_7xxx                0x36000
+#define MDP_INTF_3_OFF_REV_7xxx                0x37000
+#define MDP_INTF_4_OFF_REV_7xxx                0x38000
+#define MDP_INTF_5_OFF_REV_7xxx                0x39000
+#define MDP_INTF_6_OFF_REV_7xxx                0x3a000
+#define MDP_INTF_7_OFF_REV_7xxx                0x3b000
+#define MDP_INTF_8_OFF_REV_7xxx                0x3c000
 
 /**
  * struct dpu_intr_reg - array of DPU register sets
index 84ee2ef..b9dddf5 100644 (file)
 #define   INTF_TPG_RGB_MAPPING          0x11C
 #define   INTF_PROG_FETCH_START         0x170
 #define   INTF_PROG_ROT_START           0x174
-
-#define   INTF_FRAME_LINE_COUNT_EN      0x0A8
-#define   INTF_FRAME_COUNT              0x0AC
-#define   INTF_LINE_COUNT               0x0B0
-
 #define   INTF_MUX                      0x25C
 #define   INTF_STATUS                   0x26C
 
index 2d28afd..a3e413d 100644 (file)
@@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
        for (i = 0; i < m->wb_count; i++) {
                if (wb == m->wb[i].id) {
                        b->blk_addr = addr + m->wb[i].base;
+                       b->log_mask = DPU_DBG_MASK_WB;
                        return &m->wb[i];
                }
        }
index feb9a72..5acd568 100644 (file)
@@ -21,9 +21,6 @@
 #define HIST_INTR_EN                    0x01c
 #define HIST_INTR_STATUS                0x020
 #define HIST_INTR_CLEAR                 0x024
-#define INTF_INTR_EN                    0x1C0
-#define INTF_INTR_STATUS                0x1C4
-#define INTF_INTR_CLEAR                 0x1C8
 #define SPLIT_DISPLAY_EN                0x2F4
 #define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
 #define DSPP_IGC_COLOR0_RAM_LUTN        0x300
index 6666783..1245c7a 100644 (file)
@@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = {
        .i2s = 1,
 };
 
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio_priv;
+
+       audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+       if (audio_priv->audio_pdev) {
+               platform_device_unregister(audio_priv->audio_pdev);
+               audio_priv->audio_pdev = NULL;
+       }
+}
+
 int dp_register_audio_driver(struct device *dev,
                struct dp_audio *dp_audio)
 {
index 84e5f4a..4ab7888 100644 (file)
@@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
 int dp_register_audio_driver(struct device *dev,
                struct dp_audio *dp_audio);
 
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+
 /**
  * dp_audio_put()
  *
index 3e13acd..99a38db 100644 (file)
@@ -326,6 +326,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
        kthread_stop(dp->ev_tsk);
 
        dp_power_client_deinit(dp->power);
+       dp_unregister_audio_driver(dev, dp->audio);
        dp_aux_unregister(dp->aux);
        dp->drm_dev = NULL;
        dp->aux->drm_dev = NULL;
index d77fa97..9c45d64 100644 (file)
@@ -155,6 +155,8 @@ static bool can_do_async(struct drm_atomic_state *state,
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        return false;
+               if (!crtc_state->active)
+                       return false;
                if (++num_crtcs > 1)
                        return false;
                *async_crtc = crtc;
index db6c4e2..cd39b9d 100644 (file)
@@ -219,7 +219,8 @@ static void put_pages(struct drm_gem_object *obj)
        }
 }
 
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
+                                             unsigned madv)
 {
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -227,7 +228,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 
        msm_gem_assert_locked(obj);
 
-       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+       if (GEM_WARN_ON(msm_obj->madv > madv)) {
+               DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+                       msm_obj->madv, madv);
                return ERR_PTR(-EBUSY);
        }
 
@@ -248,7 +251,7 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
        struct page **p;
 
        msm_gem_lock(obj);
-       p = msm_gem_pin_pages_locked(obj);
+       p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
        msm_gem_unlock(obj);
 
        return p;
@@ -473,10 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 
        msm_gem_assert_locked(obj);
 
-       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
-               return -EBUSY;
-
-       pages = msm_gem_pin_pages_locked(obj);
+       pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
@@ -699,13 +699,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
        if (obj->import_attach)
                return ERR_PTR(-ENODEV);
 
-       if (GEM_WARN_ON(msm_obj->madv > madv)) {
-               DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
-                       msm_obj->madv, madv);
-               return ERR_PTR(-EBUSY);
-       }
-
-       pages = msm_gem_pin_pages_locked(obj);
+       pages = msm_gem_pin_pages_locked(obj, madv);
        if (IS_ERR(pages))
                return ERR_CAST(pages);
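
The msm_gem changes centralize the madvise-state check inside msm_gem_pin_pages_locked() by passing the strictest state the caller tolerates: pinning for the GPU requires MSM_MADV_WILLNEED, while get_vaddr() forwards whatever its caller allowed. A sketch of the threshold comparison, assuming the same ordering of the madv values that the driver's check relies on:

#include <stdio.h>

/* Hypothetical ordering: WILLNEED < DONTNEED < PURGED, as the check assumes. */
enum madv_state { MADV_WILLNEED = 0, MADV_DONTNEED = 1, MADV_PURGED = 2 };

/* Returns 0 when the object's state is at most what the caller allows. */
static int check_pin_allowed(enum madv_state obj_state, enum madv_state allowed)
{
	if (obj_state > allowed) {
		fprintf(stderr, "invalid madv state: %u vs %u\n",
			obj_state, allowed);
		return -1;	/* the kernel code returns -EBUSY here */
	}
	return 0;
}

int main(void)
{
	/* Pinning for the GPU only tolerates WILLNEED ... */
	printf("pin  DONTNEED: %d\n",
	       check_pin_allowed(MADV_DONTNEED, MADV_WILLNEED));
	/* ... but a vmap for crash dumping may accept DONTNEED too. */
	printf("vmap DONTNEED: %d\n",
	       check_pin_allowed(MADV_DONTNEED, MADV_DONTNEED));
	return 0;
}
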
 
index aff18c2..9f5933c 100644 (file)
@@ -722,7 +722,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_gem_submit *args = data;
        struct msm_file_private *ctx = file->driver_priv;
-       struct msm_gem_submit *submit;
+       struct msm_gem_submit *submit = NULL;
        struct msm_gpu *gpu = priv->gpu;
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
@@ -769,13 +769,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
                        ret = out_fence_fd;
-                       return ret;
+                       goto out_post_unlock;
                }
        }
 
        submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
-       if (IS_ERR(submit))
-               return PTR_ERR(submit);
+       if (IS_ERR(submit)) {
+               ret = PTR_ERR(submit);
+               goto out_post_unlock;
+       }
 
        trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
                args->nr_bos, args->nr_cmds);
@@ -962,11 +964,20 @@ out:
        if (has_ww_ticket)
                ww_acquire_fini(&submit->ticket);
 out_unlock:
-       if (ret && (out_fence_fd >= 0))
-               put_unused_fd(out_fence_fd);
        mutex_unlock(&queue->lock);
 out_post_unlock:
-       msm_gem_submit_put(submit);
+       if (ret && (out_fence_fd >= 0))
+               put_unused_fd(out_fence_fd);
+
+       if (!IS_ERR_OR_NULL(submit)) {
+               msm_gem_submit_put(submit);
+       } else {
+               /*
+                * If the submit hasn't yet taken ownership of the queue
+                * then we need to drop the reference ourself:
+                */
+               msm_submitqueue_put(queue);
+       }
        if (!IS_ERR_OR_NULL(post_deps)) {
                for (i = 0; i < args->nr_out_syncobjs; ++i) {
                        kfree(post_deps[i].chain);
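
The submit-ioctl fix initializes submit to NULL, routes early failures through out_post_unlock, and drops the queue reference itself whenever submit_create() never ran, because ownership of that reference transfers to the submit only once it exists. A standalone sketch of the ownership hand-off with hypothetical refcounted types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted queue and a submit that may own one ref to it. */
struct queue { int refs; };
struct submit { struct queue *q; };

static void queue_put(struct queue *q)
{
	if (--q->refs == 0)
		printf("queue freed\n");
}

static struct submit *submit_create(struct queue *q, int fail)
{
	struct submit *s;

	if (fail)
		return NULL;	/* creation failed: queue ref NOT consumed */
	s = malloc(sizeof(*s));
	s->q = q;		/* submit now owns the caller's queue ref */
	return s;
}

static void submit_put(struct submit *s)
{
	queue_put(s->q);	/* dropping the submit drops its queue ref */
	free(s);
}

int main(void)
{
	struct queue q = { .refs = 1 };
	struct submit *s = submit_create(&q, 1);

	/* Error path: the submit never took ownership, so drop the ref here. */
	if (!s)
		queue_put(&q);
	else
		submit_put(s);
	return 0;
}
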
index 418e1e0..5cc8d35 100644 (file)
@@ -234,7 +234,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        /* Get the pagetable configuration from the domain */
        if (adreno_smmu->cookie)
                ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
-       if (!ttbr1_cfg)
+
+       /*
+        * If you hit this WARN_ONCE() you are probably missing an entry in
+        * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
+        */
+       if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
                return ERR_PTR(-ENODEV);
 
        pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
@@ -410,7 +415,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
        struct msm_mmu *mmu;
 
        mmu = msm_iommu_new(dev, quirks);
-       if (IS_ERR(mmu))
+       if (IS_ERR_OR_NULL(mmu))
                return mmu;
 
        iommu = to_msm_iommu(mmu);
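
The msm_iommu hunk widens the check to IS_ERR_OR_NULL(), apparently because msm_iommu_new() can return NULL as well as an ERR_PTR(), and the new WARN_ONCE() names the likely cause (a missing qcom_smmu_impl_of_match[] entry). A tiny userspace model of the ERR_PTR convention showing why IS_ERR() alone lets NULL through:

#include <stdio.h>

/* Hypothetical miniature of the kernel's ERR_PTR convention. */
#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }

static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static int IS_ERR_OR_NULL(const void *p) { return !p || IS_ERR(p); }

int main(void)
{
	void *err = ERR_PTR(-19);	/* -ENODEV encoded in a pointer */
	void *nil = NULL;

	/* IS_ERR() alone lets NULL slip through to a dereference. */
	printf("err:  IS_ERR=%d IS_ERR_OR_NULL=%d\n",
	       IS_ERR(err), IS_ERR_OR_NULL(err));
	printf("null: IS_ERR=%d IS_ERR_OR_NULL=%d\n",
	       IS_ERR(nil), IS_ERR_OR_NULL(nil));
	return 0;
}
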
index db98c3f..6de9007 100644 (file)
@@ -417,22 +417,6 @@ config S390_IOMMU
        help
          Support for the IOMMU API for s390 PCI devices.
 
-config S390_CCW_IOMMU
-       bool "S390 CCW IOMMU Support"
-       depends on S390 && CCW || COMPILE_TEST
-       select IOMMU_API
-       help
-         Enables bits of IOMMU API required by VFIO. The iommu_ops
-         is not implemented as it is not necessary for VFIO.
-
-config S390_AP_IOMMU
-       bool "S390 AP IOMMU Support"
-       depends on S390 && ZCRYPT || COMPILE_TEST
-       select IOMMU_API
-       help
-         Enables bits of IOMMU API required by VFIO. The iommu_ops
-         is not implemented as it is not necessary for VFIO.
-
 config MTK_IOMMU
        tristate "MediaTek IOMMU Support"
        depends on ARCH_MEDIATEK || COMPILE_TEST
index ae09c62..c71afda 100644 (file)
@@ -517,6 +517,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
        { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data  },
        { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
+       { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
@@ -561,5 +562,14 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
        if (match)
                return qcom_smmu_create(smmu, match->data);
 
+       /*
+        * If you hit this WARN_ON() you are missing an entry in the
+        * qcom_smmu_impl_of_match[] table, and GPU per-process page-
+        * tables will be broken.
+        */
+       WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
+            "Missing qcom_smmu_impl_of_match entry for: %s",
+            dev_name(smmu->dev));
+
        return smmu;
 }
index c2d2792..baf6454 100644 (file)
@@ -151,6 +151,12 @@ struct dvb_ca_private {
 
        /* mutex serializing ioctls */
        struct mutex ioctl_mutex;
+
+       /* A mutex used when a device is disconnected */
+       struct mutex remove_mutex;
+
+       /* Whether the device is disconnected */
+       int exit;
 };
 
 static void dvb_ca_private_free(struct dvb_ca_private *ca)
@@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
 static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
                                    u8 *ebuf, int ecount);
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
-                                    u8 *ebuf, int ecount);
+                                    u8 *ebuf, int ecount, int size_write_flag);
 
 /**
  * findstr - Safely find needle in haystack.
@@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
        ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
        if (ret)
                return ret;
-       ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
+       ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW);
        if (ret != 2)
                return -EIO;
        ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
@@ -778,11 +784,13 @@ exit:
  * @buf: The data in this buffer is treated as a complete link-level packet to
  *      be written.
  * @bytes_write: Size of ebuf.
+ * @size_write_flag: A flag in the Command Register indicating whether the
+ * link size information will be written.
  *
  * return: Number of bytes written, or < 0 on error.
  */
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
-                                    u8 *buf, int bytes_write)
+                                    u8 *buf, int bytes_write, int size_write_flag)
 {
        struct dvb_ca_slot *sl = &ca->slot_info[slot];
        int status;
@@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
 
        /* OK, set HC bit */
        status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
-                                           IRQEN | CMDREG_HC);
+                                           IRQEN | CMDREG_HC | size_write_flag);
        if (status)
                goto exit;
 
@@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
 
                        mutex_lock(&sl->slot_lock);
                        status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
-                                                          fraglen + 2);
+                                                          fraglen + 2, 0);
                        mutex_unlock(&sl->slot_lock);
                        if (status == (fraglen + 2)) {
                                written = 1;
@@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 
        dprintk("%s\n", __func__);
 
-       if (!try_module_get(ca->pub->owner))
+       mutex_lock(&ca->remove_mutex);
+
+       if (ca->exit) {
+               mutex_unlock(&ca->remove_mutex);
+               return -ENODEV;
+       }
+
+       if (!try_module_get(ca->pub->owner)) {
+               mutex_unlock(&ca->remove_mutex);
                return -EIO;
+       }
 
        err = dvb_generic_open(inode, file);
        if (err < 0) {
                module_put(ca->pub->owner);
+               mutex_unlock(&ca->remove_mutex);
                return err;
        }
 
@@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 
        dvb_ca_private_get(ca);
 
+       mutex_unlock(&ca->remove_mutex);
        return 0;
 }
 
@@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 
        dprintk("%s\n", __func__);
 
+       mutex_lock(&ca->remove_mutex);
+
        /* mark the CA device as closed */
        ca->open = 0;
        dvb_ca_en50221_thread_update_delay(ca);
@@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 
        dvb_ca_private_put(ca);
 
+       if (dvbdev->users == 1 && ca->exit == 1) {
+               mutex_unlock(&ca->remove_mutex);
+               wake_up(&dvbdev->wait_queue);
+       } else {
+               mutex_unlock(&ca->remove_mutex);
+       }
+
        return err;
 }
 
@@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
        }
 
        mutex_init(&ca->ioctl_mutex);
+       mutex_init(&ca->remove_mutex);
 
        if (signal_pending(current)) {
                ret = -EINTR;
@@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 
        dprintk("%s\n", __func__);
 
+       mutex_lock(&ca->remove_mutex);
+       ca->exit = 1;
+       mutex_unlock(&ca->remove_mutex);
+
+       if (ca->dvbdev->users < 1)
+               wait_event(ca->dvbdev->wait_queue,
+                               ca->dvbdev->users == 1);
+
        /* shutdown the thread if there was one */
        kthread_stop(ca->thread);
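
The dvb_ca changes apply the disconnect pattern used throughout this series: open() fails with -ENODEV once exit is set, release() wakes the remover when the last user goes away, and teardown sets exit under remove_mutex before waiting for users to drain. A condensed userspace sketch of the pattern (pthreads standing in for the kernel mutex and wait queue; build with -lpthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device mirroring the exit/remove_mutex/users pattern. */
struct dev {
	pthread_mutex_t remove_mutex;
	pthread_cond_t wait_queue;
	int users;	/* starts at 1 and counts down per open, as dvbdev does */
	bool exit;
};

static int dev_open(struct dev *d)
{
	pthread_mutex_lock(&d->remove_mutex);
	if (d->exit) {			/* device already unplugged */
		pthread_mutex_unlock(&d->remove_mutex);
		return -1;		/* -ENODEV in the kernel code */
	}
	d->users--;
	pthread_mutex_unlock(&d->remove_mutex);
	return 0;
}

static void dev_release(struct dev *d)
{
	pthread_mutex_lock(&d->remove_mutex);
	d->users++;
	/* Wake the remover only when we were the last user. */
	if (d->users == 1 && d->exit)
		pthread_cond_signal(&d->wait_queue);
	pthread_mutex_unlock(&d->remove_mutex);
}

static void dev_remove(struct dev *d)
{
	pthread_mutex_lock(&d->remove_mutex);
	d->exit = true;			/* block any further opens */
	while (d->users < 1)		/* wait for open files to drain */
		pthread_cond_wait(&d->wait_queue, &d->remove_mutex);
	pthread_mutex_unlock(&d->remove_mutex);
	printf("safe to free device state\n");
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER,
			 PTHREAD_COND_INITIALIZER, 1, false };

	dev_open(&d);
	dev_release(&d);	/* in real use this races with dev_remove() */
	dev_remove(&d);
	return 0;
}
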
 
index 398c862..7c4d86b 100644 (file)
@@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
-       feed->cc = cc;
        if (!ccok) {
                set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
                dprintk_sect_loss("missed packet: %d instead of %d!\n",
                                  cc, (feed->cc + 1) & 0x0f);
        }
+       feed->cc = cc;
 
        if (buf[1] & 0x40)      // PUSI ?
                feed->peslen = 0xfffa;
@@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
-       feed->cc = cc;
 
        if (buf[3] & 0x20) {
                /* adaption field present, check for discontinuity_indicator */
@@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                feed->pusi_seen = false;
                dvb_dmx_swfilter_section_new(feed);
        }
+       feed->cc = cc;
 
        if (buf[1] & 0x40) {
                /* PUSI=1 (is set), section boundary is here */
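
Both demux hunks move the feed->cc update below the continuity check, so the "missed packet" diagnostic compares the incoming counter against the previously stored one instead of the value it just overwrote. A tiny sketch of the 4-bit MPEG-TS continuity check with the update deliberately last:

#include <stdbool.h>
#include <stdio.h>

/* Check one TS packet's 4-bit continuity counter against the last seen. */
static bool cc_ok(unsigned char *last_cc, unsigned char pkt_cc)
{
	bool ok = ((*last_cc + 1) & 0x0f) == pkt_cc;

	if (!ok)
		printf("missed packet: %u instead of %u!\n",
		       pkt_cc, (*last_cc + 1) & 0x0f);
	*last_cc = pkt_cc;	/* update only after the comparison */
	return ok;
}

int main(void)
{
	unsigned char cc = 14;

	cc_ok(&cc, 15);	/* in sequence */
	cc_ok(&cc, 0);	/* wraps 15 -> 0, still in sequence */
	cc_ok(&cc, 2);	/* packet 1 lost: diagnostic prints "2 instead of 1" */
	return 0;
}
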
index cc0a789..bc6950a 100644 (file)
@@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
        }
 
        if (events->eventw == events->eventr) {
-               int ret;
+               struct wait_queue_entry wait;
+               int ret = 0;
 
                if (flags & O_NONBLOCK)
                        return -EWOULDBLOCK;
 
-               ret = wait_event_interruptible(events->wait_queue,
-                                              dvb_frontend_test_event(fepriv, events));
-
+               init_waitqueue_entry(&wait, current);
+               add_wait_queue(&events->wait_queue, &wait);
+               while (!dvb_frontend_test_event(fepriv, events)) {
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
+               }
+               remove_wait_queue(&events->wait_queue, &wait);
                if (ret < 0)
                        return ret;
        }
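
This hunk open-codes the wait with wait_woken() instead of wait_event_interruptible(), presumably so the condition, which may itself sleep, is evaluated while the task is runnable, with signals checked explicitly each pass. A userspace skeleton of the loop shape only, with the waiting and signal checks stubbed out:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: poll a condition that may itself block. */
static int polls;

static bool event_ready(void)
{
	/* In the driver this takes a semaphore, i.e. it can sleep. */
	return ++polls >= 3;
}

static bool signal_pending_stub(void) { return false; }

static void wait_woken_stub(void)
{
	/* Kernel: sleeps in TASK_INTERRUPTIBLE until woken or timed out. */
}

int main(void)
{
	int ret = 0;

	/* Open-coded wait loop: the condition runs in a runnable state. */
	while (!event_ready()) {
		wait_woken_stub();
		if (signal_pending_stub()) {
			ret = -1;	/* -ERESTARTSYS in the kernel */
			break;
		}
	}
	printf("ret=%d after %d polls\n", ret, polls);
	return 0;
}
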
@@ -809,15 +817,26 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
 
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
 
+       mutex_lock(&fe->remove_mutex);
+
        if (fe->exit != DVB_FE_DEVICE_REMOVED)
                fe->exit = DVB_FE_NORMAL_EXIT;
        mb();
 
-       if (!fepriv->thread)
+       if (!fepriv->thread) {
+               mutex_unlock(&fe->remove_mutex);
                return;
+       }
 
        kthread_stop(fepriv->thread);
 
+       mutex_unlock(&fe->remove_mutex);
+
+       if (fepriv->dvbdev->users < -1) {
+               wait_event(fepriv->dvbdev->wait_queue,
+                          fepriv->dvbdev->users == -1);
+       }
+
        sema_init(&fepriv->sem, 1);
        fepriv->state = FESTATE_IDLE;
 
@@ -2761,9 +2780,13 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
        struct dvb_adapter *adapter = fe->dvb;
        int ret;
 
+       mutex_lock(&fe->remove_mutex);
+
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
-       if (fe->exit == DVB_FE_DEVICE_REMOVED)
-               return -ENODEV;
+       if (fe->exit == DVB_FE_DEVICE_REMOVED) {
+               ret = -ENODEV;
+               goto err_remove_mutex;
+       }
 
        if (adapter->mfe_shared == 2) {
                mutex_lock(&adapter->mfe_lock);
@@ -2771,7 +2794,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                        if (adapter->mfe_dvbdev &&
                            !adapter->mfe_dvbdev->writers) {
                                mutex_unlock(&adapter->mfe_lock);
-                               return -EBUSY;
+                               ret = -EBUSY;
+                               goto err_remove_mutex;
                        }
                        adapter->mfe_dvbdev = dvbdev;
                }
@@ -2794,8 +2818,10 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                        while (mferetry-- && (mfedev->users != -1 ||
                                              mfepriv->thread)) {
                                if (msleep_interruptible(500)) {
-                                       if (signal_pending(current))
-                                               return -EINTR;
+                                       if (signal_pending(current)) {
+                                               ret = -EINTR;
+                                               goto err_remove_mutex;
+                                       }
                                }
                        }
 
@@ -2807,7 +2833,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                                if (mfedev->users != -1 ||
                                    mfepriv->thread) {
                                        mutex_unlock(&adapter->mfe_lock);
-                                       return -EBUSY;
+                                       ret = -EBUSY;
+                                       goto err_remove_mutex;
                                }
                                adapter->mfe_dvbdev = dvbdev;
                        }
@@ -2866,6 +2893,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
 
        if (adapter->mfe_shared)
                mutex_unlock(&adapter->mfe_lock);
+
+       mutex_unlock(&fe->remove_mutex);
        return ret;
 
 err3:
@@ -2887,6 +2916,9 @@ err1:
 err0:
        if (adapter->mfe_shared)
                mutex_unlock(&adapter->mfe_lock);
+
+err_remove_mutex:
+       mutex_unlock(&fe->remove_mutex);
        return ret;
 }
 
@@ -2897,6 +2929,8 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
        int ret;
 
+       mutex_lock(&fe->remove_mutex);
+
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
 
        if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
@@ -2918,10 +2952,18 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
                }
                mutex_unlock(&fe->dvb->mdev_lock);
 #endif
-               if (fe->exit != DVB_FE_NO_EXIT)
-                       wake_up(&dvbdev->wait_queue);
                if (fe->ops.ts_bus_ctrl)
                        fe->ops.ts_bus_ctrl(fe, 0);
+
+               if (fe->exit != DVB_FE_NO_EXIT) {
+                       mutex_unlock(&fe->remove_mutex);
+                       wake_up(&dvbdev->wait_queue);
+               } else {
+                       mutex_unlock(&fe->remove_mutex);
+               }
+
+       } else {
+               mutex_unlock(&fe->remove_mutex);
        }
 
        dvb_frontend_put(fe);
@@ -3022,6 +3064,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
        fepriv = fe->frontend_priv;
 
        kref_init(&fe->refcount);
+       mutex_init(&fe->remove_mutex);
 
        /*
         * After initialization, there need to be two references: one
index 8a2febf..8bb8dd3 100644 (file)
@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
        return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
 }
 
+static int locked_dvb_net_open(struct inode *inode, struct file *file)
+{
+       struct dvb_device *dvbdev = file->private_data;
+       struct dvb_net *dvbnet = dvbdev->priv;
+       int ret;
+
+       if (mutex_lock_interruptible(&dvbnet->remove_mutex))
+               return -ERESTARTSYS;
+
+       if (dvbnet->exit) {
+               mutex_unlock(&dvbnet->remove_mutex);
+               return -ENODEV;
+       }
+
+       ret = dvb_generic_open(inode, file);
+
+       mutex_unlock(&dvbnet->remove_mutex);
+
+       return ret;
+}
+
 static int dvb_net_close(struct inode *inode, struct file *file)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dvb_net *dvbnet = dvbdev->priv;
 
+       mutex_lock(&dvbnet->remove_mutex);
+
        dvb_generic_release(inode, file);
 
-       if(dvbdev->users == 1 && dvbnet->exit == 1)
+       if (dvbdev->users == 1 && dvbnet->exit == 1) {
+               mutex_unlock(&dvbnet->remove_mutex);
                wake_up(&dvbdev->wait_queue);
+       } else {
+               mutex_unlock(&dvbnet->remove_mutex);
+       }
+
        return 0;
 }
 
@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
 static const struct file_operations dvb_net_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = dvb_net_ioctl,
-       .open = dvb_generic_open,
+       .open = locked_dvb_net_open,
        .release = dvb_net_close,
        .llseek = noop_llseek,
 };
@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
 {
        int i;
 
+       mutex_lock(&dvbnet->remove_mutex);
        dvbnet->exit = 1;
+       mutex_unlock(&dvbnet->remove_mutex);
+
        if (dvbnet->dvbdev->users < 1)
                wait_event(dvbnet->dvbdev->wait_queue,
-                               dvbnet->dvbdev->users==1);
+                               dvbnet->dvbdev->users == 1);
 
        dvb_unregister_device(dvbnet->dvbdev);
 
@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
        int i;
 
        mutex_init(&dvbnet->ioctl_mutex);
+       mutex_init(&dvbnet->remove_mutex);
        dvbnet->demux = dmx;
 
        for (i=0; i<DVB_NET_DEVICES_MAX; i++)
index e9b3ce0..a4b05e3 100644 (file)
@@ -27,6 +27,7 @@
 #include <media/tuner.h>
 
 static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
 static int dvbdev_debug;
 
 module_param(dvbdev_debug, int, 0644);
@@ -453,14 +454,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
                        enum dvb_device_type type, int demux_sink_pads)
 {
        struct dvb_device *dvbdev;
-       struct file_operations *dvbdevfops;
+       struct file_operations *dvbdevfops = NULL;
+       struct dvbdevfops_node *node = NULL, *new_node = NULL;
        struct device *clsdev;
        int minor;
        int id, ret;
 
        mutex_lock(&dvbdev_register_lock);
 
-       if ((id = dvbdev_get_free_id (adap, type)) < 0){
+       if ((id = dvbdev_get_free_id (adap, type)) < 0) {
                mutex_unlock(&dvbdev_register_lock);
                *pdvbdev = NULL;
                pr_err("%s: couldn't find free device id\n", __func__);
@@ -468,18 +470,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        }
 
        *pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
-
        if (!dvbdev){
                mutex_unlock(&dvbdev_register_lock);
                return -ENOMEM;
        }
 
-       dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+       /*
+        * When a device of the same type is probe()d more than once,
+        * the first allocated fops are used. This prevents memory leaks
+        * that can occur when the same device is probe()d repeatedly.
+        */
+       list_for_each_entry(node, &dvbdevfops_list, list_head) {
+               if (node->fops->owner == adap->module &&
+                               node->type == type &&
+                               node->template == template) {
+                       dvbdevfops = node->fops;
+                       break;
+               }
+       }
 
-       if (!dvbdevfops){
-               kfree (dvbdev);
-               mutex_unlock(&dvbdev_register_lock);
-               return -ENOMEM;
+       if (dvbdevfops == NULL) {
+               dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+               if (!dvbdevfops) {
+                       kfree(dvbdev);
+                       mutex_unlock(&dvbdev_register_lock);
+                       return -ENOMEM;
+               }
+
+               new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+               if (!new_node) {
+                       kfree(dvbdevfops);
+                       kfree(dvbdev);
+                       mutex_unlock(&dvbdev_register_lock);
+                       return -ENOMEM;
+               }
+
+               new_node->fops = dvbdevfops;
+               new_node->type = type;
+               new_node->template = template;
+               list_add_tail (&new_node->list_head, &dvbdevfops_list);
        }
 
        memcpy(dvbdev, template, sizeof(struct dvb_device));
@@ -490,20 +519,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        dvbdev->priv = priv;
        dvbdev->fops = dvbdevfops;
        init_waitqueue_head (&dvbdev->wait_queue);
-
        dvbdevfops->owner = adap->module;
-
        list_add_tail (&dvbdev->list_head, &adap->device_list);
-
        down_write(&minor_rwsem);
 #ifdef CONFIG_DVB_DYNAMIC_MINORS
        for (minor = 0; minor < MAX_DVB_MINORS; minor++)
                if (dvb_minors[minor] == NULL)
                        break;
-
        if (minor == MAX_DVB_MINORS) {
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
                up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
@@ -512,41 +541,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 #else
        minor = nums2minor(adap->num, type, id);
 #endif
-
        dvbdev->minor = minor;
        dvb_minors[minor] = dvb_device_get(dvbdev);
        up_write(&minor_rwsem);
-
        ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
        if (ret) {
                pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
                      __func__);
-
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                dvb_media_device_free(dvbdev);
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
                mutex_unlock(&dvbdev_register_lock);
                return ret;
        }
 
-       mutex_unlock(&dvbdev_register_lock);
-
        clsdev = device_create(dvb_class, adap->device,
                               MKDEV(DVB_MAJOR, minor),
                               dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
        if (IS_ERR(clsdev)) {
                pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
                       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                dvb_media_device_free(dvbdev);
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
+               mutex_unlock(&dvbdev_register_lock);
                return PTR_ERR(clsdev);
        }
+
        dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
                adap->num, dnames[type], id, minor, minor);
 
+       mutex_unlock(&dvbdev_register_lock);
        return 0;
 }
 EXPORT_SYMBOL(dvb_register_device);
@@ -575,7 +610,6 @@ static void dvb_free_device(struct kref *ref)
 {
        struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
 
-       kfree (dvbdev->fops);
        kfree (dvbdev);
 }
 
@@ -1081,9 +1115,17 @@ error:
 
 static void __exit exit_dvbdev(void)
 {
+       struct dvbdevfops_node *node, *next;
+
        class_destroy(dvb_class);
        cdev_del(&dvb_device_cdev);
        unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+
+       list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+               list_del (&node->list_head);
+               kfree(node->fops);
+               kfree(node);
+       }
 }
 
 subsys_initcall(init_dvbdev);
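
The dvbdev change keeps one kmemdup()'d fops per (module, type, template) on a global list, so repeated probe()s of the same device reuse the first copy instead of leaking one per registration; exit_dvbdev() then frees the list in a single pass. A sketch of the lookup-or-allocate pattern over a linked list, with hypothetical key fields and payload:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cache node keyed the way the dvbdevfops list is. */
struct fops_node {
	const void *owner, *template;	/* identity keys */
	int type;
	int *fops;			/* the shared, duplicated payload */
	struct fops_node *next;
};

static struct fops_node *cache;

static int *get_fops(const void *owner, int type, const void *template)
{
	struct fops_node *n;

	for (n = cache; n; n = n->next)		/* reuse an existing copy */
		if (n->owner == owner && n->type == type &&
		    n->template == template)
			return n->fops;

	n = calloc(1, sizeof(*n));		/* first probe: allocate once */
	n->owner = owner;
	n->type = type;
	n->template = template;
	n->fops = malloc(sizeof(int));
	n->next = cache;
	cache = n;
	return n->fops;
}

int main(void)
{
	static int tmpl;
	int *a = get_fops(&tmpl, 0, &tmpl);
	int *b = get_fops(&tmpl, 0, &tmpl);	/* second probe */

	printf("same fops reused: %s\n", a == b ? "yes" : "no");
	return 0;
}
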
index 1f1753f..0782f83 100644 (file)
@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
 static struct i2c_driver mn88443x_driver = {
        .driver = {
                .name = "mn88443x",
-               .of_match_table = of_match_ptr(mn88443x_of_match),
+               .of_match_table = mn88443x_of_match,
        },
        .probe_new = mn88443x_probe,
        .remove   = mn88443x_remove,
index 8287851..d85bfbb 100644 (file)
@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
        netup_unidvb_dma_enable(dma, 0);
        msleep(50);
        cancel_work_sync(&dma->work);
-       del_timer(&dma->timeout);
+       del_timer_sync(&dma->timeout);
 }
 
 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
                ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
                ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
                pci_dev->irq);
-       if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
-                       "netup_unidvb", pci_dev) < 0) {
-               dev_err(&pci_dev->dev,
-                       "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
-               goto irq_request_err;
-       }
+
        ndev->dma_size = 2 * 188 *
                NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
        ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
                dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
                goto dma_setup_err;
        }
+
+       if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+                       "netup_unidvb", pci_dev) < 0) {
+               dev_err(&pci_dev->dev,
+                       "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+               goto dma_setup_err;
+       }
+
        dev_info(&pci_dev->dev,
                "netup_unidvb: device has been initialized\n");
        return 0;
@@ -951,8 +954,6 @@ spi_setup_err:
        dma_free_coherent(&pci_dev->dev, ndev->dma_size,
                        ndev->dma_virt, ndev->dma_phys);
 dma_alloc_err:
-       free_irq(pci_dev->irq, pci_dev);
-irq_request_err:
        iounmap(ndev->lmmio1);
 pci_bar1_error:
        iounmap(ndev->lmmio0);
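
The netup_unidvb hunk moves request_irq() after DMA setup so a shared interrupt cannot reach an ISR whose resources do not exist yet, and the now-unreachable irq_request_err unwinding goes away. A toy model of why the ordering matters, with the stub deliberately firing the handler immediately:

#include <stdio.h>

/* Hypothetical probe ordering: resources first, shared IRQ last. */
static int dma_ready;

static void isr(void)
{
	/* A shared IRQ line can fire as soon as request_irq() returns. */
	if (!dma_ready)
		printf("BUG: ISR ran before DMA buffers existed\n");
	else
		printf("ISR handled safely\n");
}

static void request_irq_stub(void (*handler)(void))
{
	handler();	/* model the worst case: the line fires right away */
}

int main(void)
{
	dma_ready = 1;			/* set up DMA before the IRQ ... */
	request_irq_stub(isr);		/* ... so an early interrupt is safe */
	return 0;
}
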
index 44540de..d3b5cb4 100644 (file)
@@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
                if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
                        if (msg[i].addr ==
                                ce6230_zl10353_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = DEMOD_READ;
                                req.value = msg[i].addr >> 1;
                                req.index = msg[i].buf[0];
@@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
                } else {
                        if (msg[i].addr ==
                                ce6230_zl10353_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = DEMOD_WRITE;
                                req.value = msg[i].addr >> 1;
                                req.index = msg[i].buf[0];
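
This driver, and ec168 and rtl28xxu below, build a control request from msg[i].buf[0]; the added length checks reject zero-length messages with -EOPNOTSUPP before that dereference. A sketch of the validation with a pared-down message type:

#include <stdio.h>

/* Hypothetical i2c message, trimmed to the fields the check needs. */
struct i2c_msg_s {
	unsigned short len;
	unsigned char *buf;
};

/* Reject messages too short to carry the register address in buf[0]. */
static int build_req(const struct i2c_msg_s *msg, unsigned char *reg)
{
	if (msg->len < 1)
		return -95;	/* -EOPNOTSUPP, as in these drivers */
	*reg = msg->buf[0];	/* safe: the length was validated first */
	return 0;
}

int main(void)
{
	unsigned char payload[] = { 0x42 };
	struct i2c_msg_s good = { 1, payload };
	struct i2c_msg_s bad  = { 0, NULL };
	unsigned char reg;

	printf("good: %d\n", build_req(&good, &reg));
	printf("bad:  %d\n", build_req(&bad, &reg));
	return 0;
}
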
index 7ed0ab9..0e4773f 100644 (file)
@@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
        while (i < num) {
                if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
                        if (msg[i].addr == ec168_ec100_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = READ_DEMOD;
                                req.value = 0;
                                req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        }
                } else {
                        if (msg[i].addr == ec168_ec100_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = WRITE_DEMOD;
                                req.value = msg[i].buf[1]; /* val */
                                req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = ec168_ctrl_msg(d, &req);
                                i += 1;
                        } else {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = WRITE_I2C;
                                req.value = msg[i].buf[0]; /* val */
                                req.index = 0x0100 + msg[i].addr; /* I2C addr */
index 795a012..f7884bb 100644 (file)
@@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        ret = -EOPNOTSUPP;
                        goto err_mutex_unlock;
                } else if (msg[0].addr == 0x10) {
+                       if (msg[0].len < 1 || msg[1].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 1 - integrated demod */
                        if (msg[0].buf[0] == 0x00) {
                                /* return demod page from driver cache */
@@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = rtl28xxu_ctrl_msg(d, &req);
                        }
                } else if (msg[0].len < 2) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 2 - old I2C */
                        req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
                        req.index = CMD_I2C_RD;
@@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        ret = -EOPNOTSUPP;
                        goto err_mutex_unlock;
                } else if (msg[0].addr == 0x10) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 1 - integrated demod */
                        if (msg[0].buf[0] == 0x00) {
+                               if (msg[0].len < 2) {
+                                       ret = -EOPNOTSUPP;
+                                       goto err_mutex_unlock;
+                               }
                                /* save demod page for later demod access */
                                dev->page = msg[0].buf[1];
                                ret = 0;
@@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = rtl28xxu_ctrl_msg(d, &req);
                        }
                } else if ((msg[0].len < 23) && (!dev->new_i2c_write)) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 2 - old I2C */
                        req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
                        req.index = CMD_I2C_WR;
index 7d78ee0..a31c6f8 100644 (file)
@@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
                        /* write/read request */
                        if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
                                req = 0xB9;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
                                value = msg[i].addr + (msg[i].len << 8);
                                length = msg[i + 1].len + 6;
@@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
 
                                /* demod 16bit addr */
                                req = 0xBD;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
                                value = msg[i].addr + (2 << 8);
                                length = msg[i].len - 2;
@@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
                        } else {
 
                                req = 0xBD;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = msg[i].buf[0] & 0x00FF;
                                value = msg[i].addr + (1 << 8);
                                length = msg[i].len - 1;
index 2756815..32134be 100644 (file)
@@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
                warn("more than 2 i2c messages at a time is not handled yet. TODO.");
 
        for (i = 0; i < num; i++) {
+               if (msg[i].len < 1) {
+                       i = -EOPNOTSUPP;
+                       break;
+               }
                /* write/read request */
                if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
                        if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0,
index 0ca7642..8747960 100644 (file)
@@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
        for (i = 0; i < 6; i++) {
                obuf[1] = 0xf0 + i;
                if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
-                       break;
+                       return -1;
                else
                        mac[i] = ibuf[0];
        }
index 9501b10..0df1027 100644 (file)
@@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB
        bool "pvrusb2 ATSC/DVB support"
        default y
        depends on VIDEO_PVRUSB2 && DVB_CORE
+       depends on VIDEO_PVRUSB2=m || DVB_CORE=y
        select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
        select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
        select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
index 38822ce..c4474d4 100644 (file)
@@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
        dvb_dmx_release(&dec->demux);
        if (dec->fe) {
                dvb_unregister_frontend(dec->fe);
-               if (dec->fe->ops.release)
-                       dec->fe->ops.release(dec->fe);
+               dvb_frontend_detach(dec->fe);
        }
        dvb_unregister_adapter(&dec->adapter);
 }
index 00c33ed..d920c41 100644 (file)
@@ -264,6 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
                goto out_put;
        }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        blk_execute_rq(req, false);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        blk_mq_free_request(req);
@@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = idatas;
        req_to_mmc_queue_req(req)->ioc_count = 1;
        blk_execute_rq(req, false);
@@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
        }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
        req_to_mmc_queue_req(req)->ioc_count = n;
        blk_execute_rq(req, false);
@@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
        if (IS_ERR(req))
                return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        blk_execute_rq(req, false);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        if (ret >= 0) {
@@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
                goto out_free;
        }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(req, false);
        err = req_to_mmc_queue_req(req)->drv_op_result;
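
Each mmc block hunk seeds drv_op_result with -EIO before the request is queued. If the block layer completes the request without the driver op ever running (for example on a failing card), the result field would otherwise be read back uninitialized and could look like success. Condensed, using the names from the surrounding code:

    req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
    req_to_mmc_queue_req(req)->drv_op_result = -EIO; /* pessimistic default */
    blk_execute_rq(req, false);
    /* Still -EIO here unless the driver op actually ran and overwrote it. */
    ret = req_to_mmc_queue_req(req)->drv_op_result;
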
index b24aa27..d2f6250 100644 (file)
@@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
 
        if (host->mmc->caps & MMC_CAP_HW_RESET) {
                priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
-               if (IS_ERR(priv->rst_hw))
-                       return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
-                                            "reset controller error\n");
+               if (IS_ERR(priv->rst_hw)) {
+                       ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
+                                           "reset controller error\n");
+                       goto free;
+               }
                if (priv->rst_hw)
                        host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
        }
index d7c0c0b..eebf946 100644 (file)
@@ -1634,6 +1634,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
        if (ret)
                return ret;
 
+       /* HS400/HS400ES require 8 bit bus */
+       if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA))
+               host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+
        if (mmc_gpio_get_cd(host->mmc) >= 0)
                host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 
@@ -1724,10 +1728,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                host->mmc_host_ops.init_card = usdhc_init_card;
        }
 
-       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
-       if (err)
-               goto disable_ahb_clk;
-
        if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
                sdhci_esdhc_ops.platform_execute_tuning =
                                        esdhc_executing_tuning;
@@ -1735,15 +1735,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
        if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
                host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-       if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
-           imx_data->socdata->flags & ESDHC_FLAG_HS400)
+       if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
                host->mmc->caps2 |= MMC_CAP2_HS400;
 
        if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
                host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
 
-       if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
-           imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+       if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
                host->mmc->caps2 |= MMC_CAP2_HS400_ES;
                host->mmc_host_ops.hs400_enhanced_strobe =
                                        esdhc_hs400_enhanced_strobe;
@@ -1765,6 +1763,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                        goto disable_ahb_clk;
        }
 
+       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+       if (err)
+               goto disable_ahb_clk;
+
        sdhci_esdhc_imx_hwinit(host);
 
        err = sdhci_add_host(host);
index 3fed888..edbaa14 100644 (file)
@@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event,
                unblock_netpoll_tx();
                break;
        case NETDEV_FEAT_CHANGE:
-               bond_compute_features(bond);
+               if (!bond->notifier_ctx) {
+                       bond->notifier_ctx = true;
+                       bond_compute_features(bond);
+                       bond->notifier_ctx = false;
+               }
                break;
        case NETDEV_RESEND_IGMP:
                /* Propagate to master device */
@@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
+       bond->notifier_ctx = false;
+
        spin_lock_init(&bond->stats_lock);
        netdev_lockdep_set_classes(bond_dev);
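
The bonding change is a re-entrancy guard: bond_compute_features() can itself cause a slave to emit another NETDEV_FEAT_CHANGE, which would re-enter this handler and recurse. A generic sketch of the guard with hypothetical names; a plain bool suffices because the notifier always runs under RTNL:

    struct guarded_dev {
            bool in_notifier;               /* set while the handler runs */
    };

    static void on_feat_change(struct guarded_dev *d)
    {
            if (d->in_notifier)
                    return;                 /* re-entered from our own update */
            d->in_notifier = true;
            recompute_features(d);          /* hypothetical; may re-raise the event */
            d->in_notifier = false;
    }
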
 
index 3ceccaf..b190007 100644 (file)
@@ -95,7 +95,7 @@ config CAN_AT91
 
 config CAN_BXCAN
        tristate "STM32 Basic Extended CAN (bxCAN) devices"
-       depends on OF || ARCH_STM32 || COMPILE_TEST
+       depends on ARCH_STM32 || COMPILE_TEST
        depends on HAS_IOMEM
        select CAN_RX_OFFLOAD
        help
index e26ccd4..027a8a1 100644 (file)
 #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
 #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
 
-#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)
+#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
 
 /* Filter primary register (FMR) bits */
 #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
@@ -135,6 +135,12 @@ enum bxcan_lec_code {
        BXCAN_LEC_UNUSED
 };
 
+enum bxcan_cfg {
+       BXCAN_CFG_SINGLE = 0,
+       BXCAN_CFG_DUAL_PRIMARY,
+       BXCAN_CFG_DUAL_SECONDARY
+};
+
 /* Structure of the message buffer */
 struct bxcan_mb {
        u32 id;                 /* can identifier */
@@ -167,7 +173,7 @@ struct bxcan_priv {
        struct regmap *gcan;
        int tx_irq;
        int sce_irq;
-       bool primary;
+       enum bxcan_cfg cfg;
        struct clk *clk;
        spinlock_t rmw_lock;    /* lock for read-modify-write operations */
        unsigned int tx_head;
@@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
        spin_unlock_irqrestore(&priv->rmw_lock, flags);
 }
 
-static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
 {
-       unsigned int fid = BXCAN_FILTER_ID(primary);
+       unsigned int fid = BXCAN_FILTER_ID(cfg);
        u32 fmask = BIT(fid);
 
        regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
 }
 
-static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
 {
-       unsigned int fid = BXCAN_FILTER_ID(primary);
+       unsigned int fid = BXCAN_FILTER_ID(cfg);
        u32 fmask = BIT(fid);
 
        /* Filter settings:
@@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev)
                  BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
                  BXCAN_BTR_SJW_MASK, set);
 
-       bxcan_enable_filters(priv, priv->primary);
+       bxcan_enable_filters(priv, priv->cfg);
 
        /* Clear all internal status */
        priv->tx_head = 0;
@@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev)
                  BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
                  BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
                  BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
-       bxcan_disable_filters(priv, priv->primary);
+       bxcan_disable_filters(priv, priv->cfg);
        bxcan_enter_sleep_mode(priv);
        priv->can.state = CAN_STATE_STOPPED;
 }
@@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev)
        struct clk *clk = NULL;
        void __iomem *regs;
        struct regmap *gcan;
-       bool primary;
+       enum bxcan_cfg cfg;
        int err, rx_irq, tx_irq, sce_irq;
 
        regs = devm_platform_ioremap_resource(pdev, 0);
@@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev)
                return PTR_ERR(gcan);
        }
 
-       primary = of_property_read_bool(np, "st,can-primary");
+       if (of_property_read_bool(np, "st,can-primary"))
+               cfg = BXCAN_CFG_DUAL_PRIMARY;
+       else if (of_property_read_bool(np, "st,can-secondary"))
+               cfg = BXCAN_CFG_DUAL_SECONDARY;
+       else
+               cfg = BXCAN_CFG_SINGLE;
+
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(dev, "failed to get clock\n");
@@ -992,7 +1004,7 @@ static int bxcan_probe(struct platform_device *pdev)
        priv->clk = clk;
        priv->tx_irq = tx_irq;
        priv->sce_irq = sce_irq;
-       priv->primary = primary;
+       priv->cfg = cfg;
        priv->can.clock.freq = clk_get_rate(clk);
        spin_lock_init(&priv->rmw_lock);
        priv->tx_head = 0;
index 241ec63..f6d05b3 100644 (file)
@@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
        /* check flag whether this packet has to be looped back */
        if (!(dev->flags & IFF_ECHO) ||
            (skb->protocol != htons(ETH_P_CAN) &&
-            skb->protocol != htons(ETH_P_CANFD))) {
+            skb->protocol != htons(ETH_P_CANFD) &&
+            skb->protocol != htons(ETH_P_CANXL))) {
                kfree_skb(skb);
                return 0;
        }
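
The one-liner above adds CAN XL to the set of protocols eligible for TX echo. Spelled out as a predicate (the helper is illustrative, not part of the patch):

    #include <linux/can.h>
    #include <linux/if_ether.h>
    #include <linux/netdevice.h>

    static bool can_proto_is_echoable(const struct sk_buff *skb,
                                      const struct net_device *dev)
    {
            /* Echo only on IFF_ECHO devices, and only for classic CAN,
             * CAN FD, or CAN XL frames; anything else is freed instead.
             */
            return (dev->flags & IFF_ECHO) &&
                   (skb->protocol == htons(ETH_P_CAN) ||
                    skb->protocol == htons(ETH_P_CANFD) ||
                    skb->protocol == htons(ETH_P_CANXL));
    }
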
index 53e8a91..be189ed 100644 (file)
@@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
 /* Shared receive buffer registers */
 #define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
 #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
 #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
 #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
 #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
 #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
 /* EPCS flash controller registers */
 #define KVASER_PCIEFD_SPI_BASE 0x1fc00
@@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 /* DMA support */
 #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
 
+/* SRB current packet level */
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
+
 /* DMA Enable */
 #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
 
@@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
              KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
              KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
              KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
-             KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+             KVASER_PCIEFD_KCAN_IRQ_TAR;
 
        iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
@@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
 
        if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
                mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+       else
+               mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
 
        mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
        mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
@@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
 
        spin_lock_irqsave(&can->lock, irq);
        iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
-       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
        status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
@@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
        iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
        iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
 
-       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
                iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
                del_timer(&can->bec_poll_timer);
        }
+       can->can.state = CAN_STATE_STOPPED;
        close_candev(netdev);
 
        return ret;
@@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
                SET_NETDEV_DEV(netdev, &pcie->pci->dev);
 
                iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
-               iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
-                         KVASER_PCIEFD_KCAN_IRQ_TFD,
+               iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                          can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
                pcie->can[i] = can;
@@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
 {
        int i;
        u32 srb_status;
+       u32 srb_packet_count;
        dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
 
        /* Disable the DMA */
@@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
                  KVASER_PCIEFD_SRB_CMD_RDB1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
 
+       /* Empty Rx FIFO */
+       srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
+                          KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
+       while (srb_packet_count) {
+               /* Drop current packet in FIFO */
+               ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+               srb_packet_count--;
+       }
+
        srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
        if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
                dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
@@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
                cmd = KVASER_PCIEFD_KCAN_CMD_AT;
                cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
                iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
-
-               iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
-                         can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
        } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
                   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
                   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
@@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
        if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
                netdev_err(can->can.dev, "Tx FIFO overflow\n");
 
-       if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
-               u8 count = ioread32(can->reg_base +
-                                   KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
-               if (count == 0)
-                       iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
-                                 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
-       }
-
        if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
                netdev_err(can->can.dev,
                           "Fail to change bittiming, when not in reset mode\n");
@@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        if (err)
                goto err_teardown_can_ctrls;
 
+       err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+                         IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+       if (err)
+               goto err_teardown_can_ctrls;
+
        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
 
@@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
 
-       err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
-                         IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
-       if (err)
-               goto err_teardown_can_ctrls;
-
        err = kvaser_pciefd_reg_candev(pcie);
        if (err)
                goto err_free_irq;
@@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        return 0;
 
 err_free_irq:
+       /* Disable PCI interrupts */
+       iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
        free_irq(pcie->pci->irq, pcie);
 
 err_teardown_can_ctrls:
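
The probe reordering in kvaser_pciefd follows the standard rule for shared IRQ lines: install the handler before unmasking any interrupt source, and on the error path mask everything again before free_irq(), since a shared line may fire at any moment on another device's behalf. Schematically (the intervening register writes are elided):

    err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
                      IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
    if (err)
            goto err_teardown_can_ctrls;
    /* ... only now unmask the SRB and PCI interrupt enables ... */

    err_free_irq:
            iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG); /* mask first */
            free_irq(pcie->pci->irq, pcie);
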
index aec9d4f..d19b630 100644 (file)
 /* Offset 0x10: Extended Port Control Command */
 #define MV88E6393X_PORT_EPC_CMD                0x10
 #define MV88E6393X_PORT_EPC_CMD_BUSY   0x8000
-#define MV88E6393X_PORT_EPC_CMD_WRITE  0x0300
+#define MV88E6393X_PORT_EPC_CMD_WRITE  0x3000
 #define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE   0x02
 
 /* Offset 0x11: Extended Port Control Data */
index 919027c..c37d2e5 100644 (file)
@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
        a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
 }
 
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+       u32 mask = A5PSW_PORT_ENA_TX(port);
+       u32 reg = enable ? mask : 0;
+
+       /* Even though the port TX is disabled through TXENA bit in the
+        * PORT_ENA register, it can still send BPDUs. This depends on the tag
+        * configuration added when sending packets from the CPU port to the
+        * switch port. Indeed, when using forced forwarding without filtering,
+        * even disabled ports will be able to send packets that are tagged.
+        * This allows implementing STP support when ports are in a state where
+        * forwarding traffic should be stopped but BPDUs should still be sent.
+        */
+       a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
 {
        u32 port_ena = 0;
@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
        return 0;
 }
 
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+       u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+       u32 reg = !learn ? mask : 0;
+
+       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+       u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+       u32 reg = block ? mask : 0;
+
+       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
 static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
                                          bool set)
 {
@@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
                a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
 }
 
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+                                     bool standalone)
+{
+       a5psw_port_learning_set(a5psw, port, !standalone);
+       a5psw_flooding_set_resolution(a5psw, port, !standalone);
+       a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
 static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
                                  struct dsa_bridge bridge,
                                  bool *tx_fwd_offload,
@@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
        }
 
        a5psw->br_dev = bridge.dev;
-       a5psw_flooding_set_resolution(a5psw, port, true);
-       a5psw_port_mgmtfwd_set(a5psw, port, false);
+       a5psw_port_set_standalone(a5psw, port, false);
 
        return 0;
 }
@@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 {
        struct a5psw *a5psw = ds->priv;
 
-       a5psw_flooding_set_resolution(a5psw, port, false);
-       a5psw_port_mgmtfwd_set(a5psw, port, true);
+       a5psw_port_set_standalone(a5psw, port, true);
 
        /* No more ports bridged */
        if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
@@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 
 static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 {
-       u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+       bool learning_enabled, rx_enabled, tx_enabled;
        struct a5psw *a5psw = ds->priv;
-       u32 reg = 0;
 
        switch (state) {
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
-               reg |= A5PSW_INPUT_LEARN_DIS(port);
-               reg |= A5PSW_INPUT_LEARN_BLOCK(port);
-               break;
        case BR_STATE_LISTENING:
-               reg |= A5PSW_INPUT_LEARN_DIS(port);
+               rx_enabled = false;
+               tx_enabled = false;
+               learning_enabled = false;
                break;
        case BR_STATE_LEARNING:
-               reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+               rx_enabled = false;
+               tx_enabled = false;
+               learning_enabled = true;
                break;
        case BR_STATE_FORWARDING:
-       default:
+               rx_enabled = true;
+               tx_enabled = true;
+               learning_enabled = true;
                break;
+       default:
+               dev_err(ds->dev, "invalid STP state: %d\n", state);
+               return;
        }
 
-       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+       a5psw_port_learning_set(a5psw, port, learning_enabled);
+       a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+       a5psw_port_tx_enable(a5psw, port, tx_enabled);
 }
 
 static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
@@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
        }
 
        /* Configure management port */
-       reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+       reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
        a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
 
        /* Set pattern 0 to forward all frame to mgmt port */
@@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
                if (dsa_port_is_unused(dp))
                        continue;
 
-               /* Enable egress flooding for CPU port */
-               if (dsa_port_is_cpu(dp))
+               /* Enable egress flooding and learning for CPU port */
+               if (dsa_port_is_cpu(dp)) {
                        a5psw_flooding_set_resolution(a5psw, port, true);
+                       a5psw_port_learning_set(a5psw, port, true);
+               }
 
-               /* Enable management forward only for user ports */
+               /* Enable standalone mode for user ports */
                if (dsa_port_is_user(dp))
-                       a5psw_port_mgmtfwd_set(a5psw, port, true);
+                       a5psw_port_set_standalone(a5psw, port, true);
        }
 
        return 0;
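
The rewritten a5psw_port_stp_state_set() above decomposes each STP state into three independent knobs, while BPDU delivery is preserved separately through the management-forward path described in the a5psw_port_tx_enable() comment. Summarized:

    state        RX       TX       learning
    -----        --       --       --------
    DISABLED     off      off      off
    BLOCKING     off      off      off
    LISTENING    off      off      off
    LEARNING     off      off      on
    FORWARDING   on       on       on
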
index c67abd4..b869192 100644 (file)
@@ -19,6 +19,7 @@
 #define A5PSW_PORT_OFFSET(port)                (0x400 * (port))
 
 #define A5PSW_PORT_ENA                 0x8
+#define A5PSW_PORT_ENA_TX(port)                BIT(port)
 #define A5PSW_PORT_ENA_RX_SHIFT                16
 #define A5PSW_PORT_ENA_TX_RX(port)     (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
                                         BIT(port))
@@ -36,7 +37,7 @@
 #define A5PSW_INPUT_LEARN_BLOCK(p)     BIT(p)
 
 #define A5PSW_MGMT_CFG                 0x20
-#define A5PSW_MGMT_CFG_DISCARD         BIT(7)
+#define A5PSW_MGMT_CFG_ENABLE          BIT(6)
 
 #define A5PSW_MODE_CFG                 0x24
 #define A5PSW_MODE_STATS_RESET         BIT(31)
index d2f4358..ba3e7aa 100644 (file)
@@ -66,8 +66,10 @@ static int max_interrupt_work = 20;
 #include <linux/timer.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
-
 #include <linux/uaccess.h>
+
+#include <net/Space.h>
+
 #include <asm/io.h>
 #include <asm/dma.h>
 
index 82f94b1..5267e9d 100644 (file)
@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
 {
        struct el3_private *lp;
        struct net_device *dev;
+       int ret;
 
        dev_dbg(&link->dev, "3c589_attach()\n");
 
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
 
        dev->ethtool_ops = &netdev_ethtool_ops;
 
-       return tc589_config(link);
+       ret = tc589_config(link);
+       if (ret)
+               goto err_free_netdev;
+
+       return 0;
+
+err_free_netdev:
+       free_netdev(dev);
+       return ret;
 }
 
 static void tc589_detach(struct pcmcia_device *link)
index 0a9118b..bc9c81d 100644 (file)
@@ -52,6 +52,7 @@ static const char version2[] =
 #include <linux/etherdevice.h>
 #include <linux/jiffies.h>
 #include <linux/platform_device.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 
index 6e62c37..7465650 100644 (file)
@@ -66,6 +66,7 @@ static const char version[] =
 #include <linux/isapnp.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
index 5b00c45..119021d 100644 (file)
@@ -37,6 +37,7 @@ static const char version[] =
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 
index 8971665..6cf3818 100644 (file)
@@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
 #include <linux/skbuff.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
index f28ffc3..eca0c92 100644 (file)
@@ -3450,7 +3450,7 @@ err_clk_disable:
        return ret;
 }
 
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -3465,6 +3465,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        /* Disable MAC transmit. TX DMA disabled must be done before this */
        umac_enable_set(priv, CMD_TX_EN, false);
 
+       if (stop_phy)
+               phy_stop(dev->phydev);
        bcmgenet_disable_rx_napi(priv);
        bcmgenet_intr_disable(priv);
 
@@ -3485,7 +3487,7 @@ static int bcmgenet_close(struct net_device *dev)
 
        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
 
-       bcmgenet_netif_stop(dev);
+       bcmgenet_netif_stop(dev, false);
 
        /* Really kill the PHY state machine and disconnect from it */
        phy_disconnect(dev->phydev);
@@ -4303,7 +4305,7 @@ static int bcmgenet_suspend(struct device *d)
 
        netif_device_detach(dev);
 
-       bcmgenet_netif_stop(dev);
+       bcmgenet_netif_stop(dev, true);
 
        if (!device_may_wakeup(d))
                phy_suspend(dev->phydev);
index 06a0c00..276c32c 100644 (file)
@@ -72,6 +72,8 @@
 #include <linux/gfp.h>
 #include <linux/io.h>
 
+#include <net/Space.h>
+
 #include <asm/irq.h>
 #include <linux/atomic.h>
 #if ALLOW_DMA
index 42ec6ca..38e5b5a 100644 (file)
@@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
                netdev_err(fep->netdev, "NOT enough BD for SG!\n");
-               xdp_return_frame(frame);
                return NETDEV_TX_BUSY;
        }
 
@@ -3835,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        txq->tx_skbuff[index] = NULL;
 
+       /* Make sure the updates to the rest of the descriptor are performed before

+        * transferring ownership.
+        */
+       dma_wmb();
+
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
@@ -3844,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
 
+       /* Make sure the updates to bdp are performed before txq->bd.cur. */
+       dma_wmb();
+
        txq->bd.cur = bdp;
 
+       /* Trigger transmission start */
+       writel(0, txq->bd.reg_desc_active);
+
        return 0;
 }
 
@@ -3874,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
                sent_frames++;
        }
 
-       /* Make sure the update to bdp and tx_skbuff are performed. */
-       wmb();
-
-       /* Trigger transmission start */
-       writel(0, txq->bd.reg_desc_active);
-
        __netif_tx_unlock(nq);
 
        return sent_frames;
@@ -4478,9 +4482,11 @@ fec_drv_remove(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        int ret;
 
-       ret = pm_runtime_resume_and_get(&pdev->dev);
+       ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
-               return ret;
+               dev_err(&pdev->dev,
+                       "Failed to resume device in remove callback (%pe)\n",
+                       ERR_PTR(ret));
 
        cancel_work_sync(&fep->tx_timeout_work);
        fec_ptp_stop(pdev);
@@ -4493,8 +4499,13 @@ fec_drv_remove(struct platform_device *pdev)
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
 
-       clk_disable_unprepare(fep->clk_ahb);
-       clk_disable_unprepare(fep->clk_ipg);
+       /* If pm_runtime_get_sync() failed, the clocks are still off, so skip
+        * disabling them again.
+        */
+       if (ret >= 0) {
+               clk_disable_unprepare(fep->clk_ahb);
+               clk_disable_unprepare(fep->clk_ipg);
+       }
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
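
Two of the fec hunks above insert dma_wmb() around the descriptor handoff. The general rule for DMA-coherent rings: every field the device will read must be visible before the ownership bit flips, and bookkeeping such as bd.cur must not be observed to advance ahead of the descriptor writes. A minimal sketch with placeholder field and flag names:

    desc->addr   = cpu_to_le32(dma_handle);
    desc->length = cpu_to_le16(len);
    dma_wmb();                  /* body visible before ownership flips */
    desc->status = cpu_to_le16(DESC_OWN | DESC_INT);    /* placeholders */
    writel(0, ring->reg_desc_active);   /* kick; writel orders prior writes */
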
 
index cbbab5b..b85c412 100644 (file)
@@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
        return head == hw->cmq.csq.next_to_use;
 }
 
-static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+       static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
+               {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+       };
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
+               if (cmdq_tx_timeout_map[i].opcode == opcode)
+                       return cmdq_tx_timeout_map[i].tx_timeout;
+
+       return tx_timeout;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
                                     bool *is_completed)
 {
+       u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
+                                                       hw->cmq.tx_timeout);
        u32 timeout = 0;
 
        do {
@@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
                }
                udelay(1);
                timeout++;
-       } while (timeout < hw->cmq.tx_timeout);
+       } while (timeout < cmdq_tx_timeout);
 }
 
 static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
@@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
         * if multi descriptors to be sent, use the first one to check
         */
        if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
-               hclge_comm_wait_for_resp(hw, &is_completed);
+               hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
+                                        &is_completed);
 
        if (!is_completed)
                ret = -EBADE;
@@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
        cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
 
        /* Setup Tx write back timeout */
-       cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+       cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
 
        /* Setup queue rings */
        ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
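
The lookup added above lets individual opcodes override the queue-wide timeout; since the wait loop polls with udelay(1), the values are effectively microseconds. A worked example (opcode 0x1234 is made up):

    u32 t;

    t = hclge_get_cmdq_tx_timeout(HCLGE_OPC_CFG_RST_TRIGGER, cmdq->tx_timeout);
    /* t == HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS: 500000 polls of ~1 us each */

    t = hclge_get_cmdq_tx_timeout(0x1234, cmdq->tx_timeout);
    /* unlisted opcode: t == cmdq->tx_timeout, i.e. the 30000 default */
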
index de72ecb..18f1b4b 100644 (file)
@@ -54,7 +54,8 @@
 #define HCLGE_COMM_NIC_SW_RST_RDY              BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S          3
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM            1024
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT             30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT     30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS       500000
 
 enum hclge_opcode_type {
        /* Generic commands */
@@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map {
        u16 local_bit;
 };
 
+struct hclge_cmdq_tx_timeout_map {
+       u32 opcode;
+       u32 tx_timeout;
+};
+
 struct hclge_comm_firmware_compat_cmd {
        __le32 compat;
        u8 rsv[20];
index 4c3e90a..d385ffc 100644 (file)
@@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
                .name = "tx_bd_queue",
                .cmd = HNAE3_DBG_CMD_TX_BD,
                .dentry = HNS3_DBG_DENTRY_TX_BD,
-               .buf_len = HNS3_DBG_READ_LEN_4MB,
+               .buf_len = HNS3_DBG_READ_LEN_5MB,
                .init = hns3_dbg_bd_file_init,
        },
        {
index 97578ea..4a5ef8a 100644 (file)
@@ -10,6 +10,7 @@
 #define HNS3_DBG_READ_LEN_128KB        0x20000
 #define HNS3_DBG_READ_LEN_1MB  0x100000
 #define HNS3_DBG_READ_LEN_4MB  0x400000
+#define HNS3_DBG_READ_LEN_5MB  0x500000
 #define HNS3_DBG_WRITE_LEN     1024
 
 #define HNS3_DBG_DATA_STR_LEN  32
index 4fb5406..2689b10 100644 (file)
@@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        /* If it is not PF reset or FLR, the firmware will disable the MAC,
         * so it only need to stop phy here.
         */
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-           hdev->reset_type != HNAE3_FUNC_RESET &&
-           hdev->reset_type != HNAE3_FLR_RESET) {
-               hclge_mac_stop_phy(hdev);
-               hclge_update_link_status(hdev);
-               return;
+       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+               hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+                                      HCLGE_PFC_DISABLE);
+               if (hdev->reset_type != HNAE3_FUNC_RESET &&
+                   hdev->reset_type != HNAE3_FLR_RESET) {
+                       hclge_mac_stop_phy(hdev);
+                       hclge_update_link_status(hdev);
+                       return;
+               }
        }
 
        hclge_reset_tqp(handle);
index 4a33f65..922c0da 100644 (file)
@@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
-                                 u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+                          u8 pfc_bitmap)
 {
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
index 68f28a9..dd6f1fd 100644 (file)
@@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
        u32 rsvd1;
 };
 
+#define HCLGE_PFC_DISABLE      0
+#define HCLGE_PFC_TX_RX_DISABLE        0
+
 struct hclge_pfc_en_cmd {
        u8 tx_rx_en_bitmap;
        u8 pri_en_bitmap;
@@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+                          u8 pfc_bitmap);
 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
index f240462..dd08989 100644 (file)
@@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
         * might happen in case reset assertion was made by PF. Yes, this also
         * means we might end up waiting bit more even for VF reset.
         */
-       msleep(5000);
+       if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+               msleep(5000);
+       else
+               msleep(500);
 
        return 0;
 }
index 9afbbda..7c0578b 100644 (file)
@@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                iavf_process_config(adapter);
                adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
 
-               /* Request VLAN offload settings */
-               if (VLAN_V2_ALLOWED(adapter))
-                       iavf_set_vlan_offload_features(adapter, 0,
-                                                      netdev->features);
-
                iavf_set_queue_vlan_tag_loc(adapter);
 
                was_mac_changed = !ether_addr_equal(netdev->dev_addr,
index c6d4926..850db8e 100644 (file)
@@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
        if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
             first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
            skb->priority != TC_PRIO_CONTROL) {
-               first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+               first->vid &= ~VLAN_PRIO_MASK;
                /* Mask the lower 3 bits to set the 802.1p priority */
-               first->tx_flags |= (skb->priority & 0x7) <<
-                                  ICE_TX_FLAGS_VLAN_PR_S;
+               first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
                /* if this is not already set it means a VLAN 0 + priority needs
                 * to be offloaded
                 */
index 450317d..11ae0e4 100644 (file)
@@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                        goto unroll_vector_base;
 
                ice_vsi_map_rings_to_vectors(vsi);
+               vsi->stat_offsets_loaded = false;
+
                if (ice_is_xdp_ena_vsi(vsi)) {
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
@@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                ret = ice_vsi_alloc_ring_stats(vsi);
                if (ret)
                        goto unroll_vector_base;
+
+               vsi->stat_offsets_loaded = false;
+
                /* Do not exit if configuring RSS had an issue, at least
                 * receive traffic on first queue. Hence no need to capture
                 * return value
index f1dca59..588ad86 100644 (file)
@@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
        if (!vf)
                return -EINVAL;
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto out_put_vf;
        }
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
                return -EOPNOTSUPP;
        }
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
        if (!vf)
                return -EINVAL;
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
index 4fcf2d0..059bd91 100644 (file)
@@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
 
        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
-               td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
-                         ICE_TX_FLAGS_VLAN_S;
+               td_tag = first->vid;
        }
 
        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
         * VLAN offloads exclusively so we only care about the VLAN ID here
         */
        if (skb_vlan_tag_present(skb)) {
-               first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+               first->vid = skb_vlan_tag_get(skb);
                if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
                        first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
                else
@@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                                        (ICE_TX_CTX_DESC_IL2TAG2 <<
                                        ICE_TXD_CTX_QW1_CMD_S));
-               offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
-                       ICE_TX_FLAGS_VLAN_S;
+               offload.cd_l2tag2 = first->vid;
        }
 
        /* set up TSO offload */
index fff0efe..166413f 100644 (file)
@@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
 #define ICE_TX_FLAGS_IPV6      BIT(6)
 #define ICE_TX_FLAGS_TUNNEL    BIT(7)
 #define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN      BIT(8)
-#define ICE_TX_FLAGS_VLAN_M    0xffff0000
-#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
-#define ICE_TX_FLAGS_VLAN_PR_S 29
-#define ICE_TX_FLAGS_VLAN_S    16
 
 #define ICE_XDP_PASS           0
 #define ICE_XDP_CONSUMED       BIT(0)
@@ -182,8 +178,9 @@ struct ice_tx_buf {
                unsigned int gso_segs;
                unsigned int nr_frags;  /* used for mbuf XDP */
        };
-       u32 type:16;                    /* &ice_tx_buf_type */
-       u32 tx_flags:16;
+       u32 tx_flags:12;
+       u32 type:4;                     /* &ice_tx_buf_type */
+       u32 vid:16;
        DEFINE_DMA_UNMAP_LEN(len);
        DEFINE_DMA_UNMAP_ADDR(dma);
 };
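
The repacked bitfield still totals exactly one 32-bit word, which is why the change is layout-neutral:

    /* tx_flags:12 + type:4 + vid:16 == 32 bits.
     * Highest flag now defined: ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN == BIT(8),
     * so 12 flag bits suffice, and the &ice_tx_buf_type values fit in 4.
     */
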
index 89fd698..bf74a2f 100644 (file)
@@ -186,6 +186,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
 }
 
 /**
+ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+ * @vf: VF to check if it's ready to be reset
+ *
+ * The purpose of this function is to ensure that the VF is not in reset or
+ * disabled, and is both initialized and active, so that another reset can
+ * safely be initiated.
+ */
+int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+{
+       int ret;
+
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+               ret = -EAGAIN;
+
+       return ret;
+}
+

+/**
  * ice_trigger_vf_reset - Reset a VF on HW
  * @vf: pointer to the VF structure
  * @is_vflr: true if VFLR was issued, false if not
index e3cda6f..a38ef00 100644 (file)
@@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
 bool ice_is_vf_disabled(struct ice_vf *vf);
 int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+int ice_check_vf_ready_for_reset(struct ice_vf *vf);
 void ice_set_vf_state_dis(struct ice_vf *vf);
 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
 void
index 97243c6..f4a524f 100644 (file)
@@ -3955,6 +3955,7 @@ error_handler:
                ice_vc_notify_vf_link_state(vf);
                break;
        case VIRTCHNL_OP_RESET_VF:
+               clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
                ops->reset_vf(vf);
                break;
        case VIRTCHNL_OP_ADD_ETH_ADDR:
index 205d577..caf91c6 100644 (file)
@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 {
        u32 hash_value, hash_mask;
-       u8 bit_shift = 0;
+       u8 bit_shift = 1;
 
        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
        /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
-       while (hash_mask >> bit_shift != 0xFF)
+       while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
                bit_shift++;
 
        /* The portion of the address that is used for the hash table
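
Worked through, the igb bound behaves as follows: with 128 MTA registers, hash_mask = 128 * 32 - 1 = 0xfff, and starting from bit_shift = 1 the loop stops at 4 because 0xfff >> 4 == 0xff. For a mask that never satisfies the 0xff test, the old loop (starting at 0 and unbounded) spun forever; the cap guarantees termination with a usable shift. A host-side re-derivation, for illustration only:

    static unsigned int mc_hash_shift(unsigned int mta_reg_count)
    {
            unsigned int hash_mask = mta_reg_count * 32 - 1;
            unsigned int bit_shift = 1;

            while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
                    bit_shift++;
            return bit_shift;       /* 4 for 128 registers, 2 for 32 */
    }
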
index 7045fed..7af223b 100644 (file)
@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        ext->lso_format = pfvf->hw.lso_tsov6_idx;
-
-                       ipv6_hdr(skb)->payload_len =
-                               htons(ext->lso_sb - skb_network_offset(skb));
+                       ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
                        __be16 l3_proto = vlan_get_protocol(skb);
                        struct udphdr *udph = udp_hdr(skb);
index a75fd07..834c644 100644 (file)
@@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev)
                        eth->dsa_meta[i] = md_dst;
                }
        } else {
-               /* Hardware special tag parsing needs to be disabled if at least
-                * one MAC does not use DSA.
+               /* Hardware DSA untagging and VLAN RX offloading need to be
+                * disabled if at least one MAC does not use DSA.
                 */
                u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
 
                val &= ~MTK_CDMP_STAG_EN;
                mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
 
-               val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
-               val &= ~MTK_CDMQ_STAG_EN;
-               mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
-
                mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
        }
 
index d53de39..d532883 100644 (file)
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                           u32 syndrome, int err)
 {
+       const char *namep = mlx5_command_str(opcode);
        struct mlx5_cmd_stats *stats;
 
-       if (!err)
+       if (!err || !(strcmp(namep, "unknown command opcode")))
                return;
 
        stats = &dev->cmd.stats[opcode];
index eb5abd0..3cbebfb 100644 (file)
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
        /* ensure cq space is freed before enabling more cqes */
        wmb();
 
+       mlx5e_txqsq_wake(&ptpsq->txqsq);
+
        return work_done == budget;
 }
 
index 20c2d2e..6a052c6 100644 (file)
@@ -1369,11 +1369,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow *flow;
 
        list_for_each_entry(flow, encap_flows, tmp_list) {
-               struct mlx5_flow_attr *attr = flow->attr;
                struct mlx5_esw_flow_attr *esw_attr;
+               struct mlx5_flow_attr *attr;
 
                if (!mlx5e_is_offloaded_flow(flow))
                        continue;
+
+               attr = mlx5e_tc_get_encap_attr(flow);
                esw_attr = attr->esw_attr;
 
                if (flow_flag_test(flow, SLOW))
index 47381e9..879d698 100644 (file)
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
        return pi;
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
 static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
index 728b82c..e95414e 100644 (file)
@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
        struct mlx5e_priv *out_priv, *route_priv;
-       struct mlx5_devcom *devcom = NULL;
        struct mlx5_core_dev *route_mdev;
        struct mlx5_eswitch *esw;
        u16 vhca_id;
-       int err;
 
        out_priv = netdev_priv(out_dev);
        esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 
        vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
        if (mlx5_lag_is_active(out_priv->mdev)) {
+               struct mlx5_devcom *devcom;
+               int err;
+
                /* In the lag case we may get devices from different eswitch
                 * instances. If we failed to get the vport num, it most likely
                 * means we are on the wrong eswitch.
@@ -1686,16 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
                if (err != -ENOENT)
                        return err;
 
+               rcu_read_lock();
                devcom = out_priv->mdev->priv.devcom;
-               esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-               if (!esw)
-                       return -ENODEV;
+               esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+               err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+               rcu_read_unlock();
+
+               return err;
        }
 
-       err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
-       if (devcom)
-               mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-       return err;
+       return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 }
 
 static int
@@ -5301,6 +5302,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
                goto err_action_counter;
        }
 
+       mlx5_esw_offloads_devcom_init(esw);
+
        return 0;
 
 err_action_counter:
@@ -5329,7 +5332,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
        priv = netdev_priv(rpriv->netdev);
        esw = priv->mdev->priv.eswitch;
 
-       mlx5e_tc_clean_fdb_peer_flows(esw);
+       mlx5_esw_offloads_devcom_cleanup(esw);
 
        mlx5e_tc_tun_cleanup(uplink_priv->encap);
 
@@ -5643,22 +5646,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
                                   0, NULL);
 }
 
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+       struct mlx5e_tc_table *tc;
+       struct mlx5_eswitch *esw;
+       struct mapping_ctx *ctx;
+
+       if (is_mdev_switchdev_mode(priv->mdev)) {
+               esw = priv->mdev->priv.eswitch;
+               ctx = esw->offloads.reg_c0_obj_pool;
+       } else {
+               tc = mlx5e_fs_get_tc(priv->fs);
+               ctx = tc->mapping;
+       }
+
+       return ctx;
+}
+
 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
                                     u64 act_miss_cookie, u32 *act_miss_mapping)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_mapped_obj mapped_obj = {};
+       struct mlx5_eswitch *esw;
        struct mapping_ctx *ctx;
        int err;
 
-       ctx = esw->offloads.reg_c0_obj_pool;
-
+       ctx = mlx5e_get_priv_obj_mapping(priv);
        mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
        mapped_obj.act_miss_cookie = act_miss_cookie;
        err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
        if (err)
                return err;
 
+       if (!is_mdev_switchdev_mode(priv->mdev))
+               return 0;
+
+       esw = priv->mdev->priv.eswitch;
        attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
        if (IS_ERR(attr->act_id_restore_rule))
                goto err_rule;
@@ -5673,10 +5697,9 @@ err_rule:
 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
                                      u32 act_miss_mapping)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mapping_ctx *ctx;
+       struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
 
-       ctx = esw->offloads.reg_c0_obj_pool;
-       mlx5_del_flow_rules(attr->act_id_restore_rule);
+       if (is_mdev_switchdev_mode(priv->mdev))
+               mlx5_del_flow_rules(attr->act_id_restore_rule);
        mapping_remove(ctx, act_miss_mapping);
 }
index df5e780..c7eb6b2 100644 (file)
@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
        }
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+       if (netif_tx_queue_stopped(sq->txq) &&
+           mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+           mlx5e_ptpsq_fifo_has_room(sq) &&
+           !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats->wake++;
+       }
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
-       if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
-           mlx5e_ptpsq_fifo_has_room(sq) &&
-           !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
-               netif_tx_wake_queue(sq->txq);
-               stats->wake++;
-       }
+       mlx5e_txqsq_wake(sq);
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
index a50bfda..fbb2d96 100644 (file)
@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
                }
        }
 
+       /* budget=0 means we may be in IRQ context, do as little as possible */
+       if (unlikely(!budget))
+               goto out;
+
        busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
 
        if (c->xdp)
                busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
 
-       if (likely(budget)) { /* budget=0 means: don't poll rx rings */
-               if (xsk_open)
-                       work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+       if (xsk_open)
+               work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
 
-               if (likely(budget - work_done))
-                       work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+       if (likely(budget - work_done))
+               work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
 
-               busy |= work_done == budget;
-       }
+       busy |= work_done == budget;
 
        mlx5e_poll_ico_cq(&c->icosq.cq);
        if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
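
The en_txrx.c hunk above makes budget == 0 bail out before any XDP or RX processing, keeping only the minimal TX completion work that precedes it. Zero budget is the NAPI signal that the poll may be running from IRQ/netpoll context; such a call must do as little as possible and must never complete the NAPI instance. A minimal sketch of the convention (mydrv_* names are hypothetical):

#include <linux/netdevice.h>

struct mydrv_channel {
	struct napi_struct napi;
	/* ... RX/TX rings ... */
};

static void mydrv_clean_tx(struct mydrv_channel *ch)
{
	/* reap TX completions; cheap and safe in any context */
}

static int mydrv_clean_rx(struct mydrv_channel *ch, int budget)
{
	return 0;	/* number of RX packets processed */
}

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_channel *ch = container_of(napi, struct mydrv_channel, napi);
	int work_done;

	mydrv_clean_tx(ch);

	/* budget == 0 means the caller may be in IRQ/netpoll context:
	 * do no RX work and never complete the NAPI instance
	 */
	if (unlikely(!budget))
		return 0;

	work_done = mydrv_clean_rx(ch, budget);
	if (work_done < budget)
		napi_complete_done(napi, work_done);

	return work_done;
}
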
index 1c35d72..fe698c7 100644 (file)
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;
 
        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
-       mlx5_irq_table_destroy(dev);
+       mlx5_irq_table_free_irqs(dev);
        mutex_unlock(&table->lock);
 }
 
index 1a042c9..add6cfa 100644 (file)
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
                u32             large_group_num;
        }  params;
        struct blocking_notifier_head n_head;
+       bool paired[MLX5_MAX_PORTS];
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
index 69215ff..8d19c20 100644 (file)
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
                        break;
 
+               if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+                       break;
+
                err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
                if (err)
                        goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
                if (err)
                        goto err_pair;
 
+               esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+               peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
                break;
 
        case ESW_OFFLOADS_DEVCOM_UNPAIR:
-               if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+               if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
                        break;
 
                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+               esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+               peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
                mlx5_esw_offloads_unpair(peer_esw);
                mlx5_esw_offloads_unpair(esw);
                mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
@@ -2779,7 +2786,7 @@ err_out:
        return err;
 }
 
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
                               ESW_OFFLOADS_DEVCOM_PAIR, esw);
 }
 
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vports;
 
-       esw_offloads_devcom_init(esw);
-
        return 0;
 
 err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
-       esw_offloads_devcom_cleanup(esw);
        mlx5_eswitch_disable_pf_vf_vports(esw);
        esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
        esw_set_passing_vport_metadata(esw, false);
index adefde3..b7d779d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/vport.h>
 #include "lib/devcom.h"
+#include "mlx5_core.h"
 
 static LIST_HEAD(devcom_list);
 
@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
 
 struct mlx5_devcom_component {
        struct {
-               void *data;
+               void __rcu *data;
        } device[MLX5_DEVCOM_PORTS_SUPPORTED];
 
        mlx5_devcom_event_handler_t handler;
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
                return NULL;
 
+       mlx5_dev_list_lock();
        sguid0 = mlx5_query_nic_system_image_guid(dev);
        list_for_each_entry(iter, &devcom_list, list) {
                struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 
        if (!priv) {
                priv = mlx5_devcom_list_alloc();
-               if (!priv)
-                       return ERR_PTR(-ENOMEM);
+               if (!priv) {
+                       devcom = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
 
                idx = 0;
                new_priv = true;
@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
        priv->devs[idx] = dev;
        devcom = mlx5_devcom_alloc(priv, idx);
        if (!devcom) {
-               kfree(priv);
-               return ERR_PTR(-ENOMEM);
+               if (new_priv)
+                       kfree(priv);
+               devcom = ERR_PTR(-ENOMEM);
+               goto out;
        }
 
        if (new_priv)
                list_add(&priv->list, &devcom_list);
-
+out:
+       mlx5_dev_list_unlock();
        return devcom;
 }
 
@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
        if (IS_ERR_OR_NULL(devcom))
                return;
 
+       mlx5_dev_list_lock();
        priv = devcom->priv;
        priv->devs[devcom->idx] = NULL;
 
@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
                        break;
 
        if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
-               return;
+               goto out;
 
        list_del(&priv->list);
        kfree(priv);
+out:
+       mlx5_dev_list_unlock();
 }
 
 void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
        comp->handler = handler;
-       comp->device[devcom->idx].data = data;
+       rcu_assign_pointer(comp->device[devcom->idx].data, data);
        up_write(&comp->sem);
 }
 
@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
-       comp->device[devcom->idx].data = NULL;
+       RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
        up_write(&comp->sem);
+       synchronize_rcu();
 }
 
 int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
-       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-               if (i != devcom->idx && comp->device[i].data) {
-                       err = comp->handler(event, comp->device[i].data,
-                                           event_data);
+       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
+               void *data = rcu_dereference_protected(comp->device[i].data,
+                                                      lockdep_is_held(&comp->sem));
+
+               if (i != devcom->idx && data) {
+                       err = comp->handler(event, data, event_data);
                        break;
                }
+       }
 
        up_write(&comp->sem);
        return err;
@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
        comp = &devcom->priv->components[id];
        WARN_ON(!rwsem_is_locked(&comp->sem));
 
-       comp->paired = paired;
+       WRITE_ONCE(comp->paired, paired);
 }
 
 bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
        if (IS_ERR_OR_NULL(devcom))
                return false;
 
-       return devcom->priv->components[id].paired;
+       return READ_ONCE(devcom->priv->components[id].paired);
 }
 
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_read(&comp->sem);
-       if (!comp->paired) {
+       if (!READ_ONCE(comp->paired)) {
                up_read(&comp->sem);
                return NULL;
        }
@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
                if (i != devcom->idx)
                        break;
 
-       return comp->device[i].data;
+       return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+}
+
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+{
+       struct mlx5_devcom_component *comp;
+       int i;
+
+       if (IS_ERR_OR_NULL(devcom))
+               return NULL;
+
+       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+               if (i != devcom->idx)
+                       break;
+
+       comp = &devcom->priv->components[id];
+       /* This can change concurrently; however, the 'data' pointer will
+        * remain valid for the duration of the RCU read-side critical
+        * section.
+        */
+       if (!READ_ONCE(comp->paired))
+               return NULL;
+
+       return rcu_dereference(comp->device[i].data);
 }
 
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
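
The devcom changes above are a textbook RCU publish/retire conversion: the data pointers become __rcu, writers publish with rcu_assign_pointer() and retire with RCU_INIT_POINTER() plus synchronize_rcu(), lock-holding readers use rcu_dereference_protected(), and the new mlx5_devcom_get_peer_data_rcu() lets lockless callers read under rcu_read_lock(). A self-contained sketch of the same pattern, with hypothetical names:

#include <linux/rcupdate.h>

struct peer_data {
	int id;
};

struct peer_slot {
	void __rcu *data;	/* published peer data */
};

/* Writer side: runs under the slot owner's lock. */
static void slot_publish(struct peer_slot *slot, struct peer_data *data)
{
	rcu_assign_pointer(slot->data, data);
}

static void slot_retire(struct peer_slot *slot)
{
	RCU_INIT_POINTER(slot->data, NULL);
	synchronize_rcu();	/* wait for all current readers to finish */
}

/* Lockless reader: the pointer is only valid inside the read section. */
static int slot_peek_id(struct peer_slot *slot)
{
	struct peer_data *data;
	int id = -1;

	rcu_read_lock();
	data = rcu_dereference(slot->data);
	if (data)
		id = data->id;
	rcu_read_unlock();
	return id;
}
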
index 94313c1..9a496f4 100644 (file)
@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
 
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
                                enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
                                   enum mlx5_devcom_components id);
 
index 995eb2d..a7eb65c 100644 (file)
@@ -1049,7 +1049,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
        dev->dm = mlx5_dm_create(dev);
        if (IS_ERR(dev->dm))
-               mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+               mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
 
        dev->tracer = mlx5_fw_tracer_create(dev);
        dev->hv_vhca = mlx5_hv_vhca_create(dev);
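
The small fix above corrects two things at once: mlx5_dm_create() returns an ERR_PTR-encoded pointer, so the interesting value is PTR_ERR(dev->dm) rather than a stale err, and PTR_ERR() yields a long, which needs %ld. A hedged reminder of the ERR_PTR idiom (foo_* names are hypothetical):

#include <linux/err.h>
#include <linux/printk.h>

struct foo;
struct foo *foo_create(void);	/* returns a valid pointer or ERR_PTR(-E...) */

static int foo_attach(struct foo **out)
{
	struct foo *f = foo_create();

	if (IS_ERR(f)) {
		/* PTR_ERR() returns a long, hence the %ld format */
		pr_warn("failed to create foo: %ld\n", PTR_ERR(f));
		return PTR_ERR(f);
	}
	*out = f;
	return 0;
}
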
index efd0c29..aa403a5 100644 (file)
@@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
 void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
 int mlx5_irq_table_create(struct mlx5_core_dev *dev);
 void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
 int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
 struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
index 2245d3b..db5687d 100644 (file)
@@ -32,6 +32,7 @@ struct mlx5_irq {
        struct mlx5_irq_pool *pool;
        int refcount;
        struct msi_map map;
+       u32 pool_index;
 };
 
 struct mlx5_irq_table {
@@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq)
        struct cpu_rmap *rmap;
 #endif
 
-       xa_erase(&pool->irqs, irq->map.index);
+       xa_erase(&pool->irqs, irq->pool_index);
        /* free_irq requires that affinity_hint and rmap be cleared before
         * calling it. To satisfy this requirement, we call
         * irq_cpu_rmap_remove() to remove the notifier
@@ -276,11 +277,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
        }
        irq->pool = pool;
        irq->refcount = 1;
-       irq->map.index = i;
-       err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+       irq->pool_index = i;
+       err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-                             irq->map.index, err);
+                             irq->pool_index, err);
                goto err_xa;
        }
        return irq;
@@ -567,7 +568,7 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
        struct mlx5_irq *irq;
        int i;
 
-       af_desc.is_managed = 1;
+       af_desc.is_managed = false;
        for (i = 0; i < nirqs; i++) {
                cpumask_set_cpu(cpus[i], &af_desc.mask);
                irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
@@ -691,6 +692,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
        irq_pool_free(table->pcif_pool);
 }
 
+static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+       struct mlx5_irq *irq;
+       unsigned long index;
+
+       xa_for_each(&pool->irqs, index, irq)
+               free_irq(irq->map.virq, &irq->nh);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+       if (table->sf_ctrl_pool) {
+               mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+               mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+       }
+       mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
 /* irq_table API */
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +793,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
        pci_free_irq_vectors(dev->pdev);
 }
 
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_irq_table *table = dev->priv.irq_table;
+
+       if (mlx5_core_is_sf(dev))
+               return;
+
+       mlx5_irq_pools_free_irqs(table);
+       pci_free_irq_vectors(dev->pdev);
+}
+
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
        if (table->sf_comp_pool)
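
The pool_index change above separates the driver's xarray bookkeeping key from irq->map.index, which the MSI core may assign differently; whatever key goes into xa_store() must be the same one later passed to xa_erase(). A small sketch of keyed xarray bookkeeping under that assumption:

#include <linux/xarray.h>
#include <linux/slab.h>

struct item {
	u32 key;	/* bookkeeping key owned by the driver, not by HW */
};

static DEFINE_XARRAY(pool);

static struct item *item_add(u32 key)
{
	struct item *it = kzalloc(sizeof(*it), GFP_KERNEL);

	if (!it)
		return NULL;
	it->key = key;
	if (xa_err(xa_store(&pool, key, it, GFP_KERNEL))) {
		kfree(it);
		return NULL;
	}
	return it;
}

static void item_del(struct item *it)
{
	xa_erase(&pool, it->key);	/* must match the key given to xa_store() */
	kfree(it);
}
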
index 3835ba3..1aa525e 100644 (file)
@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
        caps->gvmi              = MLX5_CAP_GEN(mdev, vhca_id);
        caps->flex_protocols    = MLX5_CAP_GEN(mdev, flex_parser_protocols);
        caps->sw_format_ver     = MLX5_CAP_GEN(mdev, steering_format_version);
+       caps->roce_caps.fl_rc_qp_when_roce_disabled =
+               MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
 
        if (MLX5_CAP_GEN(mdev, roce)) {
                err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                        return err;
 
                caps->roce_caps.roce_en = roce_en;
-               caps->roce_caps.fl_rc_qp_when_roce_disabled =
+               caps->roce_caps.fl_rc_qp_when_roce_disabled |=
                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
                caps->roce_caps.fl_rc_qp_when_roce_enabled =
                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
index 9413aaf..e94fbb0 100644 (file)
@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
 {
        u32 crc = crc32(0, input_data, length);
 
-       return (__force u32)htonl(crc);
+       return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+                           ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
 }
 
 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
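
The replacement above open-codes an unconditional 32-bit byte reversal, so the value programmed into the STE has the same byte order on little- and big-endian hosts, whereas htonl() is an identity on big-endian CPUs. The expression is equivalent to swab32(); a sketch for illustration (the driver may be avoiding swab32() deliberately, e.g. to sidestep sparse byte-order annotations):

#include <linux/swab.h>
#include <linux/types.h>

/* swab32() reverses the four bytes unconditionally, independent of host
 * endianness, while htonl() is a no-op on big-endian CPUs.
 */
static u32 crc_to_ste_order(u32 crc)
{
	return swab32(crc);
}
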
index 2b6e046..ee26986 100644 (file)
@@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
 
        reset_control_reset(switch_reset);
 
+       /* Don't reinitialize the switch core if it is already initialized. If
+        * it is initialized twice, some pointers inside the HW queue system
+        * get corrupted; after a while the queue system fills up and no
+        * traffic passes through the switch. The issue is seen when loading
+        * and unloading the driver and sending traffic through the switch.
+        */
+       if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+               return 0;
+
        lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
        lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
        ret = readx_poll_timeout(lan966x_ram_init, lan966x,
index 094374d..38b8b10 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifdef CONFIG_DCB
 /* DCB feature definitions */
-#define NFP_NET_MAX_DSCP       4
+#define NFP_NET_MAX_DSCP       64
 #define NFP_NET_MAX_TC         IEEE_8021QAZ_MAX_TCS
 #define NFP_NET_MAX_PRIO       8
 #define NFP_DCB_CFG_STRIDE     256
index 0605d1e..7a549b8 100644 (file)
@@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        return 0;
 
 out_error:
+       nv_mgmt_release_sema(dev);
        if (phystate_orig)
                writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
 out_freering:
index a7e376e..4b19803 100644 (file)
@@ -616,10 +616,10 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
-       spinlock_t config25_lock;
-       spinlock_t mac_ocp_lock;
+       raw_spinlock_t config25_lock;
+       raw_spinlock_t mac_ocp_lock;
 
-       spinlock_t cfg9346_usage_lock;
+       raw_spinlock_t cfg9346_usage_lock;
        int cfg9346_usage_count;
 
        unsigned supports_gmii:1;
@@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+       raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
        if (!--tp->cfg9346_usage_count)
                RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-       spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
 }
 
 static void rtl_unlock_config_regs(struct rtl8169_private *tp)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+       raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
        if (!tp->cfg9346_usage_count++)
                RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-       spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
 }
 
 static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
        unsigned long flags;
        u8 val;
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        val = RTL_R8(tp, Config2);
        RTL_W8(tp, Config2, (val & ~clear) | set);
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 }
 
 static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
@@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
        unsigned long flags;
        u8 val;
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        val = RTL_R8(tp, Config5);
        RTL_W8(tp, Config5, (val & ~clear) | set);
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 }
 
 static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        __r8168_mac_ocp_write(tp, reg, data);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 }
 
 static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
        unsigned long flags;
        u16 val;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        val = __r8168_mac_ocp_read(tp, reg);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 
        return val;
 }
@@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
        unsigned long flags;
        u16 data;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        data = __r8168_mac_ocp_read(tp, reg);
        __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 }
 
 /* Work around a hw issue with the RTL8168g PHY; the quirk disables
@@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
                        r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
        }
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        for (i = 0; i < tmp; i++) {
                options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
                if (wolopts & cfg[i].opt)
                        options |= cfg[i].mask;
                RTL_W8(tp, cfg[i].reg, options);
        }
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
@@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->eee_adv = -1;
        tp->ocp_base = OCP_STD_PHY_BASE;
 
-       spin_lock_init(&tp->cfg9346_usage_lock);
-       spin_lock_init(&tp->config25_lock);
-       spin_lock_init(&tp->mac_ocp_lock);
+       raw_spin_lock_init(&tp->cfg9346_usage_lock);
+       raw_spin_lock_init(&tp->config25_lock);
+       raw_spin_lock_init(&tp->mac_ocp_lock);
 
        dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
                                                   struct pcpu_sw_netstats);
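
The r8169 conversion to raw_spinlock_t matters on PREEMPT_RT, where an ordinary spinlock_t becomes a sleeping rtmutex; these locks protect register accesses reached from contexts that must not sleep, so they need the always-spinning variant. A minimal sketch of the idiom (reg_modify() is hypothetical):

#include <linux/spinlock.h>
#include <linux/io.h>

/* On PREEMPT_RT a spinlock_t is a sleeping rtmutex; raw_spinlock_t always
 * spins with interrupts disabled and is therefore usable from hard-IRQ
 * and other non-sleeping contexts.
 */
static DEFINE_RAW_SPINLOCK(reg_lock);

static void reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&reg_lock, flags);
	val = readl(reg);
	writel((val & ~clear) | set, reg);
	raw_spin_unlock_irqrestore(&reg_lock, flags);
}
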
index d916877..be395cd 100644 (file)
@@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
        efx->net_dev = net_dev;
        SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
 
-       net_dev->features |= efx->type->offload_features;
+       /* enable all supported features except rx-fcs and rx-all */
+       net_dev->features |= efx->type->offload_features &
+                            ~(NETIF_F_RXFCS | NETIF_F_RXALL);
        net_dev->hw_features |= efx->type->offload_features;
        net_dev->hw_enc_features |= efx->type->offload_features;
        net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
index 381b805..ef9971c 100644 (file)
@@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
 
        rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
                                     0);
+
+       /* If the partition does not exist, that is not an error. */
+       if (rc == -ENOENT)
+               return 0;
+
        if (rc) {
-               netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
-                         version_name);
+               netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n",
+                         version_name, rc);
                return rc;
        }
 
@@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
 static int efx_devlink_info_stored_versions(struct efx_nic *efx,
                                            struct devlink_info_req *req)
 {
-       int rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_BUNDLE,
-                                             DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_MC_FIRMWARE,
-                                             DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
-                                             EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_EXPANSION_ROM,
-                                             EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
-       if (rc)
-               return rc;
+       int err;
 
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
-                                             EFX_DEVLINK_INFO_VERSION_FW_UEFI);
-       return rc;
+       /* We do not care about the specific error here, only whether an
+        * error happened. Each call reports its specific error via system
+        * messages; if any call below fails, we report the failure through
+        * extack.
+        */
+       err = efx_devlink_info_nvram_partition(efx, req,
+                                              NVRAM_PARTITION_TYPE_BUNDLE,
+                                              DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+                                               DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+                                               EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+                                               EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+                                               EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+       return err;
 }
 
 #define EFX_VER_FLAG(_f)       \
@@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink,
 {
        struct efx_devlink *devlink_private = devlink_priv(devlink);
        struct efx_nic *efx = devlink_private->efx;
-       int rc;
+       int err;
 
-       /* Several different MCDI commands are used. We report first error
-        * through extack returning at that point. Specific error
-        * information via system messages.
+       /* Several different MCDI commands are used. We report through extack
+        * whether any errors happened; specific error information is emitted
+        * via system messages inside the calls.
+        */
-       rc = efx_devlink_info_board_cfg(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
-               return rc;
-       }
-       rc = efx_devlink_info_stored_versions(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
-               return rc;
-       }
-       rc = efx_devlink_info_running_versions(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
-               return rc;
-       }
+       err = efx_devlink_info_board_cfg(efx, req);
+
+       err |= efx_devlink_info_stored_versions(efx, req);
+
+       err |= efx_devlink_info_running_versions(efx, req);
+
+       if (err)
+               NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages");
 
        return 0;
 }
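
The sfc rework deliberately keeps collecting versions after a failure and raises a single extack message at the end. Note that err |= rc only preserves the fact that something failed — OR-ing negative errnos does not yield a meaningful errno — which is acceptable here because each call already logged its specific error. A hedged sketch of the accumulate-then-report pattern, normalizing the final value (collect_one() and the PART_* partitions are hypothetical):

#include <linux/errno.h>

struct ctx;
enum part { PART_A, PART_B, PART_C };		/* hypothetical partitions */
int collect_one(struct ctx *c, enum part p);	/* logs its own errors */

static int collect_all(struct ctx *c)
{
	int err;

	err = collect_one(c, PART_A);
	err |= collect_one(c, PART_B);	/* keep going even on failure */
	err |= collect_one(c, PART_C);

	/* a nonzero OR of errnos only means "something failed", so
	 * normalize it before returning
	 */
	return err ? -EIO : 0;
}
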
index 4ef05ba..d61dfa2 100644 (file)
@@ -5077,6 +5077,8 @@ err_out_iounmap:
                cas_shutdown(cp);
        mutex_unlock(&cp->pm_mutex);
 
+       vfree(cp->fw_data);
+
        pci_iounmap(pdev, cp->regs);
 
 
index 1e0c206..da2001e 100644 (file)
@@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
        return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
 }
 
-static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad,
+                                int reg)
 {
        u8 buf[4], res[6];
        int bus_addr, ret;
@@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
                return 0xffff;
 
        buf[0] = ROLLBALL_DATA_ADDR;
-       buf[1] = (reg >> 16) & 0x1f;
+       buf[1] = devad;
        buf[2] = (reg >> 8) & 0xff;
        buf[3] = reg & 0xff;
 
@@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
        return val;
 }
 
-static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
-                                 u16 val)
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad,
+                                 int reg, u16 val)
 {
        int bus_addr, ret;
        u8 buf[6];
@@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
                return 0;
 
        buf[0] = ROLLBALL_DATA_ADDR;
-       buf[1] = (reg >> 16) & 0x1f;
+       buf[1] = devad;
        buf[2] = (reg >> 8) & 0xff;
        buf[3] = reg & 0xff;
        buf[4] = val >> 8;
@@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
                        return ERR_PTR(ret);
                }
 
-               mii->read = i2c_mii_read_rollball;
-               mii->write = i2c_mii_write_rollball;
+               mii->read_c45 = i2c_mii_read_rollball;
+               mii->write_c45 = i2c_mii_write_rollball;
                break;
        default:
                mii->read = i2c_mii_read_default_c22;
index f19d48c..72f25e7 100644 (file)
@@ -873,7 +873,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
 
        switch (compat->an_mode) {
        case DW_AN_C73:
-               if (phylink_autoneg_inband(mode)) {
+               if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
                        ret = xpcs_config_aneg_c73(xpcs, compat);
                        if (ret)
                                return ret;
index d75f526..76f5a24 100644 (file)
@@ -44,6 +44,7 @@
 #define DP83867_STRAP_STS1     0x006E
 #define DP83867_STRAP_STS2     0x006f
 #define DP83867_RGMIIDCTL      0x0086
+#define DP83867_DSP_FFE_CFG    0x012c
 #define DP83867_RXFCFG         0x0134
 #define DP83867_RXFPMD1        0x0136
 #define DP83867_RXFPMD2        0x0137
@@ -941,8 +942,27 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 
        usleep_range(10, 20);
 
-       return phy_modify(phydev, MII_DP83867_PHYCTRL,
+       err = phy_modify(phydev, MII_DP83867_PHYCTRL,
                         DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+       if (err < 0)
+               return err;
+
+       /* Configure the DSP Feedforward Equalizer Configuration register to
+        * improve short cable (< 1 meter) performance. This will not affect
+        * long cable performance.
+        */
+       err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG,
+                           0x0e81);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+       if (err < 0)
+               return err;
+
+       usleep_range(10, 20);
+
+       return 0;
 }
 
 static void dp83867_link_change_notify(struct phy_device *phydev)
index a50235f..defe5cc 100644 (file)
@@ -179,6 +179,7 @@ enum rgmii_clock_delay {
 #define VSC8502_RGMII_CNTL               20
 #define VSC8502_RGMII_RX_DELAY_MASK      0x0070
 #define VSC8502_RGMII_TX_DELAY_MASK      0x0007
+#define VSC8502_RGMII_RX_CLK_DISABLE     0x0800
 
 #define MSCC_PHY_WOL_LOWER_MAC_ADDR      21
 #define MSCC_PHY_WOL_MID_MAC_ADDR        22
@@ -276,6 +277,7 @@ enum rgmii_clock_delay {
 /* Microsemi PHY ID's
  *   Code assumes lowest nibble is 0
  */
+#define PHY_ID_VSC8501                   0x00070530
 #define PHY_ID_VSC8502                   0x00070630
 #define PHY_ID_VSC8504                   0x000704c0
 #define PHY_ID_VSC8514                   0x00070670
index 62bf99e..28df8a2 100644 (file)
@@ -519,16 +519,27 @@ out_unlock:
  *  * 2.0 ns (which causes the data to be sampled exactly halfway between
  *    clock transitions at 1000 Mbps) if delays should be enabled
  */
-static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
-                                  u16 rgmii_rx_delay_mask,
-                                  u16 rgmii_tx_delay_mask)
+static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
+                                    u16 rgmii_rx_delay_mask,
+                                    u16 rgmii_tx_delay_mask)
 {
        u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
        u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
        u16 reg_val = 0;
-       int rc;
+       u16 mask = 0;
+       int rc = 0;
 
-       mutex_lock(&phydev->lock);
+       /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
+        * to be unset for all PHY modes, so do that as part of the paged
+        * register modification.
+        * For some family members (like VSC8530/31/40/41) this bit is reserved
+        * and read-only, and the RX clock is enabled by default.
+        */
+       if (rgmii_cntl == VSC8502_RGMII_CNTL)
+               mask |= VSC8502_RGMII_RX_CLK_DISABLE;
+
+       if (phy_interface_is_rgmii(phydev))
+               mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
 
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
            phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -537,31 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
            phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
                reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
 
-       rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
-                             rgmii_cntl,
-                             rgmii_rx_delay_mask | rgmii_tx_delay_mask,
-                             reg_val);
-
-       mutex_unlock(&phydev->lock);
+       if (mask)
+               rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+                                     rgmii_cntl, mask, reg_val);
 
        return rc;
 }
 
 static int vsc85xx_default_config(struct phy_device *phydev)
 {
-       int rc;
-
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
 
-       if (phy_interface_mode_is_rgmii(phydev->interface)) {
-               rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
-                                            VSC8502_RGMII_RX_DELAY_MASK,
-                                            VSC8502_RGMII_TX_DELAY_MASK);
-               if (rc)
-                       return rc;
-       }
-
-       return 0;
+       return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
+                                        VSC8502_RGMII_RX_DELAY_MASK,
+                                        VSC8502_RGMII_TX_DELAY_MASK);
 }
 
 static int vsc85xx_get_tunable(struct phy_device *phydev,
@@ -1758,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
        if (ret)
                return ret;
 
-       if (phy_interface_is_rgmii(phydev)) {
-               ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
-                                             VSC8572_RGMII_RX_DELAY_MASK,
-                                             VSC8572_RGMII_TX_DELAY_MASK);
-               if (ret)
-                       return ret;
-       }
+       ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
+                                       VSC8572_RGMII_RX_DELAY_MASK,
+                                       VSC8572_RGMII_TX_DELAY_MASK);
+       if (ret)
+               return ret;
 
        ret = genphy_soft_reset(phydev);
        if (ret)
@@ -2317,6 +2315,30 @@ static int vsc85xx_probe(struct phy_device *phydev)
 /* Microsemi VSC85xx PHYs */
 static struct phy_driver vsc85xx_driver[] = {
 {
+       .phy_id         = PHY_ID_VSC8501,
+       .name           = "Microsemi GE VSC8501 SyncE",
+       .phy_id_mask    = 0xfffffff0,
+       /* PHY_BASIC_FEATURES */
+       .soft_reset     = &genphy_soft_reset,
+       .config_init    = &vsc85xx_config_init,
+       .config_aneg    = &vsc85xx_config_aneg,
+       .read_status    = &vsc85xx_read_status,
+       .handle_interrupt = vsc85xx_handle_interrupt,
+       .config_intr    = &vsc85xx_config_intr,
+       .suspend        = &genphy_suspend,
+       .resume         = &genphy_resume,
+       .probe          = &vsc85xx_probe,
+       .set_wol        = &vsc85xx_wol_set,
+       .get_wol        = &vsc85xx_wol_get,
+       .get_tunable    = &vsc85xx_get_tunable,
+       .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
+},
+{
        .phy_id         = PHY_ID_VSC8502,
        .name           = "Microsemi GE VSC8502 SyncE",
        .phy_id_mask    = 0xfffffff0,
@@ -2656,6 +2678,8 @@ static struct phy_driver vsc85xx_driver[] = {
 module_phy_driver(vsc85xx_driver);
 
 static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+       { PHY_ID_VSC8501, 0xfffffff0, },
+       { PHY_ID_VSC8502, 0xfffffff0, },
        { PHY_ID_VSC8504, 0xfffffff0, },
        { PHY_ID_VSC8514, 0xfffffff0, },
        { PHY_ID_VSC8530, 0xfffffff0, },
index a4111f1..e237949 100644 (file)
@@ -2225,6 +2225,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 
        ASSERT_RTNL();
 
+       /* Mask out unsupported advertisements */
+       linkmode_and(config.advertising, kset->link_modes.advertising,
+                    pl->supported);
+
        if (pl->phydev) {
                /* We can rely on phylib for this update; we also do not need
                 * to update the pl->link_config settings:
@@ -2249,10 +2253,6 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
 
        config = pl->link_config;
 
-       /* Mask out unsupported advertisements */
-       linkmode_and(config.advertising, kset->link_modes.advertising,
-                    pl->supported);
-
        /* FIXME: should we reject autoneg if phy/mac does not support it? */
        switch (kset->base.autoneg) {
        case AUTONEG_DISABLE:
index d10606f..555b0b1 100644 (file)
@@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev)
 
        team->dev = dev;
        team_set_no_mode(team);
+       team->notifier_ctx = false;
 
        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
        if (!team->pcpu_stats)
@@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused,
                team_del_slave(port->team->dev, dev);
                break;
        case NETDEV_FEAT_CHANGE:
-               team_compute_features(port->team);
+               if (!port->team->notifier_ctx) {
+                       port->team->notifier_ctx = true;
+                       team_compute_features(port->team);
+                       port->team->notifier_ctx = false;
+               }
                break;
        case NETDEV_PRECHANGEMTU:
                /* Forbid changing the MTU of the underlying device */
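
The notifier_ctx flag above breaks a feature-recomputation recursion: team_compute_features() updates the team device's features, which can itself raise NETDEV_FEAT_CHANGE for the same team. A minimal sketch of such a recursion guard; a plain bool is only safe because every caller runs under the same lock (rtnl, in the notifier path):

#include <linux/types.h>

struct agg_dev {
	bool in_feat_update;	/* plain bool: all callers hold rtnl */
};

static void agg_recompute_features(struct agg_dev *agg)
{
	if (agg->in_feat_update)
		return;		/* nested NETDEV_FEAT_CHANGE: ignore */

	agg->in_feat_update = true;
	/* ... update dev->features; may re-trigger the notifier ... */
	agg->in_feat_update = false;
}
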
index d4d0a41..d75456a 100644 (file)
@@ -1977,6 +1977,14 @@ napi_busy:
                int queue_len;
 
                spin_lock_bh(&queue->lock);
+
+               if (unlikely(tfile->detached)) {
+                       spin_unlock_bh(&queue->lock);
+                       rcu_read_unlock();
+                       err = -EBUSY;
+                       goto free_skb;
+               }
+
                __skb_queue_tail(queue, skb);
                queue_len = skb_queue_len(queue);
                spin_unlock(&queue->lock);
@@ -2512,6 +2520,13 @@ build:
        if (tfile->napi_enabled) {
                queue = &tfile->sk.sk_write_queue;
                spin_lock(&queue->lock);
+
+               if (unlikely(tfile->detached)) {
+                       spin_unlock(&queue->lock);
+                       kfree_skb(skb);
+                       return -EBUSY;
+               }
+
                __skb_queue_tail(queue, skb);
                spin_unlock(&queue->lock);
                ret = 1;
index 6ce8f4f..db05622 100644 (file)
@@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
        else
                min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
 
-       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
-       if (max == 0)
+       if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
                max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+       else
+               max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
+                             USB_CDC_NCM_NTB_MIN_OUT_SIZE,
+                             CDC_NCM_NTB_MAX_SIZE_TX);
 
        /* some devices set dwNtbOutMaxSize too low for the above default */
        min = min(min, max);
@@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                         * further.
                         */
                        if (skb_out == NULL) {
+                               /* If even the smallest allocation fails, abort. */
+                               if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
+                                       goto alloc_failed;
                                ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
                                                              (unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
                                ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
@@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                        skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
 
                        /* No allocation possible so we will abort */
-                       if (skb_out == NULL) {
-                               if (skb != NULL) {
-                                       dev_kfree_skb_any(skb);
-                                       dev->net->stats.tx_dropped++;
-                               }
-                               goto exit_no_skb;
-                       }
+                       if (!skb_out)
+                               goto alloc_failed;
                        ctx->tx_low_mem_val--;
                }
                if (ctx->is_ndp16) {
@@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
        return skb_out;
 
+alloc_failed:
+       if (skb) {
+               dev_kfree_skb_any(skb);
+               dev->net->stats.tx_dropped++;
+       }
 exit_no_skb:
        /* Start timer, if there is a remaining non-empty skb */
        if (ctx->tx_curr_skb != NULL && n > 0)
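
The cdc_ncm fix clamps the device-reported dwNtbOutMaxSize into [USB_CDC_NCM_NTB_MIN_OUT_SIZE, CDC_NCM_NTB_MAX_SIZE_TX], and the new alloc_failed label lets the shrink-and-retry allocation loop abort once the buffer size is already at that floor, instead of retrying indefinitely. A sketch of clamping an untrusted device value (MY_* constants are hypothetical):

#include <linux/minmax.h>
#include <linux/types.h>

#define MY_MIN_TX	2048u	/* hypothetical protocol minimum */
#define MY_MAX_TX	32768u	/* hypothetical driver maximum */

static u32 pick_tx_size(u32 device_reported)
{
	/* 0 means "not set": use the default maximum; otherwise clamp the
	 * untrusted device value into the range the driver can honour
	 */
	if (!device_reported)
		return MY_MAX_TX;
	return clamp_t(u32, device_reported, MY_MIN_TX, MY_MAX_TX);
}
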
index a12ae26..56ca1d2 100644 (file)
@@ -1868,6 +1868,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        return received;
 }
 
+static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+       virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+       napi_disable(&vi->rq[qp_index].napi);
+       xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+}
+
+static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+       struct net_device *dev = vi->dev;
+       int err;
+
+       err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
+                              vi->rq[qp_index].napi.napi_id);
+       if (err < 0)
+               return err;
+
+       err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
+                                        MEM_TYPE_PAGE_SHARED, NULL);
+       if (err < 0)
+               goto err_xdp_reg_mem_model;
+
+       virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+       virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+
+       return 0;
+
+err_xdp_reg_mem_model:
+       xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+       return err;
+}
+
 static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
@@ -1881,22 +1913,20 @@ static int virtnet_open(struct net_device *dev)
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
 
-               err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
+               err = virtnet_enable_queue_pair(vi, i);
                if (err < 0)
-                       return err;
-
-               err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
-                                                MEM_TYPE_PAGE_SHARED, NULL);
-               if (err < 0) {
-                       xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-                       return err;
-               }
-
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-               virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
+                       goto err_enable_qp;
        }
 
        return 0;
+
+err_enable_qp:
+       disable_delayed_refill(vi);
+       cancel_delayed_work_sync(&vi->refill);
+
+       for (i--; i >= 0; i--)
+               virtnet_disable_queue_pair(vi, i);
+       return err;
 }
 
 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@@ -2305,11 +2335,8 @@ static int virtnet_close(struct net_device *dev)
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
-               virtnet_napi_tx_disable(&vi->sq[i].napi);
-               napi_disable(&vi->rq[i].napi);
-               xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-       }
+       for (i = 0; i < vi->max_queue_pairs; i++)
+               virtnet_disable_queue_pair(vi, i);
 
        return 0;
 }
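
The virtio-net refactor pairs virtnet_enable_queue_pair() with a matching disable helper precisely so virtnet_open() can unwind on failure: the backwards for (i--; i >= 0; i--) loop tears down only the queue pairs that were successfully enabled. A generic sketch of that unwind pattern (enable_one()/disable_one() are hypothetical):

struct dev_ctx;
int enable_one(struct dev_ctx *ctx, int i);	/* hypothetical helpers */
void disable_one(struct dev_ctx *ctx, int i);

static int enable_all(struct dev_ctx *ctx, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = enable_one(ctx, i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* roll back only the entries that were actually enabled */
	for (i--; i >= 0; i--)
		disable_one(ctx, i);
	return err;
}
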
index 9fc7c08..67b4bac 100644 (file)
@@ -651,7 +651,7 @@ struct b43_iv {
        union {
                __be16 d16;
                __be32 d32;
-       } data __packed;
+       } __packed data;
 } __packed;
 
 
index 6b0cec4..f49365d 100644 (file)
@@ -379,7 +379,7 @@ struct b43legacy_iv {
        union {
                __be16 d16;
                __be32 d32;
-       } data __packed;
+       } __packed data;
 } __packed;
 
 #define B43legacy_PHYMODE(phytype)     (1 << (phytype))
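
Both b43 fixes move __packed from after the member name to after the union's closing brace. In the old position the attribute attaches to the declared variable, where clang reportedly warns that it is ignored; after the brace it packs the unnamed union type itself. A restatement of the corrected layout, with a hypothetical struct name:

#include <linux/types.h>

struct my_iv {			/* hypothetical name, same shape as b43_iv */
	__be16 offset_size;
	union {
		__be16 d16;
		__be32 d32;
	} __packed data;	/* attribute now packs the union type */
} __packed;
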
index ff710b0..00679a9 100644 (file)
@@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;
 
+       if (!id) {
+               dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device);
+               return -ENODEV;
+       }
+
        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
        brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
index 59f3e9c..8022068 100644 (file)
@@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev)
 }
 #endif
 
+/* Forward declaration for pci_match_id() call */
+static const struct pci_device_id brcmf_pcie_devid_table[];
+
 static int
 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct brcmf_core *core;
        struct brcmf_bus *bus;
 
+       if (!id) {
+               id = pci_match_id(brcmf_pcie_devid_table, pdev);
+               if (!id) {
+                       pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device);
+                       return -ENODEV;
+               }
+       }
+
        brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
 
        ret = -ENOMEM;
index 246843a..2178675 100644 (file)
@@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
        brcmf_usb_detach(devinfo);
 }
 
+/* Forward declaration for usb_match_id() call */
+static const struct usb_device_id brcmf_usb_devid_table[];
+
 static int
 brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
@@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        u32 num_of_eps;
        u8 endpoint_num, ep;
 
+       if (!id) {
+               id = usb_match_id(intf, brcmf_usb_devid_table);
+               if (!id) {
+                       dev_err(&intf->dev, "Error could not find matching usb_device_id\n");
+                       return -ENODEV;
+               }
+       }
+
        brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
 
        devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
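
All three brcmfmac bus probes harden against being entered with id == NULL (possible when a device is bound outside the driver's match table). PCI and USB can recover the entry with pci_match_id() and usb_match_id(); SDIO has no equivalent lookup, so that probe simply fails with -ENODEV. A sketch of the PCI variant, with hypothetical names and an illustrative device ID:

#include <linux/pci.h>

static const struct pci_device_id my_pci_tbl[];	/* fwd decl for pci_match_id() */

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (!id) {
		id = pci_match_id(my_pci_tbl, pdev);
		if (!id) {
			pci_err(pdev, "no pci_device_id for %x:%x\n",
				pdev->vendor, pdev->device);
			return -ENODEV;
		}
	}
	/* ... from here on, id (and id->driver_data) is valid ... */
	return 0;
}

static const struct pci_device_id my_pci_tbl[] = {
	{ PCI_DEVICE(0x14e4, 0x43ec) },	/* illustrative vendor/device */
	{ }
};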
index 5f4a513..cb9181f 100644 (file)
@@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
        },
        { .ident = "ASUS",
          .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                },
        },
        {}
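
The one-character change above ("ASUSTek" to "ASUSTeK") matters because DMI_MATCH() is a case-sensitive substring comparison against the strings the firmware reports; a casing mismatch silently turns the entry into dead code. Minimal usage sketch:

#include <linux/dmi.h>

static const struct dmi_system_id my_approved_list[] = {
	{
		.ident = "ASUS",
		.matches = {
			/* case-sensitive substring match on the DMI field */
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
		},
	},
	{}
};

static bool my_platform_approved(void)
{
	/* dmi_check_system() returns the number of matching entries */
	return dmi_check_system(my_approved_list) > 0;
}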
index d9faaae..5521997 100644 (file)
@@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
 }
 
 static void *
-iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
-                            struct iwl_dump_ini_region_data *reg_data,
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
                             struct iwl_fw_ini_monitor_dump *data,
                             const struct iwl_fw_mon_regs *addrs)
 {
-       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
-       u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
-
        if (!iwl_trans_grab_nic_access(fwrt->trans)) {
                IWL_ERR(fwrt, "Failed to get monitor header\n");
                return NULL;
@@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
                                  void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+       u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
                                            &fwrt->trans->cfg->mon_dram_regs);
 }
 
@@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
                                  void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+       u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
                                            &fwrt->trans->cfg->mon_smem_regs);
 }
 
@@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt,
+                                           /* no offset calculation later */
+                                           IWL_FW_INI_ALLOCATION_ID_DBGC1,
+                                           mon_dump,
                                            &fwrt->trans->cfg->mon_dbgi_regs);
 }
 
index 3963a0d..652a603 100644 (file)
@@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                rcu_read_lock();
 
                sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+                       rcu_read_unlock();
+                       return PTR_ERR_OR_ZERO(sta);
+               }
+
                if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
                        FTM_PUT_FLAG(PMF);
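
fw_id_to_mac_id[] entries can hold an ERR_PTR as well as NULL, so a plain NULL test is not enough; several hunks in this pull switch such lookups to IS_ERR_OR_NULL() and make sure the RCU read lock is dropped on the bail-out path. The shape, with illustrative types:

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/rcupdate.h>

struct sta_entry { bool mfp; };

struct fw_map {
	struct sta_entry __rcu *fw_id_to_sta[16];	/* illustrative table */
};

static int my_peek_sta(struct fw_map *m, int sta_id)
{
	struct sta_entry *sta;
	int ret = 0;

	rcu_read_lock();
	sta = rcu_dereference(m->fw_id_to_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		/* ERR_PTR yields its errno, NULL yields 0 */
		ret = PTR_ERR_OR_ZERO(sta);
		goto out;
	}
	/* ... sta is safe to read until rcu_read_unlock() ... */
out:
	rcu_read_unlock();
	return ret;
}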
 
index b35c96c..205c09b 100644 (file)
@@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
        },
                { .ident = "LENOVO",
          .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                },
        },
        { .ident = "DELL",
@@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        iwl_mvm_tas_init(mvm);
        iwl_mvm_leds_sync(mvm);
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
+       if (iwl_rfi_supported(mvm)) {
                if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
                        iwl_rfi_send_config_cmd(mvm, NULL);
        }
index eb828de..3814915 100644 (file)
@@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                if (mvmvif->link[i]->phy_ctxt)
                                        count++;
 
-                       /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be
-                        * defined per HW
-                        */
-                       if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM)
-                               return -EINVAL;
+                       if (vif->type == NL80211_IFTYPE_AP) {
+                               if (count > mvm->fw->ucode_capa.num_beacons)
+                                       return -EOPNOTSUPP;
+                       /* this should be per HW or such */
+                       } else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) {
+                               return -EOPNOTSUPP;
+                       }
                }
 
                /* Catch early if driver tries to activate or deactivate a link
index 0f01b62..17f788a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_sta *sta)
 {
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
        /* Beacon interval check - firmware will crash if the beacon
         * interval is less than 16. We can't avoid connecting at all,
@@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
         * wpa_s will blocklist the AP...
         */
 
-       for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                        IEEE80211_MLD_MAX_NUM_LINKS) {
-               struct ieee80211_link_sta *link_sta =
-                       link_sta_dereference_protected(sta, i);
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct ieee80211_bss_conf *link_conf =
-                       link_conf_dereference_protected(vif, i);
+                       link_conf_dereference_protected(vif, link_id);
 
-               if (!link_conf || !link_sta)
+               if (!link_conf)
                        continue;
 
                if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) {
@@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw,
                                       bool is_sta)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
-       for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                        IEEE80211_MLD_MAX_NUM_LINKS) {
-               struct ieee80211_link_sta *link_sta =
-                       link_sta_dereference_protected(sta, i);
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct ieee80211_bss_conf *link_conf =
-                       link_conf_dereference_protected(vif, i);
+                       link_conf_dereference_protected(vif, link_id);
 
-               if (!link_conf || !link_sta || !mvmvif->link[i])
+               if (!link_conf || !mvmvif->link[link_id])
                        continue;
 
                link_conf->he_support = link_sta->he_cap.has_he;
 
                if (is_sta) {
-                       mvmvif->link[i]->he_ru_2mhz_block = false;
+                       mvmvif->link[link_id]->he_ru_2mhz_block = false;
                        if (link_sta->he_cap.has_he)
-                               iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i,
+                               iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif,
+                                                                  link_id,
                                                                   link_conf);
                }
        }
@@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
                                   struct iwl_mvm_sta_state_ops *callbacks)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_link_sta *link_sta;
        unsigned int i;
        int ret;
 
@@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
                                           NL80211_TDLS_SETUP);
        }
 
-       for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-               struct ieee80211_link_sta *link_sta;
-
-               link_sta = link_sta_dereference_protected(sta, i);
-               if (!link_sta)
-                       continue;
-
+       for_each_sta_active_link(vif, sta, link_sta, i)
                link_sta->agg.max_rc_amsdu_len = 1;
-       }
+
        ieee80211_sta_recalc_aggregates(sta);
 
        if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
                if (!mvm->mld_api_is_used)
                        goto out;
 
-               for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                                IEEE80211_MLD_MAX_NUM_LINKS) {
+               for_each_sta_active_link(vif, sta, link_sta, link_id) {
                        struct ieee80211_bss_conf *link_conf =
-                               link_conf_dereference_protected(vif, i);
+                               link_conf_dereference_protected(vif, link_id);
 
                        if (WARN_ON(!link_conf))
                                return -EINVAL;
-                       if (!mvmvif->link[i])
+                       if (!mvmvif->link[link_id])
                                continue;
 
                        iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
                 * from the AP now.
                 */
                iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
+
+               /* Also free dup data just in case any assertions below fail */
+               kfree(mvm_sta->dup_data);
        }
 
        mutex_lock(&mvm->mutex);
index fbc2d5e..7fb66c5 100644 (file)
@@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
                                n_active++;
                }
 
-               if (vif->type == NL80211_IFTYPE_AP &&
-                   n_active > mvm->fw->ucode_capa.num_beacons)
-                       return -EOPNOTSUPP;
-               else if (n_active > 1)
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       if (n_active > mvm->fw->ucode_capa.num_beacons)
+                               return -EOPNOTSUPP;
+               } else if (n_active > 1) {
                        return -EOPNOTSUPP;
+               }
        }
 
        for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
index 0bfdf44..85a4ce8 100644 (file)
@@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
                if (ret)
                        return ret;
-       }
 
-       spin_lock_init(&mvm_sta->lock);
+               spin_lock_init(&mvm_sta->lock);
 
-       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
-               ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
-       else
                ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
                                       STATION_TYPE_PEER);
+       } else {
+               ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
+       }
+
        if (ret)
                goto err;
 
@@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_link_sta *link_sta;
        unsigned int link_id;
-       int ret = 0;
+       int ret = -EINVAL;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        lockdep_assert_held(&mvm->mutex);
 
-       kfree(mvm_sta->dup_data);
-
        /* flush its queues here since we are freeing mvm_sta */
        for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct iwl_mvm_link_sta *mvm_link_sta =
index 6e7470d..9e5008e 100644 (file)
@@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
                                u32 old_sta_mask,
                                u32 new_sta_mask);
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm);
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
                            struct iwl_rfi_lut_entry *rfi_table);
 struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
index 6d18a1f..fdf60af 100644 (file)
@@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
 
                n_channels =  __le32_to_cpu(mcc_resp->n_channels);
+               if (iwl_rx_packet_payload_len(pkt) !=
+                   struct_size(mcc_resp, channels, n_channels)) {
+                       resp_cp = ERR_PTR(-EINVAL);
+                       goto exit;
+               }
                resp_len = sizeof(struct iwl_mcc_update_resp) +
                           n_channels * sizeof(__le32);
                resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
@@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
 
                n_channels =  __le32_to_cpu(mcc_resp_v3->n_channels);
+               if (iwl_rx_packet_payload_len(pkt) !=
+                   struct_size(mcc_resp_v3, channels, n_channels)) {
+                       resp_cp = ERR_PTR(-EINVAL);
+                       goto exit;
+               }
                resp_len = sizeof(struct iwl_mcc_update_resp) +
                           n_channels * sizeof(__le32);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
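
Both response versions now verify that the received payload length is exactly struct_size(resp, channels, n_channels) before trusting the firmware-supplied count, so a short or inconsistent packet can no longer drive an out-of-bounds copy. struct_size() computes header plus flexible array with overflow checking. A reduced sketch under those assumptions:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct mcc_resp {
	__le32 n_channels;
	__le32 channels[];	/* sized by n_channels, per the firmware */
};

static struct mcc_resp *my_copy_resp(const void *payload, size_t len)
{
	const struct mcc_resp *resp = payload;
	u32 n;

	if (len < sizeof(*resp))
		return ERR_PTR(-EINVAL);

	n = le32_to_cpu(resp->n_channels);
	/* the claimed element count must match what actually arrived */
	if (len != struct_size(resp, channels, n))
		return ERR_PTR(-EINVAL);

	return kmemdup(resp, len, GFP_KERNEL);
}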
index bb77bc9..2ecd32b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 - 2021 Intel Corporation
+ * Copyright (C) 2020 - 2022 Intel Corporation
  */
 
 #include "mvm.h"
@@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
                PHY_BAND_6, PHY_BAND_6,}},
 };
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm)
+{
+       /* The feature depends on a platform bugfix, so for now
+        * it's always disabled.
+        * When the platform support detection is implemented we should
+        * check FW TLV and platform support instead.
+        */
+       return false;
+}
+
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
 {
        int ret;
@@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
                .len[0] = sizeof(cmd),
        };
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+       if (!iwl_rfi_supported(mvm))
                return -EOPNOTSUPP;
 
        lockdep_assert_held(&mvm->mutex);
@@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
                .flags = CMD_WANT_SKB,
        };
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+       if (!iwl_rfi_supported(mvm))
                return ERR_PTR(-EOPNOTSUPP);
 
        mutex_lock(&mvm->mutex);
index a4c1e3b..23266d0 100644 (file)
@@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
                return;
 
        lq_sta = mvm_sta;
+
+       spin_lock(&lq_sta->pers.lock);
        iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
                                     info->band, &info->control.rates[0]);
        info->control.rates[0].count = 1;
@@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
                iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
                                             &txrc->reported_rate);
        }
+       spin_unlock(&lq_sta->pers.lock);
 }
 
 static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
index e1d02c2..6226e4e 100644 (file)
@@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
 
                rcu_read_lock();
                sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+                       rcu_read_unlock();
+                       goto out;
+               }
+
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
                /* SN is set to the last expired frame + 1 */
@@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
                          entries[index].e.reorder_time +
                          1 + RX_REORDER_BUF_TIMEOUT_MQ);
        }
+
+out:
        spin_unlock(&buf->lock);
 }
 
@@ -2512,7 +2519,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                                /* Unblock BCAST / MCAST station */
                                iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
-                               cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+                               cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
                        }
                }
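
The _sync variant can sleep while waiting for a running work item, which is not allowed in this RX path; plain cancel_delayed_work() only removes a pending item and returns immediately. The cost is that an already-running handler may still complete, so the change is only correct where the handler tolerates that, as here. In outline:

#include <linux/workqueue.h>

struct my_mvm {
	struct delayed_work unblock_dwork;
};

/* Sleepable teardown: guarantees the handler is not running afterwards. */
static void my_teardown(struct my_mvm *m)
{
	cancel_delayed_work_sync(&m->unblock_dwork);
}

/* Atomic RX path: must not sleep, so only dequeue a pending item. */
static void my_rx_unblock(struct my_mvm *m)
{
	cancel_delayed_work(&m->unblock_dwork);
}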
 
index 5469d63..05a54a6 100644 (file)
@@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
         * A-MDPU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
-       if (!sta)
+       if (IS_ERR_OR_NULL(sta))
                goto unlock;
 
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (iwl_mvm_has_new_rx_api(mvm))
-               kfree(mvm_sta->dup_data);
-
        ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
        if (ret)
                return ret;
@@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
                u8 sta_id = mvmvif->deflink.ap_sta_id;
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+                       return NULL;
+
                return sta->addr;
        }
 
@@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 
        if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
                addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+               if (!addr) {
+                       IWL_ERR(mvm, "Failed to find mac address\n");
+                       return -EINVAL;
+               }
+
                /* get phase 1 key from mac80211 */
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
index 10d7178..00719e1 100644 (file)
@@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-       if (WARN_ON_ONCE(!sta || !sta->wme)) {
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
                rcu_read_unlock();
                return;
        }
index a5ec0f6..fabf637 100644 (file)
@@ -173,7 +173,7 @@ enum {
 #define MT_TXS5_MPDU_TX_CNT            GENMASK(31, 23)
 
 #define MT_TXS6_MPDU_FAIL_CNT          GENMASK(31, 23)
-
+#define MT_TXS7_MPDU_RETRY_BYTE                GENMASK(22, 0)
 #define MT_TXS7_MPDU_RETRY_CNT         GENMASK(31, 23)
 
 /* RXD DW0 */
index ee0fbfc..d39a3cc 100644 (file)
@@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
        /* PPDU based reporting */
        if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
                stats->tx_bytes +=
-                       le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+                       le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
+                       le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
                stats->tx_packets +=
                        le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
                stats->tx_failed +=
index 130eb7b..39a4a73 100644 (file)
@@ -1088,7 +1088,7 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
                else if (beacon && mvif->beacon_rates_idx)
                        idx = mvif->beacon_rates_idx;
 
-               txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx);
+               txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
                txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
        }
 }
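
txwi[] holds little-endian words, so the raw FIELD_PREP() result (a host-endian u32) has to pass through cpu_to_le32() before being OR-ed in, just like the MT_TXD3_BA_DISABLE line next to it; on big-endian machines the unconverted form lands the rate field in the wrong bits. A minimal sketch with an illustrative field position:

#include <asm/byteorder.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define MY_TXD6_TX_RATE	GENMASK(21, 16)	/* illustrative, not MT7996's layout */

static void my_set_fixed_rate(__le32 *txwi, u32 idx)
{
	/* convert the host-endian field before merging into the LE word */
	txwi[6] |= cpu_to_le32(FIELD_PREP(MY_TXD6_TX_RATE, idx));
}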
index 8eafbf1..808c1c8 100644 (file)
@@ -1803,6 +1803,7 @@ struct rtl8xxxu_priv {
        u32 rege9c;
        u32 regeb4;
        u32 regebc;
+       u32 regrcr;
        int next_mbox;
        int nr_out_eps;
 
index fd8c8c6..831639d 100644 (file)
@@ -4171,6 +4171,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
                RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
        rtl8xxxu_write32(priv, REG_RCR, val32);
+       priv->regrcr = val32;
 
        if (fops->init_reg_rxfltmap) {
                /* Accept all data frames */
@@ -6501,7 +6502,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
                                      unsigned int *total_flags, u64 multicast)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
-       u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
+       u32 rcr = priv->regrcr;
 
        dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
                __func__, changed_flags, *total_flags);
@@ -6547,6 +6548,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
         */
 
        rtl8xxxu_write32(priv, REG_RCR, rcr);
+       priv->regrcr = rcr;
 
        *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
                         FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
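
Instead of reading REG_RCR back before each read-modify-write, the driver now keeps a shadow copy in priv->regrcr, updated on every write. This is the usual shadow-register pattern for registers whose readback is unreliable or racy, which the switch away from rtl8xxxu_read32() here suggests. An outline with hypothetical MMIO helpers:

#include <linux/types.h>

#define MY_REG_RCR	0x0608	/* illustrative offset */

struct my_priv {
	u32 regrcr;	/* shadow of the last value written to RCR */
};

static void my_write32(struct my_priv *priv, u32 reg, u32 val);	/* stand-in */

static void my_write_rcr(struct my_priv *priv, u32 val)
{
	my_write32(priv, MY_REG_RCR, val);
	priv->regrcr = val;		/* keep the shadow in sync */
}

static void my_configure_filter(struct my_priv *priv, u32 set_bits)
{
	u32 rcr = priv->regrcr;		/* RMW on the shadow, no readback */

	rcr |= set_bits;
	my_write_rcr(priv, rcr);
}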
index 7aa6eda..a6c024c 100644 (file)
@@ -918,7 +918,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
 
        if (changed & IEEE80211_RC_BW_CHANGED)
-               rtw_update_sta_info(rtwdev, si, true);
+               ieee80211_queue_work(rtwdev->hw, &si->rc_work);
 }
 
 const struct ieee80211_ops rtw_ops = {
index 5bf6b45..d30a191 100644 (file)
@@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
        return mac_id;
 }
 
+static void rtw_sta_rc_work(struct work_struct *work)
+{
+       struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
+                                              rc_work);
+       struct rtw_dev *rtwdev = si->rtwdev;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_update_sta_info(rtwdev, si, true);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                struct ieee80211_vif *vif)
 {
@@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
                return -ENOSPC;
 
+       si->rtwdev = rtwdev;
        si->sta = sta;
        si->vif = vif;
        si->init_ra_lv = 1;
        ewma_rssi_init(&si->avg_rssi);
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
                rtw_txq_init(rtwdev, sta->txq[i]);
+       INIT_WORK(&si->rc_work, rtw_sta_rc_work);
 
        rtw_update_sta_info(rtwdev, si, true);
        rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
        int i;
 
+       cancel_work_sync(&si->rc_work);
+
        rtw_release_macid(rtwdev, si->mac_id);
        if (fw_exist)
                rtw_fw_media_status_report(rtwdev, si->mac_id, false);
index a563285..9e841f6 100644 (file)
@@ -743,6 +743,7 @@ struct rtw_txq {
 DECLARE_EWMA(rssi, 10, 16);
 
 struct rtw_sta_info {
+       struct rtw_dev *rtwdev;
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif;
 
@@ -767,6 +768,8 @@ struct rtw_sta_info {
 
        bool use_cfg_mask;
        struct cfg80211_bitrate_mask *mask;
+
+       struct work_struct rc_work;
 };
 
 enum rtw_bfee_role {
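
sta_rc_update is called by mac80211 in a context that may not sleep, while rtw_update_sta_info() needs rtwdev->mutex; the fix moves the update into per-station work that takes the mutex in process context, initialized at sta_add and flushed with cancel_work_sync() at sta_remove. The container_of() plumbing in outline, with hypothetical names:

#include <linux/mutex.h>
#include <linux/workqueue.h>

struct my_dev {
	struct mutex mutex;
};

struct my_sta {
	struct my_dev *dev;
	struct work_struct rc_work;	/* INIT_WORK() at sta_add time */
};

static void my_sta_rc_work(struct work_struct *work)
{
	struct my_sta *si = container_of(work, struct my_sta, rc_work);

	mutex_lock(&si->dev->mutex);	/* sleepable here */
	/* ... refresh rate-control state ... */
	mutex_unlock(&si->dev->mutex);
}

/* atomic callback: only queue the work */
static void my_sta_rc_update(struct my_sta *si)
{
	schedule_work(&si->rc_work);	/* the driver uses ieee80211_queue_work() */
}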
index af0459a..06fce7c 100644 (file)
@@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
        u8 buf[2];
        int i;
 
-       if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) {
-               sdio_writew(rtwsdio->sdio_func, val, addr, err_ret);
-               return;
-       }
-
        *(__le16 *)buf = cpu_to_le16(val);
 
        for (i = 0; i < 2; i++) {
@@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
        u8 buf[2];
        int i;
 
-       if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2))
-               return sdio_readw(rtwsdio->sdio_func, addr, err_ret);
-
        for (i = 0; i < 2; i++) {
                buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
                if (*err_ret)
index 30647f0..ad1d795 100644 (file)
@@ -78,7 +78,7 @@ struct rtw_usb {
        u8 pipe_interrupt;
        u8 pipe_in;
        u8 out_ep[RTW_USB_EP_MAX];
-       u8 qsel_to_ep[TX_DESC_QSEL_MAX];
+       int qsel_to_ep[TX_DESC_QSEL_MAX];
        u8 usb_txagg_num;
 
        struct workqueue_struct *txwq, *rxwq;
index b8019cf..512de49 100644 (file)
@@ -1425,6 +1425,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
        .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
        /* PCIE 64 */
        .wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+       /* 8852B PCIE SCC */
+       .wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
        /* DLFW */
        .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
        /* 8852C DLFW */
@@ -1449,6 +1451,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
        .wde_qt4 = {0, 0, 0, 0,},
        /* PCIE 64 */
        .wde_qt6 = {448, 48, 0, 16,},
+       /* 8852B PCIE SCC */
+       .wde_qt7 = {446, 48, 0, 16,},
        /* 8852C DLFW */
        .wde_qt17 = {0, 0, 0,  0,},
        /* 8852C PCIE SCC */
index a8d9847..6ba633c 100644 (file)
@@ -792,6 +792,7 @@ struct rtw89_mac_size_set {
        const struct rtw89_dle_size wde_size0;
        const struct rtw89_dle_size wde_size4;
        const struct rtw89_dle_size wde_size6;
+       const struct rtw89_dle_size wde_size7;
        const struct rtw89_dle_size wde_size9;
        const struct rtw89_dle_size wde_size18;
        const struct rtw89_dle_size wde_size19;
@@ -804,6 +805,7 @@ struct rtw89_mac_size_set {
        const struct rtw89_wde_quota wde_qt0;
        const struct rtw89_wde_quota wde_qt4;
        const struct rtw89_wde_quota wde_qt6;
+       const struct rtw89_wde_quota wde_qt7;
        const struct rtw89_wde_quota wde_qt17;
        const struct rtw89_wde_quota wde_qt18;
        const struct rtw89_ple_quota ple_qt4;
index eaa2ea0..6da1b60 100644 (file)
        RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin"
 
 static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = {
-       {5, 343, grp_0}, /* ACH 0 */
-       {5, 343, grp_0}, /* ACH 1 */
-       {5, 343, grp_0}, /* ACH 2 */
-       {5, 343, grp_0}, /* ACH 3 */
+       {5, 341, grp_0}, /* ACH 0 */
+       {5, 341, grp_0}, /* ACH 1 */
+       {4, 342, grp_0}, /* ACH 2 */
+       {4, 342, grp_0}, /* ACH 3 */
        {0, 0, grp_0}, /* ACH 4 */
        {0, 0, grp_0}, /* ACH 5 */
        {0, 0, grp_0}, /* ACH 6 */
        {0, 0, grp_0}, /* ACH 7 */
-       {4, 344, grp_0}, /* B0MGQ */
-       {4, 344, grp_0}, /* B0HIQ */
+       {4, 342, grp_0}, /* B0MGQ */
+       {4, 342, grp_0}, /* B0HIQ */
        {0, 0, grp_0}, /* B1MGQ */
        {0, 0, grp_0}, /* B1HIQ */
        {40, 0, 0} /* FWCMDQ */
 };
 
 static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = {
-       448, /* Group 0 */
+       446, /* Group 0 */
        0, /* Group 1 */
-       448, /* Public Max */
+       446, /* Public Max */
        0 /* WP threshold */
 };
 
@@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
 };
 
 static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
-       [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
-                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-                          &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+       [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7,
+                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+                          &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
                           &rtw89_mac_size.ple_qt58},
-       [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6,
-                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-                          &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+       [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7,
+                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+                          &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
                           &rtw89_mac_size.ple_qt_52b_wow},
        [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
                            &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
index 9a8faaf..89c7a14 100644 (file)
@@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        ret = -ENOMEM;
                        goto out_free;
                }
+               param.pmsr_capa = pmsr_capa;
+
                ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info);
                if (ret)
                        goto out_free;
-               param.pmsr_capa = pmsr_capa;
        }
 
        ret = mac80211_hwsim_new_radio(info, &param);
index c066b00..829515a 100644 (file)
@@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
        struct ipc_mux_config mux_cfg;
        struct iosm_imem *ipc_imem;
        u8 ctrl_chl_idx = 0;
+       int ret;
 
        ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
 
        if (ipc_imem->phase != IPC_P_RUN) {
                dev_err(ipc_imem->dev,
                        "Modem link down. Exit run state worker.");
-               return;
+               goto err_out;
        }
 
        if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
-       if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
-               ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+       ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
+       if (ret < 0)
+               goto err_out;
+
+       ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+       if (!ipc_imem->mux)
+               goto err_out;
+
+       ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+       if (ret < 0)
+               goto err_ipc_mux_deinit;
 
-       ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
-       if (ipc_imem->mux)
-               ipc_imem->mux->wwan = ipc_imem->wwan;
+       ipc_imem->mux->wwan = ipc_imem->wwan;
 
        while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
                if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
@@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
 
        /* Complete all memory stores after setting bit */
        smp_mb__after_atomic();
+
+       return;
+
+err_ipc_mux_deinit:
+       ipc_mux_deinit(ipc_imem->mux);
+err_out:
+       ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 }
 
 static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
index 66b90cc..109cf89 100644 (file)
@@ -77,8 +77,8 @@ out:
 }
 
 /* Initialize wwan channel */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-                               enum ipc_mux_protocol mux_type)
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+                              enum ipc_mux_protocol mux_type)
 {
        struct ipc_chnl_cfg chnl_cfg = { 0 };
 
@@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
        /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
        if (ipc_imem->cp_version == -1) {
                dev_err(ipc_imem->dev, "invalid CP version");
-               return;
+               return -EIO;
        }
 
        ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
@@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
 
        /* WWAN registration. */
        ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
-       if (!ipc_imem->wwan)
+       if (!ipc_imem->wwan) {
                dev_err(ipc_imem->dev,
                        "failed to register the ipc_wwan interfaces");
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /* Map SKB to DMA for transfer */
index f8afb21..026c5bd 100644 (file)
@@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
  *                             MUX.
  * @ipc_imem:          Pointer to iosm_imem struct.
  * @mux_type:          Type of mux protocol.
+ *
+ * Return: 0 on success and failure value on error
  */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-                               enum ipc_mux_protocol mux_type);
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+                              enum ipc_mux_protocol mux_type);
 
 /**
  * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
index 226fc17..91256e0 100644 (file)
@@ -45,6 +45,7 @@
 #define T7XX_PCI_IREG_BASE             0
 #define T7XX_PCI_EREG_BASE             2
 
+#define T7XX_INIT_TIMEOUT              20
 #define PM_SLEEP_DIS_TIMEOUT_MS                20
 #define PM_ACK_TIMEOUT_MS              1500
 #define PM_AUTOSUSPEND_MS              20000
@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
+       init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
 
        device_init_wakeup(&pdev->dev, true);
@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+       complete_all(&t7xx_dev->init_done);
 }
 
 static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev)
        __t7xx_pci_pm_suspend(pdev);
 }
 
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct t7xx_pci_dev *t7xx_dev;
+
+       t7xx_dev = pci_get_drvdata(pdev);
+       if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+               dev_warn(dev, "Not ready for system sleep.\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
 static int t7xx_pci_pm_suspend(struct device *dev)
 {
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops t7xx_pci_pm_ops = {
+       .prepare = t7xx_pci_pm_prepare,
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
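
t7xx closes a race between system suspend and late device initialization: init completes a completion once runtime PM is set up, and the new .prepare callback refuses to sleep until that completion fires or 20 seconds pass. .prepare runs before any suspend callback, and a nonzero return aborts the transition. Reduced to its parts:

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct my_state {
	struct completion init_done;	/* init_completion() at probe */
};

static void my_init_late(struct my_state *s)
{
	/* ... enable runtime PM ... */
	complete_all(&s->init_done);	/* releases current and future waiters */
}

static int my_pm_prepare(struct device *dev)
{
	struct my_state *s = dev_get_drvdata(dev);

	if (!wait_for_completion_timeout(&s->init_done, 20 * HZ))
		return -ETIMEDOUT;	/* vetoes the suspend */

	return 0;
}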
index 112efa5..f08f1ab 100644 (file)
@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
        struct t7xx_modem       *md;
        struct t7xx_ccmni_ctrl  *ccmni_ctlb;
        bool                    rgu_pci_irq_en;
+       struct completion       init_done;
 
        /* Low Power Items */
        struct list_head        md_pm_entities;
index ccb6eb1..1f0cbb7 100644 (file)
@@ -3585,6 +3585,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
 {
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
+       if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
+               return -EBUSY;
+
        if (device_remove_file_self(dev, attr))
                nvme_delete_ctrl_sync(ctrl);
        return count;
@@ -5045,7 +5048,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
         * that were missed. We identify persistent discovery controllers by
         * checking that they started once before, hence are reconnecting back.
         */
-       if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+       if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
            nvme_discovery_ctrl(ctrl))
                nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
 
@@ -5056,6 +5059,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
        }
 
        nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+       set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
index 9e6e56c..316f3e4 100644 (file)
@@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
        case hwmon_temp_max:
        case hwmon_temp_min:
                if ((!channel && data->ctrl->wctemp) ||
-                   (channel && data->log->temp_sensor[channel - 1])) {
+                   (channel && data->log->temp_sensor[channel - 1] &&
+                    !(data->ctrl->quirks &
+                      NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
                        if (data->ctrl->quirks &
                            NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
                                return 0444;
index 9171452..2bc159a 100644 (file)
@@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
-       blk_mark_disk_dead(head->disk);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
index bf46f12..a2d4f59 100644 (file)
@@ -149,6 +149,11 @@ enum nvme_quirks {
         * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
         */
        NVME_QUIRK_BOGUS_NID                    = (1 << 18),
+
+       /*
+        * No temperature thresholds for channels other than 0 (Composite).
+        */
+       NVME_QUIRK_NO_SECONDARY_TEMP_THRESH     = (1 << 19),
 };
 
 /*
index 7f25c0f..3224458 100644 (file)
@@ -2956,7 +2956,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
         * over a single page.
         */
        dev->ctrl.max_hw_sectors = min_t(u32,
-               NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9);
+               NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
        dev->ctrl.max_segments = NVME_MAX_SEGS;
 
        /*
@@ -3402,6 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x2646, 0x5013),   /* Kingston KC3000, Kingston FURY Renegade */
+               .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
        { PCI_DEVICE(0x2646, 0x5018),   /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x5016),   /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
@@ -3441,6 +3443,10 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G  */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
index c2c9b0d..be967d7 100644 (file)
@@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
 
        for (i = 0; i < pmc->total_blocks; ++i) {
                if (strstr(pmc->block_name[i], "tile")) {
-                       ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);
-                       if (ret < 0)
-                               return ret;
+                       if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+                               return -EINVAL;
 
                        if (tile_num >= pmc->tile_count)
                                continue;
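
The old check could never fire: the kernel's sscanf() returns the number of conversions performed and never a negative errno, so ret < 0 was dead code. Comparing against the expected conversion count is the reliable form:

#include <linux/errno.h>
#include <linux/kernel.h>	/* sscanf() */

static int my_parse_tile(const char *name, int *tile_num)
{
	/* sscanf() reports how many fields matched, never -errno */
	if (sscanf(name, "tile%d", tile_num) != 1)
		return -EINVAL;

	return 0;
}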
index d5bb775..ee5f124 100644 (file)
@@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = {
        { }
 };
 
-int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
 {
        u64 phys_addr;
        u32 hi, low;
 
-       INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+       phys_addr = virt_to_phys(dev->buf);
+       hi = phys_addr >> 32;
+       low = phys_addr & GENMASK(31, 0);
+
+       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+}
 
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
        /* Get Metrics Table Address */
        dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
        if (!dev->buf)
                return -ENOMEM;
 
-       phys_addr = virt_to_phys(dev->buf);
-       hi = phys_addr >> 32;
-       low = phys_addr & GENMASK(31, 0);
+       INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
 
-       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
-       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+       amd_pmf_set_dram_addr(dev);
 
        /*
         * Start collecting the metrics data after a small delay
@@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
        return 0;
 }
 
+static int amd_pmf_resume_handler(struct device *dev)
+{
+       struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+       if (pdev->buf)
+               amd_pmf_set_dram_addr(pdev);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+
 static void amd_pmf_init_features(struct amd_pmf_dev *dev)
 {
        int ret;
@@ -413,6 +430,7 @@ static struct platform_driver amd_pmf_driver = {
                .name = "amd-pmf",
                .acpi_match_table = amd_pmf_acpi_ids,
                .dev_groups = amd_pmf_driver_groups,
+               .pm = pm_sleep_ptr(&amd_pmf_pm),
        },
        .probe = amd_pmf_probe,
        .remove_new = amd_pmf_remove,
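
The DRAM address programming is split into amd_pmf_set_dram_addr() so a resume handler can replay it, evidently because the firmware-side state does not survive suspend; the buf check skips the replay when init never ran. DEFINE_SIMPLE_DEV_PM_OPS() with a NULL suspend hook plus pm_sleep_ptr() keeps everything compiled out without CONFIG_PM_SLEEP. The skeleton:

#include <linux/device.h>
#include <linux/pm.h>

struct my_pmf {
	void *buf;	/* metrics table, NULL until init allocated it */
};

static void my_set_dram_addr(struct my_pmf *d)
{
	/* ... resend the table's physical address to the firmware ... */
}

static int my_resume_handler(struct device *dev)
{
	struct my_pmf *d = dev_get_drvdata(dev);

	if (d->buf)			/* only replay after successful init */
		my_set_dram_addr(d);

	return 0;
}

/* nothing to save on suspend, hence the NULL suspend hook */
static DEFINE_SIMPLE_DEV_PM_OPS(my_pm, NULL, my_resume_handler);
/* hooked up via .pm = pm_sleep_ptr(&my_pm) in the driver struct */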
index e2c9a68..fdf7da0 100644 (file)
@@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
        { KE_IGNORE, 0x79, },  /* Charger type dectection notification */
        { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
+       { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
        { KE_KEY, 0x7c, { KEY_MICMUTE } },
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
        { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
@@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
        { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
        { KE_KEY, 0xB5, { KEY_CALC } },
+       { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
        { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
        { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
        { KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
index 61dffb4..e6ae826 100644 (file)
@@ -208,7 +208,7 @@ static int scan_chunks_sanity_check(struct device *dev)
                        continue;
                reinit_completion(&ifs_done);
                local_work.dev = dev;
-               INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
+               INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
                schedule_work_on(cpu, &local_work.w);
                wait_for_completion(&ifs_done);
                if (ifsd->loading_error) {
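
local_work lives on the stack, and plain INIT_WORK() trips the debug-objects tracking for stack-allocated items; INIT_WORK_ONSTACK() declares the object as on-stack, paired with destroy_work_on_stack() once it is guaranteed idle. The waiting pattern in outline:

#include <linux/completion.h>
#include <linux/workqueue.h>

struct chunk_work {
	struct work_struct w;
	struct completion *done;
};

static void my_chunk_fn(struct work_struct *w)
{
	struct chunk_work *cw = container_of(w, struct chunk_work, w);

	/* ... per-CPU processing ... */
	complete(cw->done);
}

static void my_run_chunk_on(int cpu)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct chunk_work cw = { .done = &done };

	INIT_WORK_ONSTACK(&cw.w, my_chunk_fn);	/* stack object, tracked as such */
	schedule_work_on(cpu, &cw.w);
	wait_for_completion(&done);	/* stack frame must outlive the handler */
	destroy_work_on_stack(&cw.w);
}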
index e0572a2..02fe360 100644 (file)
@@ -304,14 +304,13 @@ struct isst_if_pkg_info {
 static struct isst_if_cpu_info *isst_cpu_info;
 static struct isst_if_pkg_info *isst_pkg_info;
 
-#define ISST_MAX_PCI_DOMAINS   8
-
 static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
 {
        struct pci_dev *matched_pci_dev = NULL;
        struct pci_dev *pci_dev = NULL;
+       struct pci_dev *_pci_dev = NULL;
        int no_matches = 0, pkg_id;
-       int i, bus_number;
+       int bus_number;
 
        if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
            cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
@@ -323,12 +322,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
        if (bus_number < 0)
                return NULL;
 
-       for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
-               struct pci_dev *_pci_dev;
+       for_each_pci_dev(_pci_dev) {
                int node;
 
-               _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
-               if (!_pci_dev)
+               if (_pci_dev->bus->number != bus_number ||
+                   _pci_dev->devfn != PCI_DEVFN(dev, fn))
                        continue;
 
                ++no_matches;
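
The replaced loop assumed PCI domain numbers 0 through 7; for_each_pci_dev() instead walks every device across all domains/segments, so the bus/devfn match also works on systems with sparse or large segment numbers. Note that the iterator holds a reference on each device it yields:

#include <linux/pci.h>

static struct pci_dev *my_find_by_bus_devfn(int bus_number, int dev, int fn)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {	/* all domains, reference held per device */
		if (pdev->bus->number == bus_number &&
		    pdev->devfn == PCI_DEVFN(dev, fn))
			return pdev;	/* still referenced; see below */
	}

	return NULL;
}
/* a non-NULL result must eventually be released with pci_dev_put() */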
index 307ee6f..6f83e99 100644 (file)
@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
  */
 static void ab8500_btemp_external_power_changed(struct power_supply *psy)
 {
-       struct ab8500_btemp *di = power_supply_get_drvdata(psy);
-
-       class_for_each_device(power_supply_class, NULL,
-               di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+       class_for_each_device(power_supply_class, NULL, psy,
+                             ab8500_btemp_get_ext_psy_data);
 }
 
 /* ab8500 btemp driver interrupts and their respective isr */
index 41a7bff..53560fb 100644 (file)
@@ -2407,10 +2407,8 @@ out:
  */
 static void ab8500_fg_external_power_changed(struct power_supply *psy)
 {
-       struct ab8500_fg *di = power_supply_get_drvdata(psy);
-
-       class_for_each_device(power_supply_class, NULL,
-               di->fg_psy, ab8500_fg_get_ext_psy_data);
+       class_for_each_device(power_supply_class, NULL, psy,
+                             ab8500_fg_get_ext_psy_data);
 }
 
 /**
index 05f4131..3be6f3b 100644 (file)
@@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
        mutex_lock(&info->lock);
        info->valid = 0; /* Force updating of the cached registers */
        mutex_unlock(&info->lock);
-       power_supply_changed(info->bat);
+       power_supply_changed(psy);
 }
 
 static struct power_supply_desc fuel_gauge_desc = {
index de67b98..dc33f00 100644 (file)
@@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
        bq24190_charger_set_property(bdi->charger,
                                     POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
                                     &val);
+       power_supply_changed(bdi->charger);
 }
 
 /* Sync the input-current-limit with our parent supply (if we have one) */
index 22cde35..f8636cf 100644 (file)
@@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
        if (bq->chip_version != BQ25892)
                return;
 
-       ret = power_supply_get_property_from_supplier(bq->charger,
+       ret = power_supply_get_property_from_supplier(psy,
                                                      POWER_SUPPLY_PROP_USB_TYPE,
                                                      &val);
        if (ret)
@@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
        }
 
        bq25890_field_write(bq, F_IINLIM, input_current_limit);
+       power_supply_changed(psy);
 }
 
 static int bq25890_get_chip_state(struct bq25890_device *bq,
@@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data)
        dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
                 voltage);
 
+       power_supply_changed(bq->charger);
+
        return;
 error_print:
        bq25890_field_write(bq, F_PUMPX_EN, 0);
index 5ff6f44..4296600 100644 (file)
@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
                return ret;
 
        mutex_lock(&bq27xxx_list_lock);
-       list_for_each_entry(di, &bq27xxx_battery_devices, list) {
-               cancel_delayed_work_sync(&di->work);
-               schedule_delayed_work(&di->work, 0);
-       }
+       list_for_each_entry(di, &bq27xxx_battery_devices, list)
+               mod_delayed_work(system_wq, &di->work, 0);
        mutex_unlock(&bq27xxx_list_lock);
 
        return ret;
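
cancel_delayed_work_sync() followed by schedule_delayed_work() sleeps waiting for a running handler and can deadlock if the caller holds a lock the handler takes; mod_delayed_work() atomically queues the item if idle or re-times it if pending, without ever waiting. The module-parameter setter only needs "run now", hence delay 0:

#include <linux/workqueue.h>

static void my_kick_poll(struct delayed_work *dwork)
{
	/*
	 * queue if idle, re-arm if pending; a currently running
	 * instance is left to finish on its own
	 */
	mod_delayed_work(system_wq, dwork, 0);
}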
@@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
        return POWER_SUPPLY_HEALTH_GOOD;
 }
 
-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
-{
-       struct bq27xxx_reg_cache cache = {0, };
-       bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
-
-       cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
-       if ((cache.flags & 0xff) == 0xff)
-               cache.flags = -1; /* read error */
-       if (cache.flags >= 0) {
-               cache.temperature = bq27xxx_battery_read_temperature(di);
-               if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
-                       cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
-               if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
-                       cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
-               if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
-                       cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
-               cache.charge_full = bq27xxx_battery_read_fcc(di);
-               cache.capacity = bq27xxx_battery_read_soc(di);
-               if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
-                       cache.energy = bq27xxx_battery_read_energy(di);
-               di->cache.flags = cache.flags;
-               cache.health = bq27xxx_battery_read_health(di);
-               if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
-                       cache.cycle_count = bq27xxx_battery_read_cyct(di);
-
-               /* We only have to read charge design full once */
-               if (di->charge_design_full <= 0)
-                       di->charge_design_full = bq27xxx_battery_read_dcap(di);
-       }
-
-       if ((di->cache.capacity != cache.capacity) ||
-           (di->cache.flags != cache.flags))
-               power_supply_changed(di->bat);
-
-       if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
-               di->cache = cache;
-
-       di->last_update = jiffies;
-}
-EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
-
-static void bq27xxx_battery_poll(struct work_struct *work)
-{
-       struct bq27xxx_device_info *di =
-                       container_of(work, struct bq27xxx_device_info,
-                                    work.work);
-
-       bq27xxx_battery_update(di);
-
-       if (poll_interval > 0)
-               schedule_delayed_work(&di->work, poll_interval * HZ);
-}
-
 static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
 {
        if (di->opts & BQ27XXX_O_ZERO)
@@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
 static int bq27xxx_battery_current_and_status(
        struct bq27xxx_device_info *di,
        union power_supply_propval *val_curr,
-       union power_supply_propval *val_status)
+       union power_supply_propval *val_status,
+       struct bq27xxx_reg_cache *cache)
 {
        bool single_flags = (di->opts & BQ27XXX_O_ZERO);
        int curr;
@@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status(
                return curr;
        }
 
-       flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
-       if (flags < 0) {
-               dev_err(di->dev, "error reading flags\n");
-               return flags;
+       if (cache) {
+               flags = cache->flags;
+       } else {
+               flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
+               if (flags < 0) {
+                       dev_err(di->dev, "error reading flags\n");
+                       return flags;
+               }
        }
 
        if (di->opts & BQ27XXX_O_ZERO) {
@@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status(
        return 0;
 }
 
+static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+{
+       union power_supply_propval status = di->last_status;
+       struct bq27xxx_reg_cache cache = {0, };
+       bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+       cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+       if ((cache.flags & 0xff) == 0xff)
+               cache.flags = -1; /* read error */
+       if (cache.flags >= 0) {
+               cache.temperature = bq27xxx_battery_read_temperature(di);
+               if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+                       cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+               if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+                       cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+               if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+                       cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+               cache.charge_full = bq27xxx_battery_read_fcc(di);
+               cache.capacity = bq27xxx_battery_read_soc(di);
+               if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+                       cache.energy = bq27xxx_battery_read_energy(di);
+               di->cache.flags = cache.flags;
+               cache.health = bq27xxx_battery_read_health(di);
+               if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+                       cache.cycle_count = bq27xxx_battery_read_cyct(di);
+
+               /*
+                * On gauges with signed current reporting the current must be
+                * checked to detect charging <-> discharging status changes.
+                */
+               if (!(di->opts & BQ27XXX_O_ZERO))
+                       bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
+
+               /* We only have to read charge design full once */
+               if (di->charge_design_full <= 0)
+                       di->charge_design_full = bq27xxx_battery_read_dcap(di);
+       }
+
+       if ((di->cache.capacity != cache.capacity) ||
+           (di->cache.flags != cache.flags) ||
+           (di->last_status.intval != status.intval)) {
+               di->last_status.intval = status.intval;
+               power_supply_changed(di->bat);
+       }
+
+       if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+               di->cache = cache;
+
+       di->last_update = jiffies;
+
+       if (!di->removed && poll_interval > 0)
+               mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+}
+
+void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+       mutex_lock(&di->lock);
+       bq27xxx_battery_update_unlocked(di);
+       mutex_unlock(&di->lock);
+}
+EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+
+static void bq27xxx_battery_poll(struct work_struct *work)
+{
+       struct bq27xxx_device_info *di =
+                       container_of(work, struct bq27xxx_device_info,
+                                    work.work);
+
+       bq27xxx_battery_update(di);
+}
+
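The update path is now split into a locked wrapper and an _unlocked worker so that callers which already hold di->lock (such as the property getter further down) can refresh the cache without self-deadlocking, while external entry points keep taking the lock. As a condensed illustration of the locked entry point in use (matching how the driver's i2c interrupt thread drives it; not new code in this series):

	static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
	{
		struct bq27xxx_device_info *di = data;

		bq27xxx_battery_update(di);	/* takes di->lock internally */

		return IRQ_HANDLED;
	}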
 /*
  * Get the average power in µW
  * Return < 0 if something fails.
@@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
        struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
 
        mutex_lock(&di->lock);
-       if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
-               cancel_delayed_work_sync(&di->work);
-               bq27xxx_battery_poll(&di->work.work);
-       }
+       if (time_is_before_jiffies(di->last_update + 5 * HZ))
+               bq27xxx_battery_update_unlocked(di);
        mutex_unlock(&di->lock);
 
        if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
 
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
-               ret = bq27xxx_battery_current_and_status(di, NULL, val);
+               ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
                ret = bq27xxx_battery_voltage(di, val);
@@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
                val->intval = di->cache.flags < 0 ? 0 : 1;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = bq27xxx_battery_current_and_status(di, val, NULL);
+               ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL);
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
                ret = bq27xxx_simple_value(di->cache.capacity, val);
@@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
 {
        struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
 
-       cancel_delayed_work_sync(&di->work);
-       schedule_delayed_work(&di->work, 0);
+       /* After charger plug in/out wait 0.5s for things to stabilize */
+       mod_delayed_work(system_wq, &di->work, HZ / 2);
 }
 
 int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
 
 void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
 {
-       /*
-        * power_supply_unregister call bq27xxx_battery_get_property which
-        * call bq27xxx_battery_poll.
-        * Make sure that bq27xxx_battery_poll will not call
-        * schedule_delayed_work again after unregister (which cause OOPS).
-        */
-       poll_interval = 0;
-
-       cancel_delayed_work_sync(&di->work);
-
-       power_supply_unregister(di->bat);
-
        mutex_lock(&bq27xxx_list_lock);
        list_del(&di->list);
        mutex_unlock(&bq27xxx_list_lock);
 
+       /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
+       mutex_lock(&di->lock);
+       di->removed = true;
+       mutex_unlock(&di->lock);
+
+       cancel_delayed_work_sync(&di->work);
+
+       power_supply_unregister(di->bat);
        mutex_destroy(&di->lock);
 }
 EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
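The teardown no longer abuses poll_interval = 0 as a stop signal. Instead the removed flag is flipped under the same lock the update path holds, so a poll that is already running either sees the flag and does not re-queue, or re-queues at most once and is then reaped by cancel_delayed_work_sync(). The two sides of the handshake, condensed from the code above:

	/* teardown side */
	mutex_lock(&di->lock);
	di->removed = true;			/* observed under di->lock */
	mutex_unlock(&di->lock);
	cancel_delayed_work_sync(&di->work);	/* waits out a running poll */

	/* update side, called with di->lock held */
	if (!di->removed && poll_interval > 0)
		mod_delayed_work(system_wq, &di->work, poll_interval * HZ);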
index f876899..6d3c748 100644 (file)
@@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
        i2c_set_clientdata(client, di);
 
        if (client->irq) {
-               ret = devm_request_threaded_irq(&client->dev, client->irq,
+               ret = request_threaded_irq(client->irq,
                                NULL, bq27xxx_battery_irq_handler_thread,
                                IRQF_ONESHOT,
                                di->name, di);
@@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
 {
        struct bq27xxx_device_info *di = i2c_get_clientdata(client);
 
+       free_irq(client->irq, di);
        bq27xxx_battery_teardown(di);
 
        mutex_lock(&battery_mutex);
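A devm-managed IRQ would only be released after ->remove() returns, letting the threaded handler race with the teardown (and the mutex_destroy() it performs); requesting the IRQ manually lets remove pin the order. Condensed sketch of the resulting remove path:

	free_irq(client->irq, di);	/* the handler can no longer run */
	bq27xxx_battery_teardown(di);	/* now safe to destroy di->lock */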
index 92e48e3..1305cba 100644 (file)
@@ -796,7 +796,9 @@ static int mt6360_charger_probe(struct platform_device *pdev)
        mci->vinovp = 6500000;
        mutex_init(&mci->chgdet_lock);
        platform_set_drvdata(pdev, mci);
-       devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+       ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n");
 
        ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
        if (ret)
index ab986db..3791aec 100644 (file)
@@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
        struct power_supply *psy = dev_get_drvdata(dev);
        unsigned int *count = data;
 
+       if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
+               if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+                       return 0;
+
        (*count)++;
        if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
                if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,
@@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void)
                                      __power_supply_is_system_supplied);
 
        /*
-        * If no power class device was found at all, most probably we are
-        * running on a desktop system, so assume we are on mains power.
+        * If no system scope power class device was found at all, most probably we
+        * are running on a desktop system, so assume we are on mains power.
         */
        if (count == 0)
                return 1;
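With the scope check above, supplies reporting POWER_SUPPLY_SCOPE_DEVICE (a peripheral's own battery, e.g. a wireless mouse) no longer count toward the mains-power decision. A hypothetical supply that would now be skipped might expose the property like this (illustrative names, not part of this patch):

	static int mouse_bat_get_property(struct power_supply *psy,
					  enum power_supply_property psp,
					  union power_supply_propval *val)
	{
		if (psp != POWER_SUPPLY_PROP_SCOPE)
			return -EINVAL;

		/* device scope: powers a peripheral, not the system */
		val->intval = POWER_SUPPLY_SCOPE_DEVICE;
		return 0;
	}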
@@ -573,7 +577,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
        struct power_supply_battery_info *info;
        struct device_node *battery_np = NULL;
        struct fwnode_reference_args args;
-       struct fwnode_handle *fwnode;
+       struct fwnode_handle *fwnode = NULL;
        const char *value;
        int err, len, index;
        const __be32 *list;
@@ -585,7 +589,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
                        return -ENODEV;
 
                fwnode = fwnode_handle_get(of_fwnode_handle(battery_np));
-       } else {
+       } else if (psy->dev.parent) {
                err = fwnode_property_get_reference_args(
                                        dev_fwnode(psy->dev.parent),
                                        "monitored-battery", NULL, 0, 0, &args);
@@ -595,6 +599,9 @@ int power_supply_get_battery_info(struct power_supply *psy,
                fwnode = args.fwnode;
        }
 
+       if (!fwnode)
+               return -ENOENT;
+
        err = fwnode_property_read_string(fwnode, "compatible", &value);
        if (err)
                goto out_put_node;
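Callers should treat the new -ENOENT (no DT node, no parent fwnode, no "monitored-battery" reference) as "no static battery data" rather than as a probe failure. A hedged usage sketch, with an illustrative driver helper:

	static int example_setup_battery(struct power_supply *psy)
	{
		struct power_supply_battery_info *info;
		int err;

		err = power_supply_get_battery_info(psy, &info);
		if (err == -ENOENT || err == -ENODEV)
			return 0;	/* run without static battery data */
		if (err)
			return err;

		/* e.g. consult info->charge_full_design_uah here */
		return 0;
	}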
index 702bf83..0674483 100644 (file)
@@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
                led_trigger_event(psy->charging_full_trig, LED_FULL);
                led_trigger_event(psy->charging_trig, LED_OFF);
                led_trigger_event(psy->full_trig, LED_FULL);
-               led_trigger_event(psy->charging_blink_full_solid_trig,
-                       LED_FULL);
+               /* Going from blink to LED on requires a LED_OFF event to stop blink */
+               led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
+               led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
                break;
        case POWER_SUPPLY_STATUS_CHARGING:
                led_trigger_event(psy->charging_full_trig, LED_FULL);
index ba3b125..06e5b6b 100644 (file)
@@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev,
 
                if (ret < 0) {
                        if (ret == -ENODATA)
-                               dev_dbg(dev, "driver has no data for `%s' property\n",
+                               dev_dbg_ratelimited(dev,
+                                       "driver has no data for `%s' property\n",
                                        attr->attr.name);
                        else if (ret != -ENODEV && ret != -EAGAIN)
                                dev_err_ratelimited(dev,
index 73f744a..ea33693 100644 (file)
@@ -1023,7 +1023,7 @@ static int rt9467_request_interrupt(struct rt9467_chg_data *data)
        for (i = 0; i < num_chg_irqs; i++) {
                virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq);
                if (virq <= 0)
-                       return dev_err_probe(dev, virq, "Failed to get (%s) irq\n",
+                       return dev_err_probe(dev, -EINVAL, "Failed to get (%s) irq\n",
                                             chg_irqs[i].name);
 
                ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler,
index 75ebcbf..a14e89a 100644 (file)
@@ -24,7 +24,7 @@
 #define SBS_CHARGER_REG_STATUS                 0x13
 #define SBS_CHARGER_REG_ALARM_WARNING          0x16
 
-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED    BIT(1)
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED    BIT(0)
 #define SBS_CHARGER_STATUS_RES_COLD            BIT(9)
 #define SBS_CHARGER_STATUS_RES_HOT             BIT(10)
 #define SBS_CHARGER_STATUS_BATTERY_PRESENT     BIT(14)
index 632977f..bd23c4d 100644 (file)
@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
        return ret;
 }
 
-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
-{
-       struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
-
-       power_supply_changed(data->battery);
-}
-
 static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
                                            enum power_supply_property psp)
 {
@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
        .num_properties         = ARRAY_SIZE(sc27xx_fgu_props),
        .get_property           = sc27xx_fgu_get_property,
        .set_property           = sc27xx_fgu_set_property,
-       .external_power_changed = sc27xx_fgu_external_power_changed,
+       .external_power_changed = power_supply_changed,
        .property_is_writeable  = sc27xx_fgu_property_is_writeable,
        .no_thermal             = true,
 };
index dc741ac..698ab7f 100644 (file)
@@ -5256,7 +5256,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
        }
 
        rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
-       if (!rdev->debugfs) {
+       if (IS_ERR(rdev->debugfs)) {
                rdev_warn(rdev, "Failed to create debugfs directory\n");
                return;
        }
@@ -6178,7 +6178,7 @@ static int __init regulator_init(void)
        ret = class_register(&regulator_class);
 
        debugfs_root = debugfs_create_dir("regulator", NULL);
-       if (!debugfs_root)
+       if (IS_ERR(debugfs_root))
                pr_warn("regulator: Failed to create debugfs directory\n");
 
 #ifdef CONFIG_DEBUG_FS
index 1849566..3eb86ec 100644 (file)
@@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev)
        struct regulator_config config = {};
        struct regulator_dev *rdev;
        struct mt6359_regulator_info *mt6359_info;
-       int i, hw_ver;
+       int i, hw_ver, ret;
+
+       ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+       if (ret)
+               return ret;
 
-       regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
        if (hw_ver >= MT6359P_CHIP_VER)
                mt6359_info = mt6359p_regulators;
        else
index 87a746d..e75dd92 100644 (file)
@@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
                        .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
                        .vsel_mask = BUCK2OUT_DVS0_MASK,
                        .enable_reg = PCA9450_REG_BUCK2CTRL,
-                       .enable_mask = BUCK1_ENMODE_MASK,
+                       .enable_mask = BUCK2_ENMODE_MASK,
                        .ramp_reg = PCA9450_REG_BUCK2CTRL,
                        .ramp_mask = BUCK2_RAMP_MASK,
                        .ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -502,7 +502,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
                        .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
                        .vsel_mask = BUCK2OUT_DVS0_MASK,
                        .enable_reg = PCA9450_REG_BUCK2CTRL,
-                       .enable_mask = BUCK1_ENMODE_MASK,
+                       .enable_mask = BUCK2_ENMODE_MASK,
                        .ramp_reg = PCA9450_REG_BUCK2CTRL,
                        .ramp_mask = BUCK2_RAMP_MASK,
                        .ramp_delay_table = pca9450_dvs_buck_ramp_table,
index ade1369..113c509 100644 (file)
@@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
                        struct dasd_device *, struct dasd_device *,
                        unsigned int, int, unsigned int, unsigned int,
                        unsigned int, unsigned int);
+static int dasd_eckd_query_pprc_status(struct dasd_device *,
+                                      struct dasd_pprc_data_sc4 *);
 
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
        return count;
 }
 
+static int dasd_in_copy_relation(struct dasd_device *device)
+{
+       struct dasd_pprc_data_sc4 *temp;
+       int rc;
+
+       if (!dasd_eckd_pprc_enabled(device))
+               return 0;
+
+       temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+       if (!temp)
+               return -ENOMEM;
+
+       rc = dasd_eckd_query_pprc_status(device, temp);
+       if (!rc)
+               rc = temp->dev_info[0].state;
+
+       kfree(temp);
+       return rc;
+}
+
 /*
  * Release allocated space for a given range or an entire volume.
  */
@@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        int cur_to_trk, cur_from_trk;
        struct dasd_ccw_req *cqr;
        u32 beg_cyl, end_cyl;
+       int copy_relation;
        struct ccw1 *ccw;
        int trks_per_ext;
        size_t ras_size;
@@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
                return ERR_PTR(-EINVAL);
 
+       copy_relation = dasd_in_copy_relation(device);
+       if (copy_relation < 0)
+               return ERR_PTR(copy_relation);
+
        rq = req ? blk_mq_rq_to_pdu(req) : NULL;
 
        features = &private->features;
@@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        /*
         * This bit guarantees initialisation of tracks within an extent that is
         * not fully specified, but is only supported with a certain feature
-        * subset.
+        * subset and for devices not in a copy relation.
         */
-       ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
+       if (features->feature[56] & 0x01 && !copy_relation)
+               ras_data->op_flags.guarantee_init = 1;
+
        ras_data->lss = private->conf.ned->ID;
        ras_data->dev_addr = private->conf.ned->unit_addr;
        ras_data->nr_exts = nr_exts;
index 8eb089b..d5c43e9 100644 (file)
@@ -1111,6 +1111,8 @@ static void io_subchannel_verify(struct subchannel *sch)
        cdev = sch_get_cdev(sch);
        if (cdev)
                dev_fsm_event(cdev, DEV_EVENT_VERIFY);
+       else
+               css_schedule_eval(sch->schid);
 }
 
 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
index 5ea6249..641f0db 100644 (file)
@@ -95,7 +95,7 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue,
                "       lgr     1,%[token]\n"
                "       .insn   rsy,0xeb000000008a,%[qs],%[ccq],0(%[state])"
                : [ccq] "+&d" (_ccq), [qs] "+&d" (_queuestart)
-               : [state] "d" ((unsigned long)state), [token] "d" (token)
+               : [state] "a" ((unsigned long)state), [token] "d" (token)
                : "memory", "cc", "1");
        *count = _ccq & 0xff;
        *start = _queuestart & 0xff;
index 5a05d1c..a8def50 100644 (file)
@@ -1293,6 +1293,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
                        return PTR_ERR(kkey);
                rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
                DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+               memzero_explicit(kkey, ktp.keylen);
                kfree(kkey);
                if (rc)
                        break;
@@ -1426,6 +1427,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                        kkey, ktp.keylen, &ktp.protkey);
                DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
                kfree(apqns);
+               memzero_explicit(kkey, ktp.keylen);
                kfree(kkey);
                if (rc)
                        break;
@@ -1552,6 +1554,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                        protkey, &protkeylen);
                DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
                kfree(apqns);
+               memzero_explicit(kkey, ktp.keylen);
                kfree(kkey);
                if (rc) {
                        kfree(protkey);
index b7c569a..0226c92 100644 (file)
@@ -1463,6 +1463,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        struct Scsi_Host *host = cmd->device->host;
        int rtn = 0;
 
+       atomic_inc(&cmd->device->iorequest_cnt);
+
        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
@@ -1483,6 +1485,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
                 */
                SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
                        "queuecommand : device blocked\n"));
+               atomic_dec(&cmd->device->iorequest_cnt);
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }
 
@@ -1515,6 +1518,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        trace_scsi_dispatch_cmd_start(cmd);
        rtn = host->hostt->queuecommand(host, cmd);
        if (rtn) {
+               atomic_dec(&cmd->device->iorequest_cnt);
                trace_scsi_dispatch_cmd_error(cmd, rtn);
                if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
                    rtn != SCSI_MLQUEUE_TARGET_BUSY)
@@ -1761,7 +1765,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                goto out_dec_host_busy;
        }
 
-       atomic_inc(&cmd->device->iorequest_cnt);
        return BLK_STS_OK;
 
 out_dec_host_busy:
index d9ce379..e6bc622 100644 (file)
@@ -1780,7 +1780,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 
        length = scsi_bufflen(scmnd);
        payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
-       payload_sz = sizeof(cmd_request->mpb);
+       payload_sz = 0;
 
        if (scsi_sg_count(scmnd)) {
                unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
@@ -1789,10 +1789,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                unsigned long hvpfn, hvpfns_to_add;
                int j, i = 0, sg_count;
 
-               if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+               payload_sz = (hvpg_count * sizeof(u64) +
+                             sizeof(struct vmbus_packet_mpb_array));
 
-                       payload_sz = (hvpg_count * sizeof(u64) +
-                                     sizeof(struct vmbus_packet_mpb_array));
+               if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
                        payload = kzalloc(payload_sz, GFP_ATOMIC);
                        if (!payload)
                                return SCSI_MLQUEUE_DEVICE_BUSY;
index ac85d55..26e6633 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
@@ -301,49 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi,
 }
 
 /**
- * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * cdns_spi_process_fifo - Fills the TX FIFO, and drain the RX FIFO
  * @xspi:      Pointer to the cdns_spi structure
+ * @ntx:       Number of bytes to pack into the TX FIFO
+ * @nrx:       Number of bytes to drain from the RX FIFO
  */
-static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
 {
-       unsigned long trans_cnt = 0;
+       ntx = clamp(ntx, 0, xspi->tx_bytes);
+       nrx = clamp(nrx, 0, xspi->rx_bytes);
 
-       while ((trans_cnt < xspi->tx_fifo_depth) &&
-              (xspi->tx_bytes > 0)) {
+       xspi->tx_bytes -= ntx;
+       xspi->rx_bytes -= nrx;
 
+       while (ntx || nrx) {
                /* When the controller is busy, bytes may fail to send and
                 * the SPI core doesn't work reliably, so add a one-byte delay.
                 */
-               if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
-                   CDNS_SPI_IXR_TXFULL)
+               if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
                        udelay(10);
 
-               if (xspi->txbuf)
-                       cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
-               else
-                       cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
+               if (ntx) {
+                       if (xspi->txbuf)
+                               cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+                       else
+                               cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
 
-               xspi->tx_bytes--;
-               trans_cnt++;
-       }
-}
+                       ntx--;
+               }
 
-/**
- * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible
- * @xspi:       Pointer to the cdns_spi structure
- * @count:     Read byte count
- */
-static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count)
-{
-       u8 data;
-
-       /* Read out the data from the RX FIFO */
-       while (count > 0) {
-               data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-               if (xspi->rxbuf)
-                       *xspi->rxbuf++ = data;
-               xspi->rx_bytes--;
-               count--;
+               if (nrx) {
+                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+                       if (xspi->rxbuf)
+                               *xspi->rxbuf++ = data;
+
+                       nrx--;
+               }
        }
 }
 
@@ -381,33 +376,22 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
                spi_finalize_current_transfer(ctlr);
                status = IRQ_HANDLED;
        } else if (intr_status & CDNS_SPI_IXR_TXOW) {
-               int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD);
+               int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD);
+               int trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+               if (threshold > 1)
+                       trans_cnt -= threshold;
+
                /* Set the threshold to one if the number of pending bytes
                 * is less than half the FIFO depth.
                 */
                if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
                        cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
 
-               while (trans_cnt) {
-                       cdns_spi_read_rx_fifo(xspi, 1);
-
-                       if (xspi->tx_bytes) {
-                               if (xspi->txbuf)
-                                       cdns_spi_write(xspi, CDNS_SPI_TXD,
-                                                      *xspi->txbuf++);
-                               else
-                                       cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
-                               xspi->tx_bytes--;
-                       }
-                       trans_cnt--;
-               }
-               if (!xspi->tx_bytes) {
-                       /* Fixed delay due to controller limitation with
-                        * RX_NEMPTY incorrect status
-                        * Xilinx AR:65885 contains more details
-                        */
-                       udelay(10);
-                       cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes);
+               if (xspi->tx_bytes) {
+                       cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
+               } else {
+                       cdns_spi_process_fifo(xspi, 0, trans_cnt);
                        cdns_spi_write(xspi, CDNS_SPI_IDR,
                                       CDNS_SPI_IXR_DEFAULT);
                        spi_finalize_current_transfer(ctlr);
@@ -450,16 +434,17 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
        xspi->tx_bytes = transfer->len;
        xspi->rx_bytes = transfer->len;
 
-       if (!spi_controller_is_slave(ctlr))
+       if (!spi_controller_is_slave(ctlr)) {
                cdns_spi_setup_transfer(spi, transfer);
+       } else {
+               /* Set TX empty threshold to half of the FIFO depth
+                * only if TX bytes exceed the FIFO depth.
+                */
+               if (xspi->tx_bytes > xspi->tx_fifo_depth)
+                       cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
+       }
 
-       /* Set TX empty threshold to half of FIFO depth
-        * only if TX bytes are more than half FIFO depth.
-        */
-       if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1))
-               cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
-
-       cdns_spi_fill_tx_fifo(xspi);
+       cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
        spi_transfer_delay_exec(transfer);
 
        cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
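Because cdns_spi_process_fifo() clamps both counts to the bytes still outstanding, its call sites stay simple; the three patterns used in this patch are:

	/* prime the TX FIFO at transfer start (RX not touched yet) */
	cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);

	/* steady state from the IRQ: refill TX by as much as was drained */
	cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);

	/* tail of the transfer: nothing left to send, drain remaining RX */
	cdns_spi_process_fifo(xspi, 0, trans_cnt);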
index 5e6faa9..5f2aee6 100644 (file)
@@ -264,17 +264,17 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable)
        struct regmap *syscon = dwsmmio->priv;
        u8 cs;
 
-       cs = spi->chip_select;
+       cs = spi_get_chipselect(spi, 0);
        if (cs < 2)
-               dw_spi_elba_override_cs(syscon, spi->chip_select, enable);
+               dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable);
 
        /*
         * The DW SPI controller needs a native CS bit selected to start
         * the serial engine.
         */
-       spi->chip_select = 0;
+       spi_set_chipselect(spi, 0, 0);
        dw_spi_set_cs(spi, enable);
-       spi->chip_select = cs;
+       spi_set_chipselect(spi, 0, cs);
 }
 
 static int dw_spi_elba_init(struct platform_device *pdev,
index ba7be50..a98b781 100644 (file)
@@ -294,6 +294,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        mas->cs_flag = set_flag;
        /* set xfer_mode to FIFO to complete cs_done in isr */
        mas->cur_xfer_mode = GENI_SE_FIFO;
+       geni_se_select_mode(se, mas->cur_xfer_mode);
+
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
index d76e923..c0aee5d 100644 (file)
@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring)
        return bit;
 }
 
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+       if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+               return;
+       iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+       if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+               ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+       else
+               iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
 /*
  * ring_interrupt_active() - activate/deactivate interrupts for a single ring
  *
@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
  */
 static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
-       int reg = REG_RING_INTERRUPT_BASE +
-                 ring_interrupt_index(ring) / 32 * 4;
+       int index = ring_interrupt_index(ring) / 32 * 4;
+       int reg = REG_RING_INTERRUPT_BASE + index;
        int interrupt_bit = ring_interrupt_index(ring) & 31;
        int mask = 1 << interrupt_bit;
        u32 old, new;
@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
                                         "interrupt for %s %d is already %s\n",
                                         RING_TYPE(ring), ring->hop,
                                         active ? "enabled" : "disabled");
-       iowrite32(new, ring->nhi->iobase + reg);
+
+       if (active)
+               iowrite32(new, ring->nhi->iobase + reg);
+       else
+               nhi_mask_interrupt(ring->nhi, mask, index);
 }
 
 /*
@@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
        int i = 0;
        /* disable interrupts */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
-               iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+               nhi_mask_interrupt(nhi, ~0, 4 * i);
 
        /* clear interrupt status bits */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
-               ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+               nhi_clear_interrupt(nhi, 4 * i);
 }
 
 /* ring helper methods */
index faef165..6ba2958 100644 (file)
@@ -93,6 +93,8 @@ struct ring_desc {
 #define REG_RING_INTERRUPT_BASE        0x38200
 #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
 
+#define REG_RING_INTERRUPT_MASK_CLEAR_BASE     0x38208
+
 #define REG_INT_THROTTLING_RATE        0x38c00
 
 /* Interrupt Vector Allocation */
index f801b1f..af0e1c0 100644 (file)
@@ -1012,7 +1012,7 @@ static int brcmuart_probe(struct platform_device *pdev)
        of_property_read_u32(np, "clock-frequency", &clk_rate);
 
        /* See if a Baud clock has been specified */
-       baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
+       baud_mux_clk = devm_clk_get(dev, "sw_baud");
        if (IS_ERR(baud_mux_clk)) {
                if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
@@ -1032,7 +1032,7 @@ static int brcmuart_probe(struct platform_device *pdev)
        if (clk_rate == 0) {
                dev_err(dev, "clock-frequency or clk not defined\n");
                ret = -EINVAL;
-               goto release_dma;
+               goto err_clk_disable;
        }
 
        dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
@@ -1119,6 +1119,8 @@ err1:
        serial8250_unregister_port(priv->line);
 err:
        brcmuart_free_bufs(dev, priv);
+err_clk_disable:
+       clk_disable_unprepare(baud_mux_clk);
 release_dma:
        if (priv->dma_enabled)
                brcmuart_arbitration(priv, 0);
@@ -1133,6 +1135,7 @@ static int brcmuart_remove(struct platform_device *pdev)
        hrtimer_cancel(&priv->hrt);
        serial8250_unregister_port(priv->line);
        brcmuart_free_bufs(&pdev->dev, priv);
+       clk_disable_unprepare(priv->baud_mux_clk);
        if (priv->dma_enabled)
                brcmuart_arbitration(priv, 0);
        return 0;
index 64770c6..b406cba 100644 (file)
 #define PCI_DEVICE_ID_COMMTECH_4224PCIE                0x0020
 #define PCI_DEVICE_ID_COMMTECH_4228PCIE                0x0021
 #define PCI_DEVICE_ID_COMMTECH_4222PCIE                0x0022
+
 #define PCI_DEVICE_ID_EXAR_XR17V4358           0x4358
 #define PCI_DEVICE_ID_EXAR_XR17V8358           0x8358
 
+#define PCI_SUBDEVICE_ID_USR_2980              0x0128
+#define PCI_SUBDEVICE_ID_USR_2981              0x0129
+
 #define PCI_DEVICE_ID_SEALEVEL_710xC           0x1001
 #define PCI_DEVICE_ID_SEALEVEL_720xC           0x1002
 #define PCI_DEVICE_ID_SEALEVEL_740xC           0x1004
@@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
                (kernel_ulong_t)&bd                     \
        }
 
+#define USR_DEVICE(devid, sdevid, bd) {                        \
+       PCI_DEVICE_SUB(                                 \
+               PCI_VENDOR_ID_USR,                      \
+               PCI_DEVICE_ID_EXAR_##devid,             \
+               PCI_VENDOR_ID_EXAR,                     \
+               PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0,   \
+               (kernel_ulong_t)&bd                     \
+       }
+
 static const struct pci_device_id exar_pci_tbl[] = {
        EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x),
        EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x),
@@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = {
 
        IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
 
+       /* USRobotics USR298x-OEM PCI Modems */
+       USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
+       USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+
        /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
        EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
        EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
index c55be6f..e80c4f6 100644 (file)
@@ -1920,6 +1920,8 @@ pci_moxa_setup(struct serial_private *priv,
 #define PCI_SUBDEVICE_ID_SIIG_DUAL_30  0x2530
 #define PCI_VENDOR_ID_ADVANTECH                0x13fe
 #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600        0x1600
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611   0x1611
 #define PCI_DEVICE_ID_ADVANTECH_PCI3620        0x3620
 #define PCI_DEVICE_ID_ADVANTECH_PCI3618        0x3618
 #define PCI_DEVICE_ID_ADVANTECH_PCIf618        0xf618
@@ -4085,6 +4087,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
                         pciserial_resume_one);
 
 static const struct pci_device_id serial_pci_tbl[] = {
+       {       PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
+               PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
+               pbn_b0_4_921600 },
        /* Advantech uses PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
        {       PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
                PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
index fe8d79c..c153ba3 100644 (file)
@@ -669,6 +669,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_supported);
 /**
  * serial8250_em485_config() - generic ->rs485_config() callback
  * @port: uart port
+ * @termios: termios structure
  * @rs485: rs485 settings
  *
  * Generic callback usable by 8250 uart drivers to activate rs485 settings
index 59e25f2..4b2512e 100644 (file)
@@ -606,10 +606,11 @@ static int arc_serial_probe(struct platform_device *pdev)
        }
        uart->baud = val;
 
-       port->membase = of_iomap(np, 0);
-       if (!port->membase)
+       port->membase = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(port->membase)) {
                /* No point in dev_err() since the UART itself is hosed here */
-               return -ENXIO;
+               return PTR_ERR(port->membase);
+       }
 
        port->irq = irq_of_parse_and_map(np, 0);
 
index 08dc3e2..8582479 100644 (file)
@@ -1664,19 +1664,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
        uport->private_data = &port->private_data;
        platform_set_drvdata(pdev, port);
 
-       ret = uart_add_one_port(drv, uport);
-       if (ret)
-               return ret;
-
        irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
                        IRQF_TRIGGER_HIGH, port->name, uport);
        if (ret) {
                dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
-               uart_remove_one_port(drv, uport);
                return ret;
        }
 
+       ret = uart_add_one_port(drv, uport);
+       if (ret)
+               return ret;
+
        /*
         * Set pm_runtime status as ACTIVE so that wakeup_irq gets
         * enabled/disabled from dev_pm_arm_wake_irq during system
index 498ba9c..829c4be 100644 (file)
@@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                        }
                }
 
-               /* The vcs_size might have changed while we slept to grab
-                * the user buffer, so recheck.
+               /* The vc might have been freed or vcs_size might have changed
+                * while we slept to grab the user buffer, so recheck.
                 * Return data written up to now on failure.
                 */
+               vc = vcs_vc(inode, &viewed);
+               if (!vc) {
+                       if (written)
+                               break;
+                       ret = -ENXIO;
+                       goto unlock_out;
+               }
                size = vcs_size(vc, attr, false);
                if (size < 0) {
                        if (written)
index 202ff71..51b3c6a 100644 (file)
@@ -150,7 +150,8 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
        u32 hba_maxq, rem, tot_queues;
        struct Scsi_Host *host = hba->host;
 
-       hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+       /* maxq is a 0-based value */
+       hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
 
        tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
                        rw_queues;
@@ -265,7 +266,7 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
        addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
                hba->ucdl_dma_addr;
 
-       return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+       return div_u64(addr, ufshcd_get_ucd_size(hba));
 }
 
 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
index 45fd374..e7e79f5 100644 (file)
@@ -2849,10 +2849,10 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 {
        struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
-               i * sizeof_utp_transfer_cmd_desc(hba);
+               i * ufshcd_get_ucd_size(hba);
        struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
        dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
-               i * sizeof_utp_transfer_cmd_desc(hba);
+               i * ufshcd_get_ucd_size(hba);
        u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
                                       response_upiu);
        u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -3761,7 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
        size_t utmrdl_size, utrdl_size, ucdl_size;
 
        /* Allocate memory for UTP command descriptors */
-       ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
+       ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
                                                  ucdl_size,
                                                  &hba->ucdl_dma_addr,
@@ -3861,7 +3861,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
        prdt_offset =
                offsetof(struct utp_transfer_cmd_desc, prd_table);
 
-       cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
+       cmd_desc_size = ufshcd_get_ucd_size(hba);
        cmd_desc_dma_addr = hba->ucdl_dma_addr;
 
        for (i = 0; i < hba->nutrs; i++) {
@@ -8452,7 +8452,7 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
 {
        size_t ucdl_size, utrdl_size;
 
-       ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+       ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
        dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
                           hba->ucdl_dma_addr);
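The renamed helper also fixes the SDB release path, which previously freed with sizeof(struct utp_transfer_cmd_desc) although the allocation had been scaled by the per-hba descriptor size. The invariant, condensed from the code above: allocation and release must use the same stride.

	size_t ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;

	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, ucdl_size,
						  &hba->ucdl_dma_addr, GFP_KERNEL);
	/* ... */
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);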
 
index 4bb6d30..311007b 100644 (file)
@@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
 
        if (request.req.wLength > USBTMC_BUFSIZE)
                return -EMSGSIZE;
+       if (request.req.wLength == 0)   /* Length-0 requests are never IN */
+               request.req.bRequestType &= ~USB_DIR_IN;
 
        is_in = request.req.bRequestType & USB_DIR_IN;
 
index 0beaab9..7b2ce01 100644 (file)
@@ -1137,7 +1137,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
 
        dwc3_set_incr_burst_type(dwc);
 
-       dwc3_phy_power_on(dwc);
+       ret = dwc3_phy_power_on(dwc);
        if (ret)
                goto err_exit_phy;
 
index d56457c..1f043c3 100644 (file)
@@ -1116,6 +1116,7 @@ struct dwc3_scratchpad_array {
  * @dis_metastability_quirk: set to disable metastability quirk.
  * @dis_split_quirk: set to disable split boundary.
  * @wakeup_configured: set if the device is configured for remote wakeup.
+ * @suspended: set to track suspend event due to U3/L2.
  * @imod_interval: set the interrupt moderation interval in 250ns
  *                     increments or 0 to disable.
  * @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1332,6 +1333,7 @@ struct dwc3 {
        unsigned                dis_split_quirk:1;
        unsigned                async_callbacks:1;
        unsigned                wakeup_configured:1;
+       unsigned                suspended:1;
 
        u16                     imod_interval;
 
index e4a2560..ebf0346 100644 (file)
@@ -332,6 +332,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
        unsigned int            current_mode;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@@ -350,6 +355,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
        }
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -395,6 +402,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = s->private;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -414,6 +426,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
                seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
        }
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -463,6 +477,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = s->private;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -493,6 +512,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
                seq_printf(s, "UNKNOWN %d\n", reg);
        }
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -509,6 +530,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
        unsigned long           flags;
        u32                     testmode = 0;
        char                    buf[32];
+       int                     ret;
 
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
@@ -526,10 +548,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
        else
                testmode = 0;
 
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irqsave(&dwc->lock, flags);
        dwc3_gadget_set_test_mode(dwc, testmode);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return count;
 }
 
@@ -548,12 +576,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
        enum dwc3_link_state    state;
        u32                     reg;
        u8                      speed;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
        if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
                seq_puts(s, "Not available\n");
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return 0;
        }
 
@@ -566,6 +600,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
                   dwc3_gadget_hs_link_string(state));
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -584,6 +620,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
        char                    buf[32];
        u32                     reg;
        u8                      speed;
+       int                     ret;
 
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
@@ -603,10 +640,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
        else
                return -EINVAL;
 
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
        if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return -EINVAL;
        }
 
@@ -616,12 +658,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
        if (speed < DWC3_DSTS_SUPERSPEED &&
            state != DWC3_LINK_STATE_RECOV) {
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return -EINVAL;
        }
 
        dwc3_gadget_set_link_state(dwc, state);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return count;
 }
 
@@ -645,6 +690,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
        unsigned long           flags;
        u32                     mdwidth;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@@ -657,6 +707,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -667,6 +719,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
        unsigned long           flags;
        u32                     mdwidth;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@@ -679,6 +736,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -688,12 +747,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -703,12 +769,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -718,12 +791,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -733,12 +813,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -748,12 +835,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -798,6 +892,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        int                     i;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        if (dep->number <= 1) {
@@ -827,6 +926,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
 out:
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -839,6 +940,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
        u32                     lower_32_bits;
        u32                     upper_32_bits;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@@ -851,6 +957,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
        seq_printf(s, "0x%016llx\n", ep_info);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -910,6 +1018,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
        dwc->regset->regs = dwc3_regs;
        dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
        dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+       dwc->regset->dev = dwc->dev;
 
        root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
        dwc->debug_root = root;
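Every debugfs op that touches controller registers now brackets the access with a runtime-PM get/put, since reading DWC3 registers while the controller is autosuspended can hang. The repeated pattern, reduced to a skeleton (dwc3_example_show is illustrative, not a function in this patch):

	static int dwc3_example_show(struct seq_file *s, void *unused)
	{
		struct dwc3	*dwc = s->private;
		unsigned long	flags;
		int		ret;

		ret = pm_runtime_resume_and_get(dwc->dev);	/* wake the controller */
		if (ret < 0)
			return ret;

		spin_lock_irqsave(&dwc->lock, flags);
		/* ... register reads via dwc3_readl() ... */
		spin_unlock_irqrestore(&dwc->lock, flags);

		pm_runtime_put_sync(dwc->dev);			/* allow autosuspend again */
		return 0;
	}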
index c0ca4d1..d831f5a 100644 (file)
@@ -2440,6 +2440,7 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
                        return -EINVAL;
                }
                dwc3_resume_gadget(dwc);
+               dwc->suspended = false;
                dwc->link_state = DWC3_LINK_STATE_U0;
        }
 
@@ -2699,6 +2700,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
        return ret;
 }
 
+static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
+{
+       /*
+        * The Synopsys DWC_usb31 1.90a programming guide, section
+        * 4.1.9, specifies that a reconnect after a device-initiated
+        * disconnect requires a core soft reset (DCTL.CSftRst) before
+        * enabling the run/stop bit.
+        */
+       dwc3_core_soft_reset(dwc);
+
+       dwc3_event_buffers_setup(dwc);
+       __dwc3_gadget_start(dwc);
+       return dwc3_gadget_run_stop(dwc, true);
+}
+
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
        struct dwc3             *dwc = gadget_to_dwc(g);
@@ -2737,21 +2753,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
        synchronize_irq(dwc->irq_gadget);
 
-       if (!is_on) {
+       if (!is_on)
                ret = dwc3_gadget_soft_disconnect(dwc);
-       } else {
-               /*
-                * In the Synopsys DWC_usb31 1.90a programming guide section
-                * 4.1.9, it specifies that for a reconnect after a
-                * device-initiated disconnect requires a core soft reset
-                * (DCTL.CSftRst) before enabling the run/stop bit.
-                */
-               dwc3_core_soft_reset(dwc);
-
-               dwc3_event_buffers_setup(dwc);
-               __dwc3_gadget_start(dwc);
-               ret = dwc3_gadget_run_stop(dwc, true);
-       }
+       else
+               ret = dwc3_gadget_soft_connect(dwc);
 
        pm_runtime_put(dwc->dev);
 
@@ -3938,6 +3943,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
 {
        int                     reg;
 
+       dwc->suspended = false;
+
        dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
 
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -3962,6 +3969,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
        u32                     reg;
 
+       dwc->suspended = false;
+
        /*
         * Ideally, dwc3_reset_gadget() would trigger the function
         * drivers to stop any active transfers through ep disable.
@@ -4180,6 +4189,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 
 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo)
 {
+       dwc->suspended = false;
+
        /*
         * TODO take core out of low power mode when that's
         * implemented.
@@ -4277,6 +4288,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
                if (dwc->gadget->wakeup_armed) {
                        dwc3_gadget_enable_linksts_evts(dwc, false);
                        dwc3_resume_gadget(dwc);
+                       dwc->suspended = false;
                }
                break;
        case DWC3_LINK_STATE_U1:
@@ -4303,8 +4315,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
 {
        enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
 
-       if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+       if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
+               dwc->suspended = true;
                dwc3_suspend_gadget(dwc);
+       }
 
        dwc->link_state = next;
 }
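
The new dwc->suspended flag is an edge filter: dwc3_suspend_gadget() now
fires once per entry into U3, and every event that implies bus activity
(disconnect, reset, wakeup, resume via link status) re-arms the flag.
The same idiom in isolation, with hypothetical names (struct ctrl,
notify_suspend):

    /* Notify only on the not-suspended -> suspended transition. */
    static void link_state_event(struct ctrl *c, bool entered_u3)
    {
            if (entered_u3 && !c->suspended) {
                    c->suspended = true;
                    notify_suspend(c);      /* hypothetical callback */
            } else if (!entered_u3) {
                    c->suspended = false;   /* re-arm for the next suspend */
            }
    }
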
@@ -4655,42 +4669,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
        unsigned long flags;
+       int ret;
 
        if (!dwc->gadget_driver)
                return 0;
 
-       dwc3_gadget_run_stop(dwc, false);
+       ret = dwc3_gadget_soft_disconnect(dwc);
+       if (ret)
+               goto err;
 
        spin_lock_irqsave(&dwc->lock, flags);
        dwc3_disconnect_gadget(dwc);
-       __dwc3_gadget_stop(dwc);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
+
+err:
+       /*
+        * Attempt to reset the controller's state. Likely no
+        * communication can be established until the host
+        * performs a port reset.
+        */
+       if (dwc->softconnect)
+               dwc3_gadget_soft_connect(dwc);
+
+       return ret;
 }
 
 int dwc3_gadget_resume(struct dwc3 *dwc)
 {
-       int                     ret;
-
        if (!dwc->gadget_driver || !dwc->softconnect)
                return 0;
 
-       ret = __dwc3_gadget_start(dwc);
-       if (ret < 0)
-               goto err0;
-
-       ret = dwc3_gadget_run_stop(dwc, true);
-       if (ret < 0)
-               goto err1;
-
-       return 0;
-
-err1:
-       __dwc3_gadget_stop(dwc);
-
-err0:
-       return ret;
+       return dwc3_gadget_soft_connect(dwc);
 }
 
 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
index 6956ad8..a366abb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/string_helpers.h>
 #include <linux/usb/composite.h>
 
 #include "u_ether.h"
@@ -965,6 +966,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
        dev = netdev_priv(net);
        snprintf(host_addr, len, "%pm", dev->host_mac);
 
+       string_upper(host_addr, host_addr);
+
        return strlen(host_addr);
 }
 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
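
string_upper() is declared in <linux/string_helpers.h>; it copies src to
dst while upper-casing, and because it copies forward the two arguments
may alias, which is what makes the in-place call above safe. A minimal
usage sketch (dev->host_mac as in the surrounding function):

    #include <linux/string_helpers.h>

    char mac[13];

    snprintf(mac, sizeof(mac), "%pm", dev->host_mac); /* e.g. "0123456789ab" */
    string_upper(mac, mac);                           /* in place: "0123456789AB" */
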
index 4641153..52e6d2e 100644 (file)
@@ -37,10 +37,6 @@ static const struct bus_type gadget_bus_type;
  * @vbus: for udcs who care about vbus status, this value is real vbus status;
  * for udcs who do not care about vbus status, this value is always true
  * @started: the UDC's started state. True if the UDC had started.
- * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
- * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
- * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
- * called with this lock held.
  *
  * This represents the internal data structure which is used by the UDC-class
  * to hold information about udc driver and gadget together.
@@ -52,7 +48,6 @@ struct usb_udc {
        struct list_head                list;
        bool                            vbus;
        bool                            started;
-       struct mutex                    connect_lock;
 };
 
 static struct class *udc_class;
@@ -692,9 +687,17 @@ out:
 }
 EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
 
-/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
-static int usb_gadget_connect_locked(struct usb_gadget *gadget)
-       __must_hold(&gadget->udc->connect_lock)
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget: the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup.  The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
 {
        int ret = 0;
 
@@ -703,15 +706,10 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
                goto out;
        }
 
-       if (gadget->connected)
-               goto out;
-
-       if (gadget->deactivated || !gadget->udc->started) {
+       if (gadget->deactivated) {
                /*
                 * If gadget is deactivated we only save new state.
                 * Gadget will be connected automatically after activation.
-                *
-                * udc first needs to be started before gadget can be pulled up.
                 */
                gadget->connected = true;
                goto out;
@@ -726,32 +724,22 @@ out:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
 
 /**
- * usb_gadget_connect - software-controlled connect to USB host
- * @gadget:the peripheral being connected
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget: the peripheral being disconnected
  *
- * Enables the D+ (or potentially D-) pullup.  The host will start
- * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active).  Not all systems
+ * support software pullup controls.
+ *
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
  *
  * Returns zero on success, else negative errno.
  */
-int usb_gadget_connect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       mutex_lock(&gadget->udc->connect_lock);
-       ret = usb_gadget_connect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_connect);
-
-/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
-static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
-       __must_hold(&gadget->udc->connect_lock)
+int usb_gadget_disconnect(struct usb_gadget *gadget)
 {
        int ret = 0;
 
@@ -763,12 +751,10 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
        if (!gadget->connected)
                goto out;
 
-       if (gadget->deactivated || !gadget->udc->started) {
+       if (gadget->deactivated) {
                /*
                 * If gadget is deactivated we only save new state.
                 * Gadget will stay disconnected after activation.
-                *
-                * udc should have been started before gadget being pulled down.
                 */
                gadget->connected = false;
                goto out;
@@ -788,30 +774,6 @@ out:
 
        return ret;
 }
-
-/**
- * usb_gadget_disconnect - software-controlled disconnect from USB host
- * @gadget:the peripheral being disconnected
- *
- * Disables the D+ (or potentially D-) pullup, which the host may see
- * as a disconnect (when a VBUS session is active).  Not all systems
- * support software pullup controls.
- *
- * Following a successful disconnect, invoke the ->disconnect() callback
- * for the current gadget driver so that UDC drivers don't need to.
- *
- * Returns zero on success, else negative errno.
- */
-int usb_gadget_disconnect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       mutex_lock(&gadget->udc->connect_lock);
-       ret = usb_gadget_disconnect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
-
-       return ret;
-}
 EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
 
 /**
@@ -832,11 +794,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
        if (gadget->deactivated)
                goto out;
 
-       mutex_lock(&gadget->udc->connect_lock);
        if (gadget->connected) {
-               ret = usb_gadget_disconnect_locked(gadget);
+               ret = usb_gadget_disconnect(gadget);
                if (ret)
-                       goto unlock;
+                       goto out;
 
                /*
                 * If gadget was being connected before deactivation, we want
@@ -846,8 +807,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
        }
        gadget->deactivated = true;
 
-unlock:
-       mutex_unlock(&gadget->udc->connect_lock);
 out:
        trace_usb_gadget_deactivate(gadget, ret);
 
@@ -871,7 +830,6 @@ int usb_gadget_activate(struct usb_gadget *gadget)
        if (!gadget->deactivated)
                goto out;
 
-       mutex_lock(&gadget->udc->connect_lock);
        gadget->deactivated = false;
 
        /*
@@ -879,8 +837,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
         * while it was being deactivated, we call usb_gadget_connect().
         */
        if (gadget->connected)
-               ret = usb_gadget_connect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
+               ret = usb_gadget_connect(gadget);
 
 out:
        trace_usb_gadget_activate(gadget, ret);
@@ -1121,13 +1078,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
 
 /* ------------------------------------------------------------------------- */
 
-/* Acquire connect_lock before calling this function. */
-static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+static void usb_udc_connect_control(struct usb_udc *udc)
 {
-       if (udc->vbus && udc->started)
-               usb_gadget_connect_locked(udc->gadget);
+       if (udc->vbus)
+               usb_gadget_connect(udc->gadget);
        else
-               usb_gadget_disconnect_locked(udc->gadget);
+               usb_gadget_disconnect(udc->gadget);
 }
 
 /**
@@ -1143,12 +1099,10 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
 {
        struct usb_udc *udc = gadget->udc;
 
-       mutex_lock(&udc->connect_lock);
        if (udc) {
                udc->vbus = status;
-               usb_udc_connect_control_locked(udc);
+               usb_udc_connect_control(udc);
        }
-       mutex_unlock(&udc->connect_lock);
 }
 EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
 
@@ -1170,7 +1124,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
 EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
 
 /**
- * usb_gadget_udc_start_locked - tells usb device controller to start up
+ * usb_gadget_udc_start - tells usb device controller to start up
  * @udc: The UDC to be started
  *
  * This call is issued by the UDC Class driver when it's about
@@ -1181,11 +1135,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
  * necessary to have it powered on.
  *
  * Returns zero on success, else negative errno.
- *
- * Caller should acquire connect_lock before invoking this function.
  */
-static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
-       __must_hold(&udc->connect_lock)
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
 {
        int ret;
 
@@ -1202,7 +1153,7 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
 }
 
 /**
- * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
  * @udc: The UDC to be stopped
  *
  * This call is issued by the UDC Class driver after calling
@@ -1211,11 +1162,8 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
  * The details are implementation specific, but it can go as
  * far as powering off UDC completely and disable its data
  * line pullups.
- *
- * Caller should acquire connect lock before invoking this function.
  */
-static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
-       __must_hold(&udc->connect_lock)
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 {
        if (!udc->started) {
                dev_err(&udc->dev, "UDC had already stopped\n");
@@ -1374,7 +1322,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
 
        udc->gadget = gadget;
        gadget->udc = udc;
-       mutex_init(&udc->connect_lock);
 
        udc->started = false;
 
@@ -1576,15 +1523,11 @@ static int gadget_bind_driver(struct device *dev)
        if (ret)
                goto err_bind;
 
-       mutex_lock(&udc->connect_lock);
-       ret = usb_gadget_udc_start_locked(udc);
-       if (ret) {
-               mutex_unlock(&udc->connect_lock);
+       ret = usb_gadget_udc_start(udc);
+       if (ret)
                goto err_start;
-       }
        usb_gadget_enable_async_callbacks(udc);
-       usb_udc_connect_control_locked(udc);
-       mutex_unlock(&udc->connect_lock);
+       usb_udc_connect_control(udc);
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
        return 0;
@@ -1615,14 +1558,12 @@ static void gadget_unbind_driver(struct device *dev)
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 
-       mutex_lock(&udc->connect_lock);
-       usb_gadget_disconnect_locked(gadget);
+       usb_gadget_disconnect(gadget);
        usb_gadget_disable_async_callbacks(udc);
        if (gadget->irq)
                synchronize_irq(gadget->irq);
        udc->driver->unbind(gadget);
-       usb_gadget_udc_stop_locked(udc);
-       mutex_unlock(&udc->connect_lock);
+       usb_gadget_udc_stop(udc);
 
        mutex_lock(&udc_lock);
        driver->is_bound = false;
@@ -1708,15 +1649,11 @@ static ssize_t soft_connect_store(struct device *dev,
        }
 
        if (sysfs_streq(buf, "connect")) {
-               mutex_lock(&udc->connect_lock);
-               usb_gadget_udc_start_locked(udc);
-               usb_gadget_connect_locked(udc->gadget);
-               mutex_unlock(&udc->connect_lock);
+               usb_gadget_udc_start(udc);
+               usb_gadget_connect(udc->gadget);
        } else if (sysfs_streq(buf, "disconnect")) {
-               mutex_lock(&udc->connect_lock);
-               usb_gadget_disconnect_locked(udc->gadget);
-               usb_gadget_udc_stop_locked(udc);
-               mutex_unlock(&udc->connect_lock);
+               usb_gadget_disconnect(udc->gadget);
+               usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
                ret = -EINVAL;
index 3592f75..7bd2fdd 100644 (file)
@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
 
        uhci->rh_numports = uhci_count_ports(hcd);
 
-       /* Intel controllers report the OverCurrent bit active on.
-        * VIA controllers report it active off, so we'll adjust the
-        * bit value.  (It's not standardized in the UHCI spec.)
+       /*
+        * Intel controllers report the OverCurrent bit active on.  VIA
+        * and ZHAOXIN controllers report it active off, so we'll adjust
+        * the bit value.  (It's not standardized in the UHCI spec.)
         */
-       if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+       if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
+                       to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
                uhci->oc_low = 1;
 
        /* HP's server management chip requires a longer port reset delay. */
index ddb79f2..79b3691 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/reset.h>
+#include <linux/suspend.h>
 
 #include "xhci.h"
 #include "xhci-trace.h"
@@ -387,7 +388,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
                pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
-               xhci->quirks |= XHCI_BROKEN_D3COLD;
+               xhci->quirks |= XHCI_BROKEN_D3COLD_S2I;
 
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
@@ -801,9 +802,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * Systems with the TI redriver that loses port status change events
         * need to have the registers polled during D3, so avoid D3cold.
         */
-       if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
+       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pci_d3cold_disable(pdev);
 
+#ifdef CONFIG_SUSPEND
+       /* d3cold is broken, but only when s2idle is used */
+       if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE &&
+           xhci->quirks & (XHCI_BROKEN_D3COLD_S2I))
+               pci_d3cold_disable(pdev);
+#endif
+
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
                xhci_pme_quirk(hcd);
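
pm_suspend_target_state comes from <linux/suspend.h> and records which
system sleep state the PM core is entering; the #ifdef keeps the check
out of !CONFIG_SUSPEND builds. This lets a quirk be confined to
suspend-to-idle while leaving deep S3 untouched. The guard in isolation
(apply_s2idle_quirk() is hypothetical):

    #include <linux/suspend.h>

    #ifdef CONFIG_SUSPEND
            /* Only the s2idle path needs the workaround. */
            if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE)
                    apply_s2idle_quirk(pdev);
    #endif
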
 
index 1ad12d5..2bc82b3 100644 (file)
@@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trace_xhci_inc_enq(ring);
 }
 
+static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
+                           struct xhci_segment *end_seg, union xhci_trb *end,
+                           unsigned int num_segs)
+{
+       union xhci_trb *last_on_seg;
+       int num = 0;
+       int i = 0;
+
+       do {
+               if (start_seg == end_seg && end >= start)
+                       return num + (end - start);
+               last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
+               num += last_on_seg - start;
+               start_seg = start_seg->next;
+               start = start_seg->trbs;
+       } while (i++ <= num_segs);
+
+       return -EINVAL;
+}
+
 /*
  * Check to see if there's room to enqueue num_trbs on the ring and make sure
  * enqueue pointer will not advance into dequeue segment. See rules above.
@@ -2140,6 +2160,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                     u32 trb_comp_code)
 {
        struct xhci_ep_ctx *ep_ctx;
+       int trbs_freed;
 
        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 
@@ -2209,9 +2230,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
        }
 
        /* Update ring dequeue pointer */
+       trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
+                                     td->last_trb_seg, td->last_trb,
+                                     ep_ring->num_segs);
+       if (trbs_freed < 0)
+               xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
+       else
+               ep_ring->num_trbs_free += trbs_freed;
        ep_ring->dequeue = td->last_trb;
        ep_ring->deq_seg = td->last_trb_seg;
-       ep_ring->num_trbs_free += td->num_trbs - 1;
        inc_deq(xhci, ep_ring);
 
        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
index 08d7219..6b690ec 100644 (file)
@@ -1901,7 +1901,7 @@ struct xhci_hcd {
 #define XHCI_DISABLE_SPARSE    BIT_ULL(38)
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
-#define XHCI_BROKEN_D3COLD     BIT_ULL(41)
+#define XHCI_BROKEN_D3COLD_S2I BIT_ULL(41)
 #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
 #define XHCI_SUSPEND_RESUME_CLKS       BIT_ULL(43)
 #define XHCI_RESET_TO_DEFAULT  BIT_ULL(44)
index 8931df5..c54e980 100644 (file)
@@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand)
  ***********************************************************************/
 
 /* Command timeout and abort */
-static int command_abort(struct scsi_cmnd *srb)
+static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
 {
-       struct us_data *us = host_to_us(srb->device->host);
-
-       usb_stor_dbg(us, "%s called\n", __func__);
-
        /*
         * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
         * bits are protected by the host lock.
         */
        scsi_lock(us_to_host(us));
 
-       /* Is this command still active? */
-       if (us->srb != srb) {
+       /* Is there any active pending command to abort? */
+       if (!us->srb) {
                scsi_unlock(us_to_host(us));
                usb_stor_dbg(us, "-- nothing to abort\n");
+               return SUCCESS;
+       }
+
+       /* Does the command match the passed srb, if any? */
+       if (srb_match && us->srb != srb_match) {
+               scsi_unlock(us_to_host(us));
+               usb_stor_dbg(us, "-- pending command mismatch\n");
                return FAILED;
        }
 
@@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb)
        return SUCCESS;
 }
 
+static int command_abort(struct scsi_cmnd *srb)
+{
+       struct us_data *us = host_to_us(srb->device->host);
+
+       usb_stor_dbg(us, "%s called\n", __func__);
+       return command_abort_matching(us, srb);
+}
+
 /*
  * This invokes the transport reset mechanism to reset the state of the
  * device
@@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb)
 
        usb_stor_dbg(us, "%s called\n", __func__);
 
+       /* abort any pending command before reset */
+       command_abort_matching(us, NULL);
+
        /* lock the device pointers and do the reset */
        mutex_lock(&(us->dev_mutex));
        result = us->transport_reset(us);
index 8f3e884..66de880 100644 (file)
@@ -516,6 +516,10 @@ static ssize_t pin_assignment_show(struct device *dev,
 
        mutex_unlock(&dp->lock);
 
+       /* get_current_pin_assignments can return 0 when no matching pin assignments are found */
+       if (len == 0)
+               len++;
+
        buf[len - 1] = '\n';
        return len;
 }
index 8b075ca..438cc40 100644 (file)
@@ -886,6 +886,9 @@ static void tps6598x_remove(struct i2c_client *client)
 {
        struct tps6598x *tps = i2c_get_clientdata(client);
 
+       if (!client->irq)
+               cancel_delayed_work_sync(&tps->wq_poll);
+
        tps6598x_disconnect(tps, 0);
        typec_unregister_port(tps->port);
        usb_role_switch_put(tps->role_sw);
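
When no IRQ is wired up this driver falls back to a self-rearming poll
worker, so the remove path must stop it synchronously before the state
it dereferences is torn down. The general idiom, sketched with
hypothetical names (struct foo, poll_work):

    static void foo_remove(struct i2c_client *client)
    {
            struct foo *priv = i2c_get_clientdata(client);

            if (!client->irq)       /* polling mode only */
                    cancel_delayed_work_sync(&priv->poll_work);

            /* teardown that frees priv-owned state is now safe */
    }
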
index 96e9157..0fdf5f4 100644 (file)
@@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA
        depends on FB
        help
          Allow generic frame-buffer to provide get_fb_unmapped_area
-         function.
+         function to provide shareable character device support on nommu.
 
 menuconfig FB_FOREIGN_ENDIAN
        bool "Framebuffer foreign endianness support"
index b02e4e6..cba2b11 100644 (file)
@@ -3498,11 +3498,6 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
        if (ret)
                goto atyfb_setup_generic_fail;
 #endif
-       if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
-               par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
-       else
-               par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
-
        /* according to ATI, we should use clock 3 for accelerated mode */
        par->clk_wr_offset = 3;
 
index e808dc8..28739f1 100644 (file)
@@ -1468,7 +1468,7 @@ __releases(&info->lock)
 }
 
 #if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
-unsigned long get_fb_unmapped_area(struct file *filp,
+static unsigned long get_fb_unmapped_area(struct file *filp,
                                   unsigned long addr, unsigned long len,
                                   unsigned long pgoff, unsigned long flags)
 {
index b4b3670..2082b5c 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "i810_regs.h"
 #include "i810.h"
+#include "i810_main.h"
 
 struct mode_registers std_modes[] = {
        /* 640x480 @ 60Hz */
@@ -276,7 +277,7 @@ void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
        var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
 }
 
-u32 i810_get_watermark(struct fb_var_screeninfo *var,
+u32 i810_get_watermark(const struct fb_var_screeninfo *var,
                       struct i810fb_par *par)
 {
        struct mode_registers *params = &par->regs;
index 1eaa35c..477789c 100644 (file)
@@ -491,7 +491,8 @@ static int tpo_td043_probe(struct spi_device *spi)
 
        ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
        if (IS_ERR(ddata->vcc_reg)) {
-               r = dev_err_probe(&spi->dev, r, "failed to get LCD VCC regulator\n");
+               r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg),
+                                 "failed to get LCD VCC regulator\n");
                goto err_regulator;
        }
 
index 14c9215..686a234 100644 (file)
@@ -741,7 +741,7 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
         packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
         NGLE_SET_DSTXY(fb, packed_dst);
 
-        /* Write zeroes to overlay planes */
+       /* Write zeroes to overlay planes */
        NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
                                       IBOvals(RopSrc, MaskAddrOffset(0),
                                               BitmapExtent08, StaticReg(0),
@@ -1297,14 +1297,14 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                break;
        default:
 #ifdef FALLBACK_TO_1BPP
-               printk(KERN_WARNING
+               printk(KERN_WARNING
                        "stifb: Unsupported graphics card (id=0x%08x) "
                                "- now trying 1bpp mode instead\n",
                        fb->id);
                bpp = 1;        /* default to 1 bpp */
                break;
 #else
-               printk(KERN_WARNING
+               printk(KERN_WARNING
                        "stifb: Unsupported graphics card (id=0x%08x) "
                                "- skipping.\n",
                        fb->id);
index 216d49c..dabc30a 100644 (file)
@@ -27,6 +27,8 @@
 #include <video/udlfb.h>
 #include "edid.h"
 
+#define OUT_EP_NUM     1       /* The endpoint number we will use */
+
 static const struct fb_fix_screeninfo dlfb_fix = {
        .id =           "udlfb",
        .type =         FB_TYPE_PACKED_PIXELS,
@@ -1541,24 +1543,16 @@ static const struct device_attribute fb_device_attrs[] = {
 static int dlfb_select_std_channel(struct dlfb_data *dlfb)
 {
        int ret;
-       void *buf;
        static const u8 set_def_chn[] = {
                                0x57, 0xCD, 0xDC, 0xA7,
                                0x1C, 0x88, 0x5E, 0x15,
                                0x60, 0xFE, 0xC6, 0x97,
                                0x16, 0x3D, 0x47, 0xF2  };
 
-       buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
-
-       if (!buf)
-               return -ENOMEM;
-
-       ret = usb_control_msg(dlfb->udev, usb_sndctrlpipe(dlfb->udev, 0),
-                       NR_USB_REQUEST_CHANNEL,
+       ret = usb_control_msg_send(dlfb->udev, 0, NR_USB_REQUEST_CHANNEL,
                        (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
-                       buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
-
-       kfree(buf);
+                       &set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT,
+                       GFP_KERNEL);
 
        return ret;
 }
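
usb_control_msg_send() copies the payload into its own DMA-able buffer,
which is why the kmemdup()/kfree() dance could be dropped and a static
const array passed directly; it returns 0 on success and treats short
transfers as errors. A minimal sketch (REQ_VENDOR is a hypothetical
request code):

    static const u8 payload[4] = { 0x01, 0x02, 0x03, 0x04 };
    int ret;

    /* The payload may live in .rodata or on the stack; the core copies it. */
    ret = usb_control_msg_send(udev, 0, REQ_VENDOR,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
                               payload, sizeof(payload),
                               USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
    if (ret)
            return ret;     /* negative errno, including short writes */
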
@@ -1652,7 +1646,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        struct fb_info *info;
        int retval;
        struct usb_device *usbdev = interface_to_usbdev(intf);
-       struct usb_endpoint_descriptor *out;
+       static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
 
        /* usb initialization */
        dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1666,9 +1660,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        dlfb->udev = usb_get_dev(usbdev);
        usb_set_intfdata(intf, dlfb);
 
-       retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
-       if (retval) {
-               dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+       if (!usb_check_bulk_endpoints(intf, out_ep)) {
+               dev_err(&intf->dev, "Invalid DisplayLink device!\n");
+               retval = -EINVAL;
                goto error;
        }
 
@@ -1927,7 +1921,8 @@ retry:
                }
 
                /* urb->transfer_buffer_length set to actual before submit */
-               usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
+               usb_fill_bulk_urb(urb, dlfb->udev,
+                       usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
                        buf, size, dlfb_urb_completion, unode);
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 
index 29cf002..4c0f22a 100644 (file)
@@ -3942,7 +3942,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
        struct dentry *dentry;
        struct ceph_cap *cap;
        char *path;
-       int pathlen = 0, err = 0;
+       int pathlen = 0, err;
        u64 pathbase;
        u64 snap_follows;
 
@@ -3965,6 +3965,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                spin_unlock(&ci->i_ceph_lock);
+               err = 0;
                goto out_err;
        }
        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
index 8700720..0b236eb 100644 (file)
@@ -1111,6 +1111,19 @@ skip_inode:
                                continue;
                        adjust_snap_realm_parent(mdsc, child, realm->ino);
                }
+       } else {
+               /*
+                * In the non-split case both 'num_split_inos' and
+                * 'num_split_realms' should be 0, making this a no-op.
+                * However, the MDS happens to populate the 'split_realms' list
+                * in one of the UPDATE op cases by mistake.
+                *
+                * Skip both lists just in case to ensure that 'p' is
+                * positioned at the start of realm info, as expected by
+                * ceph_update_snap_trace().
+                */
+               p += sizeof(u64) * num_split_inos;
+               p += sizeof(u64) * num_split_realms;
        }
 
        /*
index 414685c..5f8fd20 100644 (file)
@@ -424,8 +424,8 @@ struct smb_version_operations {
        /* check for STATUS_NETWORK_SESSION_EXPIRED */
        bool (*is_session_expired)(char *);
        /* send oplock break response */
-       int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *,
-                              struct cifsInodeInfo *);
+       int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
+                       __u16 net_fid, struct cifsInodeInfo *cifs_inode);
        /* query remote filesystem */
        int (*queryfs)(const unsigned int, struct cifs_tcon *,
                       struct cifs_sb_info *, struct kstatfs *);
index c5fcefd..ba7f2e0 100644 (file)
@@ -4881,9 +4881,9 @@ void cifs_oplock_break(struct work_struct *work)
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        int rc = 0;
-       bool purge_cache = false;
-       struct cifs_deferred_close *dclose;
-       bool is_deferred = false;
+       bool purge_cache = false, oplock_break_cancelled;
+       __u64 persistent_fid, volatile_fid;
+       __u16 net_fid;
 
        wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
                        TASK_UNINTERRUPTIBLE);
@@ -4924,28 +4924,28 @@ oplock_break_ack:
         * file handles but cached, then schedule deferred close immediately.
         * So, new open will not use cached handle.
         */
-       spin_lock(&CIFS_I(inode)->deferred_lock);
-       is_deferred = cifs_is_deferred_close(cfile, &dclose);
-       spin_unlock(&CIFS_I(inode)->deferred_lock);
 
-       if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
-                       cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
+       if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
                cifs_close_deferred_file(cinode);
-       }
 
+       persistent_fid = cfile->fid.persistent_fid;
+       volatile_fid = cfile->fid.volatile_fid;
+       net_fid = cfile->fid.netfid;
+       oplock_break_cancelled = cfile->oplock_break_cancelled;
+
+       _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        /*
         * releasing stale oplock after recent reconnect of smb session using
         * a now incorrect file handle is not a data integrity issue but do
         * not bother sending an oplock release if session to server still is
         * disconnected since oplock already released by the server
         */
-       if (!cfile->oplock_break_cancelled) {
-               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-                                                            cinode);
+       if (!oplock_break_cancelled) {
+               rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+                               volatile_fid, net_fid, cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
 
-       _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        cifs_done_oplock_break(cinode);
 }
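
The reordering above snapshots the file ids before _cifsFileInfo_put()
so the oplock response can still be sent once the reference, and
possibly the object itself, is gone. The underlying lifetime idiom,
reduced to essentials (obj, obj_put and send_response are hypothetical):

    u64 id = obj->id;       /* copy everything the late work needs ... */

    obj_put(obj);           /* ... because dropping the ref may free obj */
    send_response(id);      /* operates on the snapshot, never on obj */
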
 
index abda614..7d1b3fc 100644 (file)
@@ -897,12 +897,11 @@ cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
 }
 
 static int
-cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
-                    struct cifsInodeInfo *cinode)
+cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+               __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
 {
-       return CIFSSMBLock(0, tcon, fid->netfid, current->tgid, 0, 0, 0, 0,
-                          LOCKING_ANDX_OPLOCK_RELEASE, false,
-                          CIFS_CACHE_READ(cinode) ? 1 : 0);
+       return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
+                          LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
 
 static int
index a295e4c..5065398 100644 (file)
@@ -2383,15 +2383,14 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
 }
 
 static int
-smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
-                    struct cifsInodeInfo *cinode)
+smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+               __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
 {
        if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
                return SMB2_lease_break(0, tcon, cinode->lease_key,
                                        smb2_get_lease_state(cinode));
 
-       return SMB2_oplock_break(0, tcon, fid->persistent_fid,
-                                fid->volatile_fid,
+       return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
                                 CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
 
index 704fb59..f259d92 100644 (file)
@@ -121,6 +121,7 @@ config EROFS_FS_PCPU_KTHREAD
 config EROFS_FS_PCPU_KTHREAD_HIPRI
        bool "EROFS high priority per-CPU kthread workers"
        depends on EROFS_FS_ZIP && EROFS_FS_PCPU_KTHREAD
+       default y
        help
          This permits EROFS to configure per-CPU kthread workers to run
          at higher priority.
index 99bbc59..a3a98fc 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o sysfs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
 erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
 erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
index af0431a..1e39c03 100644 (file)
@@ -472,12 +472,6 @@ static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
        return NULL;
 }
 
-void *erofs_get_pcpubuf(unsigned int requiredpages);
-void erofs_put_pcpubuf(void *ptr);
-int erofs_pcpubuf_growsize(unsigned int nrpages);
-void __init erofs_pcpubuf_init(void);
-void erofs_pcpubuf_exit(void);
-
 int erofs_register_sysfs(struct super_block *sb);
 void erofs_unregister_sysfs(struct super_block *sb);
 int __init erofs_init_sysfs(void);
@@ -512,6 +506,11 @@ int z_erofs_load_lz4_config(struct super_block *sb,
                            struct z_erofs_lz4_cfgs *lz4, int len);
 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
                            int flags);
+void *erofs_get_pcpubuf(unsigned int requiredpages);
+void erofs_put_pcpubuf(void *ptr);
+int erofs_pcpubuf_growsize(unsigned int nrpages);
+void __init erofs_pcpubuf_init(void);
+void erofs_pcpubuf_exit(void);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -529,6 +528,8 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
        }
        return 0;
 }
+static inline void erofs_pcpubuf_init(void) {}
+static inline void erofs_pcpubuf_exit(void) {}
 #endif /* !CONFIG_EROFS_FS_ZIP */
 
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
index cd80499..bbfe7ce 100644 (file)
@@ -675,7 +675,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb)
        if (!pfs)
                return -ENOMEM;
 
-       if (erofs_sb_has_fragments(sbi))
+       if (sbi->packed_inode)
                buf.inode = sbi->packed_inode;
        else
                erofs_init_metabuf(&buf, sb);
index 45f21db..160b3da 100644 (file)
@@ -369,8 +369,6 @@ static struct kthread_worker *erofs_init_percpu_worker(int cpu)
                return worker;
        if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
                sched_set_fifo_low(worker->task);
-       else
-               sched_set_normal(worker->task, 0);
        return worker;
 }
 
index 4ed379f..4882a81 100644 (file)
@@ -351,7 +351,8 @@ int ksmbd_conn_handler_loop(void *p)
                        break;
 
                /* 4 for rfc1002 length field */
-               size = pdu_size + 4;
+               /* 1 for implied bcc[0] */
+               size = pdu_size + 4 + 1;
                conn->request_buf = kvmalloc(size, GFP_KERNEL);
                if (!conn->request_buf)
                        break;
index 2e54ded..6d1ccb9 100644 (file)
@@ -1449,11 +1449,12 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
  * smb2_find_context_vals() - find a particular context info in open request
  * @open_req:  buffer containing smb2 file open(create) request
  * @tag:       context name to search for
+ * @tag_len:   the length of tag
  *
  * Return:     pointer to requested context, NULL if @str context not found
  *             or error pointer if name length is invalid.
  */
-struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
+struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len)
 {
        struct create_context *cc;
        unsigned int next = 0;
@@ -1492,7 +1493,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
                        return ERR_PTR(-EINVAL);
 
                name = (char *)cc + name_off;
-               if (memcmp(name, tag, name_len) == 0)
+               if (name_len == tag_len && !memcmp(name, tag, name_len))
                        return cc;
 
                remain_len -= next;
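
The added length check turns the comparison into an exact match: with
memcmp(name, tag, name_len) alone, a tag that is a prefix of a longer
context name would compare equal, and a tag shorter than name_len would
be read past its end. The fixed form, with hypothetical tag values:

    /* "DH2Q" must match neither "DH2" nor "DH2QX". */
    if (name_len == tag_len && memcmp(name, tag, tag_len) == 0)
            return cc;
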
index 0975344..4b0fe6d 100644 (file)
@@ -118,7 +118,7 @@ void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
 void create_mxac_rsp_buf(char *cc, int maximal_access);
 void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
 void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
-struct create_context *smb2_find_context_vals(void *open_req, const char *str);
+struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len);
 struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
                                          char *lease_key);
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
index fbdde42..0ffe663 100644 (file)
@@ -416,8 +416,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
 
                /*
                 * Allow a message padded to an 8-byte boundary.
+                * Linux 4.19.217 with smb 3.0.2 is sometimes
+                * sending messages where the clc_len is exactly
+                * 8 bytes less than len.
                 */
-               if (clc_len < len && (len - clc_len) < 8)
+               if (clc_len < len && (len - clc_len) <= 8)
                        goto validate_credit;
 
                pr_err_ratelimited(
index cb93fd2..717bcd2 100644 (file)
@@ -1356,7 +1356,7 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        struct authenticate_message *authblob;
        struct ksmbd_user *user;
        char *name;
-       unsigned int auth_msg_len, name_off, name_len, secbuf_len;
+       unsigned int name_off, name_len, secbuf_len;
 
        secbuf_len = le16_to_cpu(req->SecurityBufferLength);
        if (secbuf_len < sizeof(struct authenticate_message)) {
@@ -1366,9 +1366,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        authblob = user_authblob(conn, req);
        name_off = le32_to_cpu(authblob->UserName.BufferOffset);
        name_len = le16_to_cpu(authblob->UserName.Length);
-       auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
 
-       if (auth_msg_len < (u64)name_off + name_len)
+       if (secbuf_len < (u64)name_off + name_len)
                return NULL;
 
        name = smb_strndup_from_utf16((const char *)authblob + name_off,
@@ -2464,7 +2463,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
                return -ENOENT;
 
        /* Parse SD BUFFER create contexts */
-       context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER);
+       context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4);
        if (!context)
                return -ENOENT;
        else if (IS_ERR(context))
@@ -2666,7 +2665,7 @@ int smb2_open(struct ksmbd_work *work)
 
        if (req->CreateContextsOffset) {
                /* Parse non-durable handle create contexts */
-               context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER);
+               context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2686,7 +2685,7 @@ int smb2_open(struct ksmbd_work *work)
                }
 
                context = smb2_find_context_vals(req,
-                                                SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST);
+                                                SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2697,7 +2696,7 @@ int smb2_open(struct ksmbd_work *work)
                }
 
                context = smb2_find_context_vals(req,
-                                                SMB2_CREATE_TIMEWARP_REQUEST);
+                                                SMB2_CREATE_TIMEWARP_REQUEST, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2709,7 +2708,7 @@ int smb2_open(struct ksmbd_work *work)
 
                if (tcon->posix_extensions) {
                        context = smb2_find_context_vals(req,
-                                                        SMB2_CREATE_TAG_POSIX);
+                                                        SMB2_CREATE_TAG_POSIX, 16);
                        if (IS_ERR(context)) {
                                rc = PTR_ERR(context);
                                goto err_out1;
@@ -3107,7 +3106,7 @@ int smb2_open(struct ksmbd_work *work)
                struct create_alloc_size_req *az_req;
 
                az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
-                                       SMB2_CREATE_ALLOCATION_SIZE);
+                                       SMB2_CREATE_ALLOCATION_SIZE, 4);
                if (IS_ERR(az_req)) {
                        rc = PTR_ERR(az_req);
                        goto err_out;
@@ -3134,7 +3133,7 @@ int smb2_open(struct ksmbd_work *work)
                                            err);
                }
 
-               context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID);
+               context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out;
index bb94949..04ba95b 100644 (file)
@@ -77,9 +77,9 @@ static const unsigned long    nlm_grace_period_min = 0;
 static const unsigned long     nlm_grace_period_max = 240;
 static const unsigned long     nlm_timeout_min = 3;
 static const unsigned long     nlm_timeout_max = 20;
-static const int               nlm_port_min = 0, nlm_port_max = 65535;
 
 #ifdef CONFIG_SYSCTL
+static const int               nlm_port_min = 0, nlm_port_max = 65535;
 static struct ctl_table_header * nlm_sysctl_table;
 #endif
 
index e63c1d4..8f3112e 100644 (file)
@@ -317,7 +317,7 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
 
        name = nfs_readdir_copy_name(entry->name, entry->len);
 
-       array = kmap_atomic(folio_page(folio, 0));
+       array = kmap_local_folio(folio, 0);
        if (!name)
                goto out;
        ret = nfs_readdir_array_can_expand(array);
@@ -340,7 +340,7 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
                nfs_readdir_array_set_eof(array);
 out:
        *cookie = array->last_cookie;
-       kunmap_atomic(array);
+       kunmap_local(array);
        return ret;
 }
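
kmap_local_folio()/kunmap_local() replace the kmap_atomic() pair: the
mapping is still CPU-local and nested mappings must be released in LIFO
order, but preemption and pagefaults are no longer disabled while it is
held. The basic pattern:

    #include <linux/highmem.h>

    void *addr;

    addr = kmap_local_folio(folio, 0);  /* map at byte offset 0 of the folio */
    /* ... access the mapped contents ... */
    kunmap_local(addr);                 /* release in reverse order of mapping */
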
 
index 18f25ff..d366539 100644 (file)
@@ -5437,10 +5437,18 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
        return false;
 }
 
-static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+static inline void nfs4_read_plus_scratch_free(struct nfs_pgio_header *hdr)
 {
-       if (hdr->res.scratch)
+       if (hdr->res.scratch) {
                kfree(hdr->res.scratch);
+               hdr->res.scratch = NULL;
+       }
+}
+
+static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+{
+       nfs4_read_plus_scratch_free(hdr);
+
        if (!nfs4_sequence_done(task, &hdr->res.seq_res))
                return -EAGAIN;
        if (nfs4_read_stateid_changed(task, &hdr->args))
index 7b8f17e..c159817 100644 (file)
@@ -153,18 +153,6 @@ static int exports_net_open(struct net *net, struct file *file)
        return 0;
 }
 
-static int exports_proc_open(struct inode *inode, struct file *file)
-{
-       return exports_net_open(current->nsproxy->net_ns, file);
-}
-
-static const struct proc_ops exports_proc_ops = {
-       .proc_open      = exports_proc_open,
-       .proc_read      = seq_read,
-       .proc_lseek     = seq_lseek,
-       .proc_release   = seq_release,
-};
-
 static int exports_nfsd_open(struct inode *inode, struct file *file)
 {
        return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -1458,6 +1446,19 @@ static struct file_system_type nfsd_fs_type = {
 MODULE_ALIAS_FS("nfsd");
 
 #ifdef CONFIG_PROC_FS
+
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+       return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct proc_ops exports_proc_ops = {
+       .proc_open      = exports_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
+};
+
 static int create_proc_exports_entry(void)
 {
        struct proc_dir_entry *entry;
index 4183819..72a906a 100644 (file)
@@ -1365,19 +1365,19 @@ TRACE_EVENT(nfsd_cb_setup,
                __field(u32, cl_id)
                __field(unsigned long, authflavor)
                __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
-               __array(unsigned char, netid, 8)
+               __string(netid, netid)
        ),
        TP_fast_assign(
                __entry->cl_boot = clp->cl_clientid.cl_boot;
                __entry->cl_id = clp->cl_clientid.cl_id;
-               strlcpy(__entry->netid, netid, sizeof(__entry->netid));
+               __assign_str(netid, netid);
                __entry->authflavor = authflavor;
                __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
                                  clp->cl_cb_conn.cb_addrlen)
        ),
        TP_printk("addr=%pISpc client %08x:%08x proto=%s flavor=%s",
                __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
-               __entry->netid, show_nfsd_authflavor(__entry->authflavor))
+               __get_str(netid), show_nfsd_authflavor(__entry->authflavor))
 );
 
 TRACE_EVENT(nfsd_cb_setup_err,
index 1310d2d..a8ce522 100644 (file)
@@ -917,6 +917,7 @@ void nilfs_evict_inode(struct inode *inode)
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct the_nilfs *nilfs;
        int ret;
 
        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -929,6 +930,23 @@ void nilfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages_final(&inode->i_data);
 
+       nilfs = sb->s_fs_info;
+       if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
+               /*
+                * If this inode is about to be disposed after the file system
+                * has been degraded to read-only due to file system corruption
+                * or after the writer has been detached, do not make any
+                * changes that cause writes, just clear it.
+                * Do this check after read-locking ns_segctor_sem by
+                * nilfs_transaction_begin() in order to avoid a race with
+                * the writer detach operation.
+                */
+               clear_inode(inode);
+               nilfs_clear_inode(inode);
+               nilfs_transaction_abort(sb);
+               return;
+       }
+
        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
index 0ba34c1..96d1c3e 100644 (file)
@@ -130,6 +130,7 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
        if (sizeof(buf) == sizeof(*st))
                memcpy(&buf, st, sizeof(*st));
        else {
+               memset(&buf, 0, sizeof(buf));
                if (sizeof buf.f_blocks == 4) {
                        if ((st->f_blocks | st->f_bfree | st->f_bavail |
                             st->f_bsize | st->f_frsize) &
@@ -158,7 +159,6 @@ static int do_statfs_native(struct kstatfs *st, struct statfs __user *p)
                buf.f_namelen = st->f_namelen;
                buf.f_frsize = st->f_frsize;
                buf.f_flags = st->f_flags;
-               memset(buf.f_spare, 0, sizeof(buf.f_spare));
        }
        if (copy_to_user(p, &buf, sizeof(buf)))
                return -EFAULT;
@@ -171,6 +171,7 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
        if (sizeof(buf) == sizeof(*st))
                memcpy(&buf, st, sizeof(*st));
        else {
+               memset(&buf, 0, sizeof(buf));
                buf.f_type = st->f_type;
                buf.f_bsize = st->f_bsize;
                buf.f_blocks = st->f_blocks;
@@ -182,7 +183,6 @@ static int do_statfs64(struct kstatfs *st, struct statfs64 __user *p)
                buf.f_namelen = st->f_namelen;
                buf.f_frsize = st->f_frsize;
                buf.f_flags = st->f_flags;
-               memset(buf.f_spare, 0, sizeof(buf.f_spare));
        }
        if (copy_to_user(p, &buf, sizeof(buf)))
                return -EFAULT;
index b441e63..c0ffe20 100644 (file)
@@ -1376,8 +1376,6 @@ enum blk_unique_id {
        BLK_UID_NAA     = 3,
 };
 
-#define NFL4_UFLG_MASK                 0x0000003F
-
 struct block_device_operations {
        void (*submit_bio)(struct bio *bio);
        int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
index 947a60b..d7779a1 100644 (file)
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
-    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);
-
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 #define likely_notrace(x)      __builtin_expect(!!(x), 1)
 #define unlikely_notrace(x)    __builtin_expect(!!(x), 0)
 
index 9deeaeb..abf3d3b 100644 (file)
@@ -74,6 +74,7 @@ struct class {
 struct class_dev_iter {
        struct klist_iter               ki;
        const struct device_type        *type;
+       struct subsys_private           *sp;
 };
 
 int __must_check class_register(const struct class *class);
index fc985e5..8de6b6e 100644 (file)
@@ -208,6 +208,7 @@ struct team {
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
        bool port_mtu_change_allowed;
+       bool notifier_ctx;
        struct {
                unsigned int count;
                unsigned int interval; /* in ms */
index dc5e2cb..b89778d 100644 (file)
@@ -1705,7 +1705,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         rc[0x1];
 
        u8         uar_4k[0x1];
-       u8         reserved_at_241[0x9];
+       u8         reserved_at_241[0x7];
+       u8         fl_rc_qp_when_roce_disabled[0x1];
+       u8         regexp_params[0x1];
        u8         uar_sz[0x6];
        u8         port_selection_cap[0x1];
        u8         reserved_at_248[0x1];
index c5a0dc8..6478838 100644 (file)
@@ -1900,10 +1900,8 @@ void phy_package_leave(struct phy_device *phydev);
 int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
                          int addr, size_t priv_size);
 
-#if IS_ENABLED(CONFIG_PHYLIB)
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
-#endif
 
 int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
 int phy_ethtool_get_sset_count(struct phy_device *phydev);
index a1aa681..7c8d654 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __LINUX_BQ27X00_BATTERY_H__
 #define __LINUX_BQ27X00_BATTERY_H__
 
+#include <linux/power_supply.h>
+
 enum bq27xxx_chip {
        BQ27000 = 1, /* bq27000, bq27200 */
        BQ27010, /* bq27010, bq27210 */
@@ -68,7 +70,9 @@ struct bq27xxx_device_info {
        struct bq27xxx_access_methods bus;
        struct bq27xxx_reg_cache cache;
        int charge_design_full;
+       bool removed;
        unsigned long last_update;
+       union power_supply_propval last_status;
        struct delayed_work work;
        struct power_supply *bat;
        struct list_head list;
index 7bde8e1..224293b 100644 (file)
@@ -107,7 +107,10 @@ extern void synchronize_shrinkers(void);
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                             int *debugfs_id);
+extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+                                   int debugfs_id);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
                                                  const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
@@ -115,10 +118,16 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
 {
        return 0;
 }
-static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                                    int *debugfs_id)
 {
+       *debugfs_id = -1;
        return NULL;
 }
+static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+                                          int debugfs_id)
+{
+}
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 {
index 738776a..0b40417 100644 (file)
@@ -1587,6 +1587,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
        to->l4_hash = from->l4_hash;
 };
 
+static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
+                                   const struct sk_buff *skb2)
+{
+#ifdef CONFIG_TLS_DEVICE
+       return skb2->decrypted - skb1->decrypted;
+#else
+       return 0;
+#endif
+}
+
 static inline void skb_copy_decrypted(struct sk_buff *to,
                                      const struct sk_buff *from)
 {
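
The new skb_cmp_decrypted() helper returns the difference of the two buffers' decrypted bits, so a zero result means both sides agree and a nonzero result flags a mix of decrypted and cleartext data. A minimal standalone sketch of the same compare-the-bit idiom; the struct below is a stand-in, not the kernel's struct sk_buff:

    #include <stdio.h>

    struct buf {
            unsigned int decrypted : 1;
    };

    /* 0 when both buffers are in the same decryption state */
    static int buf_cmp_decrypted(const struct buf *a, const struct buf *b)
    {
            return b->decrypted - a->decrypted;
    }

    int main(void)
    {
            struct buf cipher = { .decrypted = 0 };
            struct buf clear  = { .decrypted = 1 };

            if (buf_cmp_decrypted(&cipher, &clear))
                    printf("mixed decrypted state: do not coalesce\n");
            return 0;
    }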
index 84f7874..054d791 100644 (file)
@@ -71,7 +71,6 @@ struct sk_psock_link {
 };
 
 struct sk_psock_work_state {
-       struct sk_buff                  *skb;
        u32                             len;
        u32                             off;
 };
@@ -105,7 +104,7 @@ struct sk_psock {
        struct proto                    *sk_proto;
        struct mutex                    work_mutex;
        struct sk_psock_work_state      work_state;
-       struct work_struct              work;
+       struct delayed_work             work;
        struct rcu_work                 rwork;
 };
 
index 24aa159..fbc4bd4 100644 (file)
@@ -176,7 +176,7 @@ extern struct svc_rdma_recv_ctxt *
 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_recv_ctxt *ctxt);
 extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
 extern int svc_rdma_recvfrom(struct svc_rqst *);
 
 /* svc_rdma_rw.c */
index 8674792..a6b1263 100644 (file)
@@ -23,7 +23,7 @@ struct svc_xprt_ops {
        int             (*xpo_sendto)(struct svc_rqst *);
        int             (*xpo_result_payload)(struct svc_rqst *, unsigned int,
                                              unsigned int);
-       void            (*xpo_release_rqst)(struct svc_rqst *);
+       void            (*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
        void            (*xpo_detach)(struct svc_xprt *);
        void            (*xpo_free)(struct svc_xprt *);
        void            (*xpo_kill_temp_xprt)(struct svc_xprt *);
index 7769338..6a1e8f1 100644 (file)
@@ -282,6 +282,7 @@ enum tpm_chip_flags {
        TPM_CHIP_FLAG_ALWAYS_POWERED            = BIT(5),
        TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED    = BIT(6),
        TPM_CHIP_FLAG_FIRMWARE_UPGRADE          = BIT(7),
+       TPM_CHIP_FLAG_SUSPENDED                 = BIT(8),
 };
 
 #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
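
TPM_CHIP_FLAG_SUSPENDED extends a BIT()-based flag word. A minimal userspace sketch of the same set/test/clear pattern; BIT() is re-defined locally to mirror the kernel macro, and the flag names are illustrative:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    #define FLAG_FIRMWARE_UPGRADE   BIT(7)
    #define FLAG_SUSPENDED          BIT(8)

    int main(void)
    {
            unsigned int flags = 0;

            flags |= FLAG_SUSPENDED;        /* set on suspend */
            if (flags & FLAG_SUSPENDED)
                    printf("reject commands while suspended\n");
            flags &= ~FLAG_SUSPENDED;       /* clear on resume */
            return 0;
    }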
index a2448e9..07531c4 100644 (file)
@@ -443,7 +443,7 @@ static inline struct usb_composite_driver *to_cdriver(
  * @bcd_webusb_version: 0x0100 by default, WebUSB specification version
  * @b_webusb_vendor_code: 0x0 by default, vendor code for WebUSB
  * @landing_page: empty by default, landing page to announce in WebUSB
- * @use_webusb:: false by default, interested gadgets set it
+ * @use_webusb: false by default, interested gadgets set it
  * @os_desc_config: the configuration to be used with OS descriptors
  * @setup_pending: true when setup request is queued but not completed
  * @os_desc_pending: true when os_desc request is queued but not completed
index e7c4487..367d538 100644 (file)
@@ -686,7 +686,10 @@ struct dtv_frontend_properties {
  * @id:                        Frontend ID
  * @exit:              Used to inform the DVB core that the frontend
  *                     thread should exit (usually meaning that the hardware
- *                     got disconnected.
+ *                     got disconnected).
+ * @remove_mutex:      mutex that avoids a race condition between a callback
+ *                     called when the hardware is disconnected and the
+ *                     file_operations of dvb_frontend.
  */
 
 struct dvb_frontend {
@@ -704,6 +707,7 @@ struct dvb_frontend {
        int (*callback)(void *adapter_priv, int component, int cmd, int arg);
        int id;
        unsigned int exit;
+       struct mutex remove_mutex;
 };
 
 /**
index 9980b1d..4a921ea 100644 (file)
@@ -39,6 +39,9 @@ struct net_device;
  * @exit:              flag to indicate when the device is being removed.
  * @demux:             pointer to &struct dmx_demux.
  * @ioctl_mutex:       protect access to this struct.
+ * @remove_mutex:      mutex that avoids a race condition between a callback
+ *                     called when the hardware is disconnected and the
+ *                     file_operations of dvb_net.
  *
  * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
  * devices.
@@ -51,6 +54,7 @@ struct dvb_net {
        unsigned int exit:1;
        struct dmx_demux *demux;
        struct mutex ioctl_mutex;
+       struct mutex remove_mutex;
 };
 
 /**
index 29d25c8..8958e5e 100644 (file)
@@ -194,6 +194,21 @@ struct dvb_device {
 };
 
 /**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops:              Dynamically allocated fops for ->owner registration
+ * @type:              type of dvb_device
+ * @template:          dvb_device used for registration
+ * @list_head:         list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+       struct file_operations *fops;
+       enum dvb_device_type type;
+       const struct dvb_device *template;
+       struct list_head list_head;
+};
+
+/**
  * dvb_device_get - Increase dvb_device reference
  *
  * @dvbdev:    pointer to struct dvb_device
index a6c8aee..8baf346 100644 (file)
@@ -1327,7 +1327,7 @@ int hci_le_create_cis(struct hci_conn *conn);
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
                              u8 role);
-int hci_conn_del(struct hci_conn *conn);
+void hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
index 0efef2a..59955ac 100644 (file)
@@ -221,6 +221,7 @@ struct bonding {
        struct   bond_up_slave __rcu *usable_slaves;
        struct   bond_up_slave __rcu *all_slaves;
        bool     force_primary;
+       bool     notifier_ctx;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
index 3352b1a..2e26e43 100644 (file)
@@ -24,6 +24,7 @@ struct tls_handshake_args {
        struct socket           *ta_sock;
        tls_done_func_t         ta_done;
        void                    *ta_data;
+       const char              *ta_peername;
        unsigned int            ta_timeout_ms;
        key_serial_t            ta_keyring;
        key_serial_t            ta_my_cert;
index c3fffaa..acec504 100644 (file)
@@ -76,6 +76,7 @@ struct ipcm_cookie {
        __be32                  addr;
        int                     oif;
        struct ip_options_rcu   *opt;
+       __u8                    protocol;
        __u8                    ttl;
        __s16                   tos;
        char                    priority;
@@ -96,6 +97,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
        ipcm->sockc.tsflags = inet->sk.sk_tsflags;
        ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
        ipcm->addr = inet->inet_saddr;
+       ipcm->protocol = inet->inet_num;
 }
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
index 9fa291a..2b12725 100644 (file)
@@ -497,29 +497,6 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
        return NULL;
 }
 
-/* Variant of nexthop_fib6_nh().
- * Caller should either hold rcu_read_lock(), or RTNL.
- */
-static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
-{
-       struct nh_info *nhi;
-
-       if (nh->is_group) {
-               struct nh_group *nh_grp;
-
-               nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-               nh = nexthop_mpath_select(nh_grp, 0);
-               if (!nh)
-                       return NULL;
-       }
-
-       nhi = rcu_dereference_rtnl(nh->nh_info);
-       if (nhi->family == AF_INET6)
-               return &nhi->fib6_nh;
-
-       return NULL;
-}
-
 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
 {
        struct fib6_nh *fib6_nh;
index c8ec2f3..126f9e2 100644 (file)
@@ -399,22 +399,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
                page_pool_update_nid(pool, new_nid);
 }
 
-static inline void page_pool_ring_lock(struct page_pool *pool)
-       __acquires(&pool->ring.producer_lock)
-{
-       if (in_softirq())
-               spin_lock(&pool->ring.producer_lock);
-       else
-               spin_lock_bh(&pool->ring.producer_lock);
-}
-
-static inline void page_pool_ring_unlock(struct page_pool *pool)
-       __releases(&pool->ring.producer_lock)
-{
-       if (in_softirq())
-               spin_unlock(&pool->ring.producer_lock);
-       else
-               spin_unlock_bh(&pool->ring.producer_lock);
-}
-
 #endif /* _NET_PAGE_POOL_H */
index 04a3164..18a038d 100644 (file)
@@ -1470,6 +1470,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
 }
 
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
 
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
  * If 87.5 % (7/8) of the space has been consumed, we want to override
@@ -2326,6 +2328,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
 #endif /* CONFIG_BPF_SYSCALL */
 
+#ifdef CONFIG_INET
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
+#else
+static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+}
+#endif
+
 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
                          struct sk_msg *msg, u32 bytes, int flags);
 #endif /* CONFIG_NET_SOCK_MSG */
index 6056ce5..596595c 100644 (file)
@@ -126,6 +126,7 @@ struct tls_strparser {
        u32 mark : 8;
        u32 stopped : 1;
        u32 copy_mode : 1;
+       u32 mixed_decrypted : 1;
        u32 msg_ready : 1;
 
        struct strp_msg stm;
index dbc47af..4f44f0b 100644 (file)
@@ -44,6 +44,9 @@ int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink);
 
 int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num);
 
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                                  int channel_mask, int stream_id, int dir);
+
 void hda_bus_ml_put_all(struct hdac_bus *bus);
 void hda_bus_ml_reset_losidv(struct hdac_bus *bus);
 int hda_bus_ml_resume(struct hdac_bus *bus);
@@ -51,6 +54,7 @@ int hda_bus_ml_suspend(struct hdac_bus *bus);
 
 struct hdac_ext_link *hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus);
 struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus);
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus);
 
 struct mutex *hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid);
 
@@ -144,6 +148,13 @@ hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink) { return
 static inline int
 hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num) { return 0; }
 
+static inline int
+hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                              int channel_mask, int stream_id, int dir)
+{
+       return 0;
+}
+
 static inline void hda_bus_ml_put_all(struct hdac_bus *bus) { }
 static inline void hda_bus_ml_reset_losidv(struct hdac_bus *bus) { }
 static inline int hda_bus_ml_resume(struct hdac_bus *bus) { return 0; }
@@ -155,6 +166,9 @@ hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus) { return NULL; }
 static inline struct hdac_ext_link *
 hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus) { return NULL; }
 
+static inline struct hdac_ext_link *
+hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus) { return NULL; }
+
 static inline struct mutex *
 hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid) { return NULL; }
 
index b38fd25..5282790 100644 (file)
@@ -170,6 +170,7 @@ struct snd_soc_acpi_link_adr {
 /* Descriptor for SST ASoC machine driver */
 struct snd_soc_acpi_mach {
        u8 id[ACPI_ID_LEN];
+       const char *uid;
        const struct snd_soc_acpi_codecs *comp_ids;
        const u32 link_mask;
        const struct snd_soc_acpi_link_adr *links;
index 4d6ac76..ebd2475 100644 (file)
@@ -122,6 +122,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
 int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream);
 
+/* can this BE perform prepare */
+int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
+                                struct snd_soc_pcm_runtime *be, int stream);
+
 /* is the current PCM operation for this FE ? */
 int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
 
index 1de4d0b..3d7ea58 100644 (file)
@@ -44,6 +44,7 @@ enum {
        HANDSHAKE_A_ACCEPT_AUTH_MODE,
        HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
        HANDSHAKE_A_ACCEPT_CERTIFICATE,
+       HANDSHAKE_A_ACCEPT_PEERNAME,
 
        __HANDSHAKE_A_ACCEPT_MAX,
        HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1)
index 4b7f2df..e682ab6 100644 (file)
@@ -163,6 +163,7 @@ struct in_addr {
 #define IP_MULTICAST_ALL               49
 #define IP_UNICAST_IF                  50
 #define IP_LOCAL_PORT_RANGE            51
+#define IP_PROTOCOL                    52
 
 #define MCAST_EXCLUDE  0
 #define MCAST_INCLUDE  1
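
Together with the new ipcm_cookie field above, which defaults the per-message protocol to the socket's own (inet_num), the IP_PROTOCOL constant suggests an ancillary-data override for raw sockets. The sketch below builds such a cmsg. Note the assumptions, which this hunk alone does not confirm: that sendmsg() on SOCK_RAW sockets accepts IP_PROTOCOL as a cmsg, and that the payload is an int in the 1..255 range.

    #include <string.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    #ifndef IP_PROTOCOL
    #define IP_PROTOCOL 52          /* matches the uapi value above */
    #endif

    /* Attach a per-message protocol override to msg; buf must provide
     * CMSG_SPACE(sizeof(int)) bytes.  Payload width is an assumption. */
    static void set_protocol_cmsg(struct msghdr *msg, char *buf,
                                  size_t buflen, int proto)
    {
            struct cmsghdr *cm;

            memset(buf, 0, buflen);
            msg->msg_control = buf;
            msg->msg_controllen = buflen;

            cm = CMSG_FIRSTHDR(msg);
            if (!cm)
                    return;
            cm->cmsg_level = SOL_IP;
            cm->cmsg_type = IP_PROTOCOL;
            cm->cmsg_len = CMSG_LEN(sizeof(proto));
            memcpy(CMSG_DATA(cm), &proto, sizeof(proto));
    }

    int main(void)
    {
            struct msghdr msg = { 0 };
            char buf[CMSG_SPACE(sizeof(int))];

            set_protocol_cmsg(&msg, buf, sizeof(buf), IPPROTO_ICMP);
            return msg.msg_controllen == sizeof(buf) ? 0 : 1;
    }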
index f29899b..4bf9c4f 100644 (file)
@@ -66,7 +66,8 @@ enum skl_ch_cfg {
        SKL_CH_CFG_DUAL_MONO = 9,
        SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
        SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
-       SKL_CH_CFG_4_CHANNEL = 12,
+       SKL_CH_CFG_7_1 = 12,
+       SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
        SKL_CH_CFG_INVALID
 };
 
index bbc3787..e9ec7e4 100644 (file)
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE        1906
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG   1907
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE       1908
-#define SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX            1909
+#define SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX      1909
 /* intentional token numbering discontinuity, reserved for future use */
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE     1930
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH        1931
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE       1936
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG  1937
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE      1938
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX     1939
 /* intentional token numbering discontinuity, reserved for future use */
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IBS          1970
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OBS          1971
index f755329..df1d04f 100644 (file)
@@ -1133,7 +1133,7 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
        ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
 #endif
 
-static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
+static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
 {
        return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
 }
index 00c253b..9901efe 100644 (file)
@@ -1215,7 +1215,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 
        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
-               return ret;
+               goto err_lock_bucket;
 
        l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1236,6 +1236,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 err:
        htab_unlock_bucket(htab, b, hash, flags);
 
+err_lock_bucket:
        if (ret)
                htab_lru_push_free(htab, l_new);
        else if (l_old)
@@ -1338,7 +1339,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
-               return ret;
+               goto err_lock_bucket;
 
        l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1361,6 +1362,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
        ret = 0;
 err:
        htab_unlock_bucket(htab, b, hash, flags);
+err_lock_bucket:
        if (l_new)
                bpf_lru_push_free(&htab->lru, &l_new->lru_node);
        return ret;
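
Both hunks route a htab_lock_bucket() failure to a label placed after the unlock, so the function never unlocks a bucket lock it failed to take, while the element cleanup below the label still runs. A standalone sketch of that unwind ordering; names are illustrative, not the kernel's:

    #include <stdlib.h>
    #include <pthread.h>

    static pthread_mutex_t bucket = PTHREAD_MUTEX_INITIALIZER;

    static int update_elem(int lock_fails)
    {
            void *l_new = malloc(16);
            int ret = 0;

            if (!l_new)
                    return -1;

            if (lock_fails) {               /* htab_lock_bucket() failed */
                    ret = -16;              /* never touch the lock below */
                    goto err_lock_bucket;
            }
            pthread_mutex_lock(&bucket);

            /* ... update the bucket ... */

            pthread_mutex_unlock(&bucket);  /* the "err:" unlock in the hunk */
    err_lock_bucket:
            free(l_new);                    /* cleanup runs on both paths */
            return ret;
    }

    int main(void)
    {
            update_elem(1);                 /* takes the error path */
            return update_elem(0);          /* takes the normal path */
    }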
index d9c9f45..8a26cd8 100644 (file)
@@ -859,4 +859,4 @@ static int __init bpf_offload_init(void)
        return rhashtable_init(&offdevs, &offdevs_params);
 }
 
-late_initcall(bpf_offload_init);
+core_initcall(bpf_offload_init);
index fbcf5a4..5871aa7 100644 (file)
@@ -17033,7 +17033,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                                        insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
                                                                        insn->dst_reg,
                                                                        shift);
-                               insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+                               insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
                                                                (1ULL << size * 8) - 1);
                        }
                }
index ad7b6ad..6ab2c94 100644 (file)
@@ -276,6 +276,7 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
        struct mod_fail_load *mod_fail;
        unsigned int len, size, count_failed = 0;
        char *buf;
+       int ret;
        u32 live_mod_count, fkreads, fdecompress, fbecoming, floads;
        unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes,
                idecompress_bytes, imod_bytes, total_virtual_lost;
@@ -390,8 +391,9 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
 out_unlock:
        mutex_unlock(&module_mutex);
 out:
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);
-        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       return ret;
 }
 #undef MAX_PREAMBLE
 #undef MAX_FAILED_MOD_PRINT
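
The hunk above fixes a use-after-free: simple_read_from_buffer() reads from buf, so buf must stay allocated until the call returns, and the fix captures the return value first and frees afterwards. The same ordering in a standalone sketch, with read_from() standing in for simple_read_from_buffer():

    #include <stdlib.h>
    #include <string.h>

    static long read_from(char *dst, const char *src, size_t n)
    {
            memcpy(dst, src, n);
            return (long)n;
    }

    long read_stats(char *dst, size_t n)
    {
            char *buf = malloc(n);
            long ret;

            if (!buf)
                    return -1;
            memset(buf, 'x', n);            /* ... format the stats ... */
            ret = read_from(dst, buf, n);   /* read from buf first ...   */
            free(buf);                      /* ... then free it; the bug */
            return ret;                     /* was freeing before reading */
    }

    int main(void)
    {
            char out[8];

            return read_stats(out, sizeof(out)) == 8 ? 0 : 1;
    }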
index 9abb390..18d3684 100644 (file)
 struct fprobe_rethook_node {
        struct rethook_node node;
        unsigned long entry_ip;
+       unsigned long entry_parent_ip;
        char data[];
 };
 
-static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *ops, struct ftrace_regs *fregs)
+static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
        struct fprobe_rethook_node *fpr;
        struct rethook_node *rh = NULL;
        struct fprobe *fp;
        void *entry_data = NULL;
-       int bit, ret;
+       int ret = 0;
 
        fp = container_of(ops, struct fprobe, ops);
-       if (fprobe_disabled(fp))
-               return;
-
-       bit = ftrace_test_recursion_trylock(ip, parent_ip);
-       if (bit < 0) {
-               fp->nmissed++;
-               return;
-       }
 
        if (fp->exit_handler) {
                rh = rethook_try_get(fp->rethook);
                if (!rh) {
                        fp->nmissed++;
-                       goto out;
+                       return;
                }
                fpr = container_of(rh, struct fprobe_rethook_node, node);
                fpr->entry_ip = ip;
+               fpr->entry_parent_ip = parent_ip;
                if (fp->entry_data_size)
                        entry_data = fpr->data;
        }
@@ -61,23 +55,60 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
                else
                        rethook_hook(rh, ftrace_get_regs(fregs), true);
        }
-out:
+}
+
+static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+               struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+       struct fprobe *fp;
+       int bit;
+
+       fp = container_of(ops, struct fprobe, ops);
+       if (fprobe_disabled(fp))
+               return;
+
+       /* recursion detection has to go before any traceable function and
+        * all functions called before this point should be marked as notrace
+        */
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
+       __fprobe_handler(ip, parent_ip, ops, fregs);
        ftrace_test_recursion_unlock(bit);
+
 }
 NOKPROBE_SYMBOL(fprobe_handler);
 
 static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
-       struct fprobe *fp = container_of(ops, struct fprobe, ops);
+       struct fprobe *fp;
+       int bit;
+
+       fp = container_of(ops, struct fprobe, ops);
+       if (fprobe_disabled(fp))
+               return;
+
+       /* recursion detection has to go before any traceable function and
+        * all functions called before this point should be marked as notrace
+        */
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
 
        if (unlikely(kprobe_running())) {
                fp->nmissed++;
                return;
        }
+
        kprobe_busy_begin();
-       fprobe_handler(ip, parent_ip, ops, fregs);
+       __fprobe_handler(ip, parent_ip, ops, fregs);
        kprobe_busy_end();
+       ftrace_test_recursion_unlock(bit);
 }
 
 static void fprobe_exit_handler(struct rethook_node *rh, void *data,
@@ -85,14 +116,26 @@ static void fprobe_exit_handler(struct rethook_node *rh, void *data,
 {
        struct fprobe *fp = (struct fprobe *)data;
        struct fprobe_rethook_node *fpr;
+       int bit;
 
        if (!fp || fprobe_disabled(fp))
                return;
 
        fpr = container_of(rh, struct fprobe_rethook_node, node);
 
+       /*
+        * We need to ensure there are no calls to traceable functions
+        * between the end of fprobe_handler and the beginning of
+        * fprobe_exit_handler.
+        */
+       bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
+
        fp->exit_handler(fp, fpr->entry_ip, regs,
                         fp->entry_data_size ? (void *)fpr->data : NULL);
+       ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(fprobe_exit_handler);
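
Both entry paths now take the ftrace recursion lock before calling anything traceable and release it only after the shared __fprobe_handler() body, and the exit handler does the same around the user callback. A userspace analogue of that trylock-style guard, with a thread-local flag standing in for the per-CPU recursion bits:

    #include <stdio.h>

    static _Thread_local int in_handler;    /* recursion-lock stand-in */
    static unsigned long nmissed;

    static void handler_body(void);

    static void handler(void)
    {
            if (in_handler) {               /* re-entry: count and bail */
                    nmissed++;
                    return;
            }
            in_handler = 1;
            handler_body();                 /* everything here is guarded */
            in_handler = 0;
    }

    static void handler_body(void)
    {
            handler();                      /* recursion is swallowed */
    }

    int main(void)
    {
            handler();
            printf("nmissed = %lu\n", nmissed);     /* prints 1 */
            return 0;
    }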
 
index 32c3dfd..60f6cb2 100644 (file)
@@ -288,7 +288,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
         * These loops must be protected from rethook_free_rcu() because those
         * are accessing 'rhn->rethook'.
         */
-       preempt_disable();
+       preempt_disable_notrace();
 
        /*
         * Run the handler on the shadow stack. Do not unlink the list here because
@@ -321,7 +321,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
                first = first->next;
                rethook_recycle(rhn);
        }
-       preempt_enable();
+       preempt_enable_notrace();
 
        return correct_ret_addr;
 }
index 110a364..8ebc43d 100644 (file)
@@ -5317,15 +5317,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
 
        mt = mte_node_type(mas->node);
        pivots = ma_pivots(mas_mn(mas), mt);
-       if (offset)
-               mas->min = pivots[offset - 1] + 1;
-
-       if (offset < mt_pivots[mt])
-               mas->max = pivots[offset];
-
-       if (mas->index < mas->min)
-               mas->index = mas->min;
-
+       min = mas_safe_min(mas, pivots, offset);
+       if (mas->index < min)
+               mas->index = min;
        mas->last = mas->index + size - 1;
        return 0;
 }
index 2aafc46..392fb27 100644 (file)
@@ -29,7 +29,7 @@
  * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
  * at a time instead of byte by byte to improve performance.
  */
-#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
+#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
 
 /* Maximum stack depth for reports. */
 #define KFENCE_STACK_DEPTH 64
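
KFENCE's per-byte canary is 0xaa xor'd with the low three address bits, and the 64-bit constant packs eight of those bytes so whole words can be filled and checked at once. The le64_to_cpu() added above keeps the packed constant's byte order matching the per-byte pattern on big-endian hosts. The sketch below verifies the equivalence on a little-endian host; that endianness is the stated assumption:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    #define CANARY_BYTE(off) ((uint8_t)(0xaa ^ ((off) & 0x7)))
    #define CANARY_U64 ((uint64_t)0xaaaaaaaaaaaaaaaaULL ^ \
                        (uint64_t)0x0706050403020100ULL)

    int main(void)
    {
            uint64_t canary = CANARY_U64;
            uint8_t bytes[8];

            memcpy(bytes, &canary, sizeof(bytes));
            for (int i = 0; i < 8; i++)     /* little-endian layout assumed */
                    assert(bytes[i] == CANARY_BYTE(i));
            return 0;
    }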
index 3f83b10..fe10436 100644 (file)
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                      int *debugfs_id)
 {
        struct dentry *entry = shrinker->debugfs_entry;
 
@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
        kfree_const(shrinker->name);
        shrinker->name = NULL;
 
-       if (entry) {
-               ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
-               shrinker->debugfs_entry = NULL;
-       }
+       *debugfs_id = entry ? shrinker->debugfs_id : -1;
+       shrinker->debugfs_entry = NULL;
 
        return entry;
 }
 
+void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
+{
+       debugfs_remove_recursive(debugfs_entry);
+       ida_free(&shrinker_debugfs_ida, debugfs_id);
+}
+
 static int __init shrinker_debugfs_init(void)
 {
        struct shrinker *shrinker;
index d257916..6d0cd28 100644 (file)
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
 void unregister_shrinker(struct shrinker *shrinker)
 {
        struct dentry *debugfs_entry;
+       int debugfs_id;
 
        if (!(shrinker->flags & SHRINKER_REGISTERED))
                return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
        shrinker->flags &= ~SHRINKER_REGISTERED;
        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                unregister_memcg_shrinker(shrinker);
-       debugfs_entry = shrinker_debugfs_remove(shrinker);
+       debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
        mutex_unlock(&shrinker_mutex);
 
        atomic_inc(&shrinker_srcu_generation);
        synchronize_srcu(&shrinker_srcu);
 
-       debugfs_remove_recursive(debugfs_entry);
+       shrinker_debugfs_remove(debugfs_entry, debugfs_id);
 
        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
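
The split turns teardown into two phases: shrinker_debugfs_detach() unlinks the entry and hands back the dentry and id while the mutex is held, and shrinker_debugfs_remove() frees them only after the SRCU grace period, so concurrent shrinker walks never see a half-destroyed entry. A generic sketch of the detach-then-free-after-readers idiom, with wait_for_readers() standing in for synchronize_srcu():

    #include <stdlib.h>
    #include <pthread.h>

    struct entry { struct entry *next; int id; };

    static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct entry *registry;

    /* Phase 1: unlink under the lock, but do not free yet. */
    static struct entry *detach_entry(int id)
    {
            struct entry **pp, *e = NULL;

            pthread_mutex_lock(&reg_lock);
            for (pp = &registry; *pp; pp = &(*pp)->next) {
                    if ((*pp)->id == id) {
                            e = *pp;
                            *pp = e->next;
                            break;
                    }
            }
            pthread_mutex_unlock(&reg_lock);
            return e;
    }

    static void wait_for_readers(void) { }  /* synchronize_srcu() stand-in */

    /* Phase 2: free only after every reader is done. */
    static void remove_entry(struct entry *e)
    {
            wait_for_readers();
            free(e);
    }

    int main(void)
    {
            struct entry *e = calloc(1, sizeof(*e));

            if (!e)
                    return 1;
            e->next = registry;
            registry = e;
            remove_entry(detach_entry(0));
            return 0;
    }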
index 44ddaf5..02f7f41 100644 (file)
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
        obj_to_location(obj, &page, &obj_idx);
        zspage = get_zspage(page);
 
-#ifdef CONFIG_ZPOOL
-       /*
-        * Move the zspage to front of pool's LRU.
-        *
-        * Note that this is swap-specific, so by definition there are no ongoing
-        * accesses to the memory while the page is swapped out that would make
-        * it "hot". A new entry is hot, then ages to the tail until it gets either
-        * written back or swaps back in.
-        *
-        * Furthermore, map is also called during writeback. We must not put an
-        * isolated page on the LRU mid-reclaim.
-        *
-        * As a result, only update the LRU when the page is mapped for write
-        * when it's first instantiated.
-        *
-        * This is a deviation from the other backends, which perform this update
-        * in the allocation function (zbud_alloc, z3fold_alloc).
-        */
-       if (mm == ZS_MM_WO) {
-               if (!list_empty(&zspage->lru))
-                       list_del(&zspage->lru);
-               list_add(&zspage->lru, &pool->lru);
-       }
-#endif
-
        /*
         * migration cannot move any zpages in this zspage. Here, pool->lock
         * is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
                fix_fullness_group(class, zspage);
                record_obj(handle, obj);
                class_stat_inc(class, ZS_OBJS_INUSE, 1);
-               spin_unlock(&pool->lock);
 
-               return handle;
+               goto out;
        }
 
        spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
        /* We completely set up zspage so mark them as movable */
        SetZsPageMovable(pool, zspage);
+out:
+#ifdef CONFIG_ZPOOL
+       /* Add/move zspage to beginning of LRU */
+       if (!list_empty(&zspage->lru))
+               list_del(&zspage->lru);
+       list_add(&zspage->lru, &pool->lru);
+#endif
+
        spin_unlock(&pool->lock);
 
        return handle;
index e1e621d..59da2a4 100644 (file)
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
                goto fail;
 
        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+               /*
+                * Having a local reference to the zswap entry doesn't exclude
+                * swapping from invalidating and recycling the swap slot. Once
+                * the swapcache is secured against concurrent swapping to and
+                * from the slot, recheck that the entry is still current before
+                * writing.
+                */
+               spin_lock(&tree->lock);
+               if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
+                       spin_unlock(&tree->lock);
+                       delete_from_swap_cache(page_folio(page));
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+               spin_unlock(&tree->lock);
+
                /* decompress */
                acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
                dlen = PAGE_SIZE;
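
The added block revalidates the entry under the tree lock once the swapcache slot is secured: the local reference pins the memory but not the entry's place in the tree, so if the lookup no longer returns the same entry, writeback aborts instead of writing stale data. The recheck-under-lock idiom in a standalone sketch:

    #include <pthread.h>
    #include <stddef.h>

    struct tree {
            pthread_mutex_t lock;
            void *slot;             /* stands in for the rbtree lookup */
    };

    /* Returns 0 when entry is still current, -1 when it was replaced. */
    static int revalidate(struct tree *t, void *entry)
    {
            int ret = 0;

            pthread_mutex_lock(&t->lock);
            if (t->slot != entry)   /* slot recycled since we looked */
                    ret = -1;
            pthread_mutex_unlock(&t->lock);
            return ret;
    }

    int main(void)
    {
            struct tree t = { PTHREAD_MUTEX_INITIALIZER, NULL };
            int e;

            t.slot = &e;
            return revalidate(&t, &e);      /* 0: safe to write back */
    }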
index 870e493..b90781b 100644 (file)
@@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
-       if (veth->h_vlan_proto != vlan->vlan_proto ||
-           vlan->flags & VLAN_FLAG_REORDER_HDR) {
+       if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
+           veth->h_vlan_proto != vlan->vlan_proto) {
                u16 vlan_tci;
                vlan_tci = vlan->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
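
The fix only swaps the operands of ||: the cheap flag test now runs first, and C's short-circuit evaluation skips the read of veth->h_vlan_proto whenever the flag alone decides the branch. A minimal demonstration of that ordering guarantee:

    #include <stdbool.h>
    #include <stddef.h>

    struct hdr { unsigned short proto; };

    /* h->proto is only read when the flag does not already decide the
     * result, so h may point at not-yet-valid header bytes in that case. */
    static bool must_rewrite(unsigned int flags, const struct hdr *h,
                             unsigned short want)
    {
            return (flags & 0x1) || h->proto != want;
    }

    int main(void)
    {
            /* flag set: the second operand is never evaluated */
            return must_rewrite(0x1, NULL, 0) ? 0 : 1;
    }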
index 2b2d33e..995d29e 100644 (file)
@@ -400,6 +400,7 @@ done:
        return error;
 }
 
+#ifdef CONFIG_PROC_FS
 void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
 {
        mutex_lock(&atm_dev_mutex);
@@ -415,3 +416,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        return seq_list_next(v, &atm_devs, pos);
 }
+#endif
index 640b951..f75ef12 100644 (file)
@@ -1083,8 +1083,28 @@ static void hci_conn_unlink(struct hci_conn *conn)
        if (!conn->parent) {
                struct hci_link *link, *t;
 
-               list_for_each_entry_safe(link, t, &conn->link_list, list)
-                       hci_conn_unlink(link->conn);
+               list_for_each_entry_safe(link, t, &conn->link_list, list) {
+                       struct hci_conn *child = link->conn;
+
+                       hci_conn_unlink(child);
+
+                       /* If hdev is down it means
+                        * hci_dev_close_sync/hci_conn_hash_flush is in progress
+                        * and links don't need to be cleaned up here, as all
+                        * connections will be cleaned up anyway.
+                        */
+                       if (!test_bit(HCI_UP, &hdev->flags))
+                               continue;
+
+                       /* Due to race, SCO connection might be not established
+                        * yet at this point. Delete it now, otherwise it is
+                        * possible for it to be stuck and can't be deleted.
+                        */
+                       if ((child->type == SCO_LINK ||
+                            child->type == ESCO_LINK) &&
+                           child->handle == HCI_CONN_HANDLE_UNSET)
+                               hci_conn_del(child);
+               }
 
                return;
        }
@@ -1092,35 +1112,30 @@ static void hci_conn_unlink(struct hci_conn *conn)
        if (!conn->link)
                return;
 
-       hci_conn_put(conn->parent);
-       conn->parent = NULL;
-
        list_del_rcu(&conn->link->list);
        synchronize_rcu();
 
+       hci_conn_drop(conn->parent);
+       hci_conn_put(conn->parent);
+       conn->parent = NULL;
+
        kfree(conn->link);
        conn->link = NULL;
-
-       /* Due to race, SCO connection might be not established
-        * yet at this point. Delete it now, otherwise it is
-        * possible for it to be stuck and can't be deleted.
-        */
-       if (conn->handle == HCI_CONN_HANDLE_UNSET)
-               hci_conn_del(conn);
 }
 
-int hci_conn_del(struct hci_conn *conn)
+void hci_conn_del(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
+       hci_conn_unlink(conn);
+
        cancel_delayed_work_sync(&conn->disc_work);
        cancel_delayed_work_sync(&conn->auto_accept_work);
        cancel_delayed_work_sync(&conn->idle_work);
 
        if (conn->type == ACL_LINK) {
-               hci_conn_unlink(conn);
                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
@@ -1131,13 +1146,6 @@ int hci_conn_del(struct hci_conn *conn)
                else
                        hdev->acl_cnt += conn->sent;
        } else {
-               struct hci_conn *acl = conn->parent;
-
-               if (acl) {
-                       hci_conn_unlink(conn);
-                       hci_conn_drop(acl);
-               }
-
                /* Unacked ISO frames */
                if (conn->type == ISO_LINK) {
                        if (hdev->iso_pkts)
@@ -1160,8 +1168,6 @@ int hci_conn_del(struct hci_conn *conn)
         * rest of hci_conn_del.
         */
        hci_conn_cleanup(conn);
-
-       return 0;
 }
 
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -2462,22 +2468,21 @@ timer:
 /* Drop all connection on the device */
 void hci_conn_hash_flush(struct hci_dev *hdev)
 {
-       struct hci_conn_hash *h = &hdev->conn_hash;
-       struct hci_conn *c, *n;
+       struct list_head *head = &hdev->conn_hash.list;
+       struct hci_conn *conn;
 
        BT_DBG("hdev %s", hdev->name);
 
-       list_for_each_entry_safe(c, n, &h->list, list) {
-               c->state = BT_CLOSED;
-
-               hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
-
-               /* Unlink before deleting otherwise it is possible that
-                * hci_conn_del removes the link which may cause the list to
-                * contain items already freed.
-                */
-               hci_conn_unlink(c);
-               hci_conn_del(c);
+       /* We should not traverse the list here, because hci_conn_del
+        * can remove extra links, which may cause the list traversal
+        * to hit items that have already been released.
+        */
+       while ((conn = list_first_entry_or_null(head,
+                                               struct hci_conn,
+                                               list)) != NULL) {
+               conn->state = BT_CLOSED;
+               hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
+               hci_conn_del(conn);
        }
 }
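
hci_conn_hash_flush() now drains the hash by repeatedly taking the first element, because hci_conn_del() can remove other connections and leave list_for_each_entry_safe()'s cached next pointer dangling. A userspace analogue of why re-reading the head is safe when the per-node teardown may delete arbitrary nodes:

    #include <stdlib.h>

    struct node { struct node *next; };
    static struct node *head;

    /* Teardown that unlinks the node itself; in the kernel case it may
     * unlink other nodes too, which is what breaks a cached "next". */
    static void del_node(struct node *n)
    {
            struct node **pp;

            for (pp = &head; *pp; pp = &(*pp)->next) {
                    if (*pp == n) {
                            *pp = n->next;
                            break;
                    }
            }
            free(n);
    }

    static void flush_all(void)
    {
            struct node *n;

            while ((n = head) != NULL)      /* re-read the head each time */
                    del_node(n);
    }

    int main(void)
    {
            for (int i = 0; i < 3; i++) {
                    struct node *n = calloc(1, sizeof(*n));

                    if (!n)
                            return 1;
                    n->next = head;
                    head = n;
            }
            flush_all();
            return head != NULL;
    }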
 
index 2b05328..efb0960 100644 (file)
@@ -27,6 +27,10 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
 int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
 int br_fill_vlan_tunnel_info(struct sk_buff *skb,
                             struct net_bridge_vlan_group *vg);
+bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+                       const struct net_bridge_vlan *v_last);
+int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+                       u16 vid, u32 tun_id, bool *changed);
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 /* br_vlan_tunnel.c */
@@ -43,10 +47,6 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
                                   struct net_bridge_vlan_group *vg);
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
                                 struct net_bridge_vlan *vlan);
-bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
-                       const struct net_bridge_vlan *v_last);
-int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
-                       u16 vid, u32 tun_id, bool *changed);
 #else
 static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
 {
index a750259..84f9aba 100644 (file)
@@ -1139,7 +1139,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        struct isotp_sock *so = isotp_sk(sk);
        int ret = 0;
 
-       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
+       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
                return -EINVAL;
 
        if (!so->bound)
index 7e90f9e..1790469 100644 (file)
@@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
        struct j1939_sk_buff_cb *skcb;
        int ret = 0;
 
-       if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
+       if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
                return -EINVAL;
 
        if (flags & MSG_ERRQUEUE)
index e212e9d..a3e12a6 100644 (file)
@@ -134,6 +134,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
 #define recycle_stat_add(pool, __stat, val)
 #endif
 
+static bool page_pool_producer_lock(struct page_pool *pool)
+       __acquires(&pool->ring.producer_lock)
+{
+       bool in_softirq = in_softirq();
+
+       if (in_softirq)
+               spin_lock(&pool->ring.producer_lock);
+       else
+               spin_lock_bh(&pool->ring.producer_lock);
+
+       return in_softirq;
+}
+
+static void page_pool_producer_unlock(struct page_pool *pool,
+                                     bool in_softirq)
+       __releases(&pool->ring.producer_lock)
+{
+       if (in_softirq)
+               spin_unlock(&pool->ring.producer_lock);
+       else
+               spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
 {
@@ -617,6 +640,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count)
 {
        int i, bulk_len = 0;
+       bool in_softirq;
 
        for (i = 0; i < count; i++) {
                struct page *page = virt_to_head_page(data[i]);
@@ -635,7 +659,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                return;
 
        /* Bulk producer into ptr_ring page_pool cache */
-       page_pool_ring_lock(pool);
+       in_softirq = page_pool_producer_lock(pool);
        for (i = 0; i < bulk_len; i++) {
                if (__ptr_ring_produce(&pool->ring, data[i])) {
                        /* ring full */
@@ -644,7 +668,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                }
        }
        recycle_stat_add(pool, ring, i);
-       page_pool_ring_unlock(pool);
+       page_pool_producer_unlock(pool, in_softirq);
 
        /* Hopefully all pages were returned into ptr_ring */
        if (likely(i == bulk_len))
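
page_pool_producer_lock() now returns which context it ran in, and the caller hands that value back to page_pool_producer_unlock(), so the lock and unlock variants always pair even if the answer to in_softirq() could differ between the two calls. A standalone sketch of the "acquire returns its mode" pattern, with the _bh flavor simulated by a flag:

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t ring = PTHREAD_MUTEX_INITIALIZER;
    static bool bh_disabled;

    static bool ring_lock(bool in_softirq) /* in_softirq(): caller's context */
    {
            if (!in_softirq)
                    bh_disabled = true;     /* spin_lock_bh() flavor */
            pthread_mutex_lock(&ring);
            return in_softirq;              /* remember the mode we took */
    }

    static void ring_unlock(bool in_softirq)
    {
            pthread_mutex_unlock(&ring);
            if (!in_softirq) {              /* replay the same mode */
                    assert(bh_disabled);
                    bh_disabled = false;
            }
    }

    int main(void)
    {
            bool mode = ring_lock(false);
            /* ... produce into the ring ... */
            ring_unlock(mode);
            return 0;
    }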
index 515ec5c..cea28d3 100644 (file)
@@ -5224,8 +5224,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
        } else {
                skb = skb_clone(orig_skb, GFP_ATOMIC);
 
-               if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
+               if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
+                       kfree_skb(skb);
                        return;
+               }
        }
        if (!skb)
                return;
index f818837..a9060e1 100644 (file)
@@ -481,8 +481,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                msg_rx = sk_psock_peek_msg(psock);
        }
 out:
-       if (psock->work_state.skb && copied > 0)
-               schedule_work(&psock->work);
        return copied;
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@@ -624,42 +622,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 
 static void sk_psock_skb_state(struct sk_psock *psock,
                               struct sk_psock_work_state *state,
-                              struct sk_buff *skb,
                               int len, int off)
 {
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
-               state->skb = skb;
                state->len = len;
                state->off = off;
-       } else {
-               sock_drop(psock->sk, skb);
        }
        spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
 {
-       struct sk_psock *psock = container_of(work, struct sk_psock, work);
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb = NULL;
+       u32 len = 0, off = 0;
        bool ingress;
-       u32 len, off;
        int ret;
 
        mutex_lock(&psock->work_mutex);
-       if (unlikely(state->skb)) {
-               spin_lock_bh(&psock->ingress_lock);
-               skb = state->skb;
+       if (unlikely(state->len)) {
                len = state->len;
                off = state->off;
-               state->skb = NULL;
-               spin_unlock_bh(&psock->ingress_lock);
        }
-       if (skb)
-               goto start;
 
-       while ((skb = skb_dequeue(&psock->ingress_skb))) {
+       while ((skb = skb_peek(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
                if (skb_bpf_strparser(skb)) {
@@ -668,7 +657,6 @@ static void sk_psock_backlog(struct work_struct *work)
                        off = stm->offset;
                        len = stm->full_len;
                }
-start:
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
@@ -678,22 +666,28 @@ start:
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
-                                       sk_psock_skb_state(psock, state, skb,
-                                                          len, off);
+                                       sk_psock_skb_state(psock, state, len, off);
+
+                                       /* Delay slightly to prioritize any
+                                        * other work that might be here.
+                                        */
+                                       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+                                               schedule_delayed_work(&psock->work, 1);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
-                               sock_drop(psock->sk, skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);
 
-               if (!ingress)
+               skb = skb_dequeue(&psock->ingress_skb);
+               if (!ingress) {
                        kfree_skb(skb);
+               }
        }
 end:
        mutex_unlock(&psock->work_mutex);
@@ -734,7 +728,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);
 
-       INIT_WORK(&psock->work, sk_psock_backlog);
+       INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
@@ -786,11 +780,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
-       kfree_skb(psock->work_state.skb);
-       /* We null the skb here to ensure that calls to sk_psock_backlog
-        * do not pick up the free'd skb.
-        */
-       psock->work_state.skb = NULL;
        __sk_psock_purge_ingress_msg(psock);
 }
 
@@ -809,7 +798,6 @@ void sk_psock_stop(struct sk_psock *psock)
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
-       __sk_psock_zap_ingress(psock);
        spin_unlock_bh(&psock->ingress_lock);
 }
 
@@ -823,7 +811,8 @@ static void sk_psock_destroy(struct work_struct *work)
 
        sk_psock_done_strp(psock);
 
-       cancel_work_sync(&psock->work);
+       cancel_delayed_work_sync(&psock->work);
+       __sk_psock_zap_ingress(psock);
        mutex_destroy(&psock->work_mutex);
 
        psock_progs_drop(&psock->progs);
@@ -938,7 +927,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
        }
 
        skb_queue_tail(&psock_other->ingress_skb, skb);
-       schedule_work(&psock_other->work);
+       schedule_delayed_work(&psock_other->work, 0);
        spin_unlock_bh(&psock_other->ingress_lock);
        return 0;
 }
@@ -990,10 +979,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                err = -EIO;
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
-                   !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
-                       skb_bpf_redirect_clear(skb);
+                   !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        goto out_free;
-               }
 
                skb_bpf_set_ingress(skb);
 
@@ -1018,22 +1005,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
-                               schedule_work(&psock->work);
+                               schedule_delayed_work(&psock->work, 0);
                                err = 0;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
-                       if (err < 0) {
-                               skb_bpf_redirect_clear(skb);
+                       if (err < 0)
                                goto out_free;
-                       }
                }
                break;
        case __SK_REDIRECT:
+               tcp_eat_skb(psock->sk, skb);
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
 out_free:
+               skb_bpf_redirect_clear(skb);
+               tcp_eat_skb(psock->sk, skb);
                sock_drop(psock->sk, skb);
        }
 
@@ -1049,7 +1037,7 @@ static void sk_psock_write_space(struct sock *sk)
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-                       schedule_work(&psock->work);
+                       schedule_delayed_work(&psock->work, 0);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
@@ -1078,8 +1066,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
-               if (ret == SK_PASS)
-                       skb_bpf_set_strparser(skb);
+               skb_bpf_set_strparser(skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
@@ -1183,12 +1170,11 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
        int ret = __SK_DROP;
        int len = skb->len;
 
-       skb_get(skb);
-
        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
+               tcp_eat_skb(sk, skb);
                sock_drop(sk, skb);
                goto out;
        }
@@ -1212,12 +1198,21 @@ out:
 static void sk_psock_verdict_data_ready(struct sock *sk)
 {
        struct socket *sock = sk->sk_socket;
+       int copied;
 
        trace_sk_data_ready(sk);
 
        if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
                return;
-       sock->ops->read_skb(sk, sk_psock_verdict_recv);
+       copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
+       if (copied >= 0) {
+               struct sk_psock *psock;
+
+               rcu_read_lock();
+               psock = sk_psock(sk);
+               psock->saved_data_ready(sk);
+               rcu_read_unlock();
+       }
 }
 
 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
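
The backlog now peeks at the head of ingress_skb, transmits from the saved (len, off) state, and dequeues only once the whole skb went through; on -EAGAIN the skb stays queued and the delayed work retries. A compact sketch of that peek-then-dequeue loop, with send_some() as a stand-in for the redirect/ingress send:

    #include <stdlib.h>

    struct item { struct item *next; int len; };
    static struct item *queue;
    static int saved_len, saved_off;

    /* Stand-in sender; a negative return means "would block, retry". */
    static int send_some(struct item *it, int off, int len)
    {
            (void)it; (void)off;
            return len;                     /* always succeeds here */
    }

    static void backlog(void)
    {
            struct item *it;

            while ((it = queue) != NULL) {  /* peek, don't pop */
                    int off = saved_off;
                    int len = saved_len ? saved_len : it->len;

                    while (len) {
                            int n = send_some(it, off, len);

                            if (n < 0) {    /* keep it queued for the retry */
                                    saved_off = off;
                                    saved_len = len;
                                    return;
                            }
                            off += n;
                            len -= n;
                    }
                    saved_off = saved_len = 0;
                    queue = it->next;       /* dequeue only when fully sent */
                    free(it);
            }
    }

    int main(void)
    {
            struct item *it = calloc(1, sizeof(*it));

            if (!it)
                    return 1;
            it->len = 4;
            queue = it;
            backlog();
            return queue != NULL;
    }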
index 7c189c2..00afb66 100644 (file)
@@ -1644,9 +1644,10 @@ void sock_map_close(struct sock *sk, long timeout)
                rcu_read_unlock();
                sk_psock_stop(psock);
                release_sock(sk);
-               cancel_work_sync(&psock->work);
+               cancel_delayed_work_sync(&psock->work);
                sk_psock_put(sk, psock);
        }
+
        /* Make sure we do not recurse. This is a bug.
         * Leak the socket instead of crashing on a stack overflow.
         */
index 777b091..c23ebab 100644 (file)
@@ -204,11 +204,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
        if (ret < 0)
                goto err_xa_alloc;
 
-       devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
-       ret = register_netdevice_notifier(&devlink->netdevice_nb);
-       if (ret)
-               goto err_register_netdevice_notifier;
-
        devlink->dev = dev;
        devlink->ops = ops;
        xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
@@ -233,8 +228,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 
        return devlink;
 
-err_register_netdevice_notifier:
-       xa_erase(&devlinks, devlink->index);
 err_xa_alloc:
        kfree(devlink);
        return NULL;
@@ -266,8 +259,6 @@ void devlink_free(struct devlink *devlink)
        xa_destroy(&devlink->params);
        xa_destroy(&devlink->ports);
 
-       WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
-
        xa_erase(&devlinks, devlink->index);
 
        devlink_put(devlink);
@@ -303,6 +294,10 @@ static struct pernet_operations devlink_pernet_ops __net_initdata = {
        .pre_exit = devlink_pernet_pre_exit,
 };
 
+static struct notifier_block devlink_port_netdevice_nb = {
+       .notifier_call = devlink_port_netdevice_event,
+};
+
 static int __init devlink_init(void)
 {
        int err;
@@ -311,6 +306,9 @@ static int __init devlink_init(void)
        if (err)
                goto out;
        err = register_pernet_subsys(&devlink_pernet_ops);
+       if (err)
+               goto out;
+       err = register_netdevice_notifier(&devlink_port_netdevice_nb);
 
 out:
        WARN_ON(err);
index e133f42..62921b2 100644 (file)
@@ -50,7 +50,6 @@ struct devlink {
        u8 reload_failed:1;
        refcount_t refcount;
        struct rcu_work rwork;
-       struct notifier_block netdevice_nb;
        char priv[] __aligned(NETDEV_ALIGN);
 };
 
index dffca2f..cd02549 100644 (file)
@@ -7073,10 +7073,9 @@ int devlink_port_netdevice_event(struct notifier_block *nb,
        struct devlink_port *devlink_port = netdev->devlink_port;
        struct devlink *devlink;
 
-       devlink = container_of(nb, struct devlink, netdevice_nb);
-
-       if (!devlink_port || devlink_port->devlink != devlink)
+       if (!devlink_port)
                return NOTIFY_OK;
+       devlink = devlink_port->devlink;
 
        switch (event) {
        case NETDEV_POST_INIT:
index e6adc5d..6d37bab 100644 (file)
@@ -102,7 +102,7 @@ struct handshake_req_alloc_test_param handshake_req_alloc_params[] = {
        {
                .desc                   = "handshake_req_alloc excessive privsize",
                .proto                  = &handshake_req_alloc_proto_6,
-               .gfp                    = GFP_KERNEL,
+               .gfp                    = GFP_KERNEL | __GFP_NOWARN,
                .expect_success         = false,
        },
        {
@@ -209,6 +209,7 @@ static void handshake_req_submit_test4(struct kunit *test)
 {
        struct handshake_req *req, *result;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -218,9 +219,10 @@ static void handshake_req_submit_test4(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -241,6 +243,7 @@ static void handshake_req_submit_test5(struct kunit *test)
        struct handshake_req *req;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        int saved, err;
 
@@ -251,9 +254,10 @@ static void handshake_req_submit_test5(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        net = sock_net(sock->sk);
        hn = handshake_pernet(net);
@@ -276,6 +280,7 @@ static void handshake_req_submit_test6(struct kunit *test)
 {
        struct handshake_req *req1, *req2;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -287,9 +292,10 @@ static void handshake_req_submit_test6(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        /* Act */
        err = handshake_req_submit(sock, req1, GFP_KERNEL);
@@ -307,6 +313,7 @@ static void handshake_req_cancel_test1(struct kunit *test)
 {
        struct handshake_req *req;
        struct socket *sock;
+       struct file *filp;
        bool result;
        int err;
 
@@ -318,8 +325,9 @@ static void handshake_req_cancel_test1(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -340,6 +348,7 @@ static void handshake_req_cancel_test2(struct kunit *test)
        struct handshake_req *req, *next;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        bool result;
        int err;
@@ -352,8 +361,9 @@ static void handshake_req_cancel_test2(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -380,6 +390,7 @@ static void handshake_req_cancel_test3(struct kunit *test)
        struct handshake_req *req, *next;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        bool result;
        int err;
@@ -392,8 +403,9 @@ static void handshake_req_cancel_test3(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -436,6 +448,7 @@ static void handshake_req_destroy_test1(struct kunit *test)
 {
        struct handshake_req *req;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -448,8 +461,9 @@ static void handshake_req_destroy_test1(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
index 4dac965..8aeaadc 100644 (file)
@@ -31,6 +31,7 @@ struct handshake_req {
        struct list_head                hr_list;
        struct rhash_head               hr_rhash;
        unsigned long                   hr_flags;
+       struct file                     *hr_file;
        const struct handshake_proto    *hr_proto;
        struct sock                     *hr_sk;
        void                            (*hr_odestruct)(struct sock *sk);
index 35c9c44..1086653 100644 (file)
@@ -48,7 +48,7 @@ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto,
                                proto->hp_handler_class))
                return -ESRCH;
 
-       msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags);
        if (!msg)
                return -ENOMEM;
 
@@ -99,9 +99,6 @@ static int handshake_dup(struct socket *sock)
        struct file *file;
        int newfd;
 
-       if (!sock->file)
-               return -EBADF;
-
        file = get_file(sock->file);
        newfd = get_unused_fd_flags(O_CLOEXEC);
        if (newfd < 0) {
@@ -142,15 +139,16 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
                goto out_complete;
        }
        err = req->hr_proto->hp_accept(req, info, fd);
-       if (err)
+       if (err) {
+               fput(sock->file);
                goto out_complete;
+       }
 
        trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
        return 0;
 
 out_complete:
        handshake_complete(req, -EIO, NULL);
-       fput(sock->file);
 out_status:
        trace_handshake_cmd_accept_err(net, req, NULL, err);
        return err;
@@ -159,8 +157,8 @@ out_status:
 int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = sock_net(skb->sk);
+       struct handshake_req *req = NULL;
        struct socket *sock = NULL;
-       struct handshake_req *req;
        int fd, status, err;
 
        if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
index 94d5cef..d78d41a 100644 (file)
@@ -239,6 +239,7 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req,
        }
        req->hr_odestruct = req->hr_sk->sk_destruct;
        req->hr_sk->sk_destruct = handshake_sk_destruct;
+       req->hr_file = sock->file;
 
        ret = -EOPNOTSUPP;
        net = sock_net(req->hr_sk);
@@ -334,6 +335,9 @@ bool handshake_req_cancel(struct sock *sk)
                return false;
        }
 
+       /* Request accepted and waiting for DONE */
+       fput(req->hr_file);
+
 out_true:
        trace_handshake_cancel(net, req, sk);
 
index fcbeb63..b735f5c 100644 (file)
@@ -31,6 +31,7 @@ struct tls_handshake_req {
        int                     th_type;
        unsigned int            th_timeout_ms;
        int                     th_auth_mode;
+       const char              *th_peername;
        key_serial_t            th_keyring;
        key_serial_t            th_certificate;
        key_serial_t            th_privkey;
@@ -48,6 +49,7 @@ tls_handshake_req_init(struct handshake_req *req,
        treq->th_timeout_ms = args->ta_timeout_ms;
        treq->th_consumer_done = args->ta_done;
        treq->th_consumer_data = args->ta_data;
+       treq->th_peername = args->ta_peername;
        treq->th_keyring = args->ta_keyring;
        treq->th_num_peerids = 0;
        treq->th_certificate = TLS_NO_CERT;
@@ -214,6 +216,12 @@ static int tls_handshake_accept(struct handshake_req *req,
        ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type);
        if (ret < 0)
                goto out_cancel;
+       if (treq->th_peername) {
+               ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME,
+                                    treq->th_peername);
+               if (ret < 0)
+                       goto out_cancel;
+       }
        if (treq->th_timeout_ms) {
                ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms);
                if (ret < 0)
index b511ff0..8e97d8d 100644 (file)
@@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
                        ipc->tos = val;
                        ipc->priority = rt_tos2priority(ipc->tos);
                        break;
-
+               case IP_PROTOCOL:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 1 || val > 255)
+                               return -EINVAL;
+                       ipc->protocol = val;
+                       break;
                default:
                        return -EINVAL;
                }
@@ -1761,6 +1768,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_LOCAL_PORT_RANGE:
                val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
                break;
+       case IP_PROTOCOL:
+               val = inet_sk(sk)->inet_num;
+               break;
        default:
                sockopt_release_sock(sk);
                return -ENOPROTOOPT;
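A hedged user-space sketch of the do_ip_getsockopt() path added above; the IP_PROTOCOL value is an assumption taken from <linux/in.h> and should be checked against installed headers. For raw sockets, inet_num holds the protocol the socket was created with:

	#include <stdio.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef IP_PROTOCOL
	#define IP_PROTOCOL 52			/* assumed value from <linux/in.h> */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP); /* needs CAP_NET_RAW */
		int proto = 0;
		socklen_t len = sizeof(proto);

		if (fd >= 0 && getsockopt(fd, IPPROTO_IP, IP_PROTOCOL, &proto, &len) == 0)
			printf("raw socket protocol: %d\n", proto);	/* 1 == ICMP */
		return 0;
	}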
index ff712bf..eadf1c9 100644 (file)
@@ -532,6 +532,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
 
        ipcm_init_sk(&ipc, inet);
+       /* Keep backward compat */
+       if (hdrincl)
+               ipc.protocol = IPPROTO_RAW;
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -599,7 +602,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
                           RT_SCOPE_UNIVERSE,
-                          hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+                          hdrincl ? ipc.protocol : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
                            (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk->sk_uid);
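The cmsg side that the ip_cmsg_send() hunk accepts, as a hedged fragment; set_protocol_cmsg() is an illustrative helper, not an existing API. On an IPPROTO_RAW (hdrincl) socket it lets the kernel route the hand-built datagram as the stated protocol rather than the fixed IPPROTO_RAW:

	#include <string.h>
	#include <netinet/in.h>
	#include <sys/socket.h>

	#ifndef IP_PROTOCOL
	#define IP_PROTOCOL 52			/* assumed value from <linux/in.h> */
	#endif

	static void set_protocol_cmsg(struct msghdr *msg, char *cbuf,
				      size_t cbuf_len, int proto)
	{
		struct cmsghdr *cm;

		msg->msg_control = cbuf;
		msg->msg_controllen = cbuf_len;
		cm = CMSG_FIRSTHDR(msg);
		cm->cmsg_level = SOL_IP;
		cm->cmsg_type = IP_PROTOCOL;
		cm->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cm), &proto, sizeof(proto));
	}

The raw_sendmsg() hunk above preserves backward compatibility: with no cmsg supplied, ipc.protocol defaults to IPPROTO_RAW and old binaries see unchanged behavior.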
index 4d6392c..a60f6f4 100644 (file)
@@ -1571,7 +1571,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
+void __tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool time_to_ack = false;
@@ -1773,7 +1773,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
                WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
                tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
                used = recv_actor(sk, skb);
-               consume_skb(skb);
                if (used < 0) {
                        if (!copied)
                                copied = used;
@@ -1787,14 +1786,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
                        break;
                }
        }
-       WRITE_ONCE(tp->copied_seq, seq);
-
-       tcp_rcv_space_adjust(sk);
-
-       /* Clean up data we have read: This will do ACK frames. */
-       if (copied > 0)
-               __tcp_cleanup_rbuf(sk, copied);
-
        return copied;
 }
 EXPORT_SYMBOL(tcp_read_skb);
index 2e95474..5f93918 100644 (file)
 #include <net/inet_common.h>
 #include <net/tls.h>
 
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tcp;
+       int copied;
+
+       if (!skb || !skb->len || !sk_is_tcp(sk))
+               return;
+
+       if (skb_bpf_strparser(skb))
+               return;
+
+       tcp = tcp_sk(sk);
+       copied = tcp->copied_seq + skb->len;
+       WRITE_ONCE(tcp->copied_seq, copied);
+       tcp_rcv_space_adjust(sk);
+       __tcp_cleanup_rbuf(sk, skb->len);
+}
+
 static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
                           struct sk_msg *msg, u32 apply_bytes, int flags)
 {
@@ -174,14 +192,34 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
        return ret;
 }
 
+static bool is_next_msg_fin(struct sk_psock *psock)
+{
+       struct scatterlist *sge;
+       struct sk_msg *msg_rx;
+       int i;
+
+       msg_rx = sk_psock_peek_msg(psock);
+       i = msg_rx->sg.start;
+       sge = sk_msg_elem(msg_rx, i);
+       if (!sge->length) {
+               struct sk_buff *skb = msg_rx->skb;
+
+               if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+                       return true;
+       }
+       return false;
+}
+
 static int tcp_bpf_recvmsg_parser(struct sock *sk,
                                  struct msghdr *msg,
                                  size_t len,
                                  int flags,
                                  int *addr_len)
 {
+       struct tcp_sock *tcp = tcp_sk(sk);
+       u32 seq = tcp->copied_seq;
        struct sk_psock *psock;
-       int copied;
+       int copied = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
@@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
                return tcp_recvmsg(sk, msg, len, flags, addr_len);
 
        lock_sock(sk);
+
+       /* We may have received data on the sk_receive_queue pre-accept and
+        * then we cannot use read_skb in this context because we haven't
+        * assigned a sk_socket yet so have no link to the ops. The work-around
+        * is to check the sk_receive_queue and in these cases read skbs off
+        * queue again. The read_skb hook is not running at this point because
+        * of lock_sock so we avoid having multiple runners in read_skb.
+        */
+       if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
+               tcp_data_ready(sk);
+               /* This handles the ENOMEM errors if we both receive data
+                * pre-accept and are already under memory pressure. At least
+                * let the user know to retry.
+                */
+               if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
+                       copied = -EAGAIN;
+                       goto out;
+               }
+       }
+
 msg_bytes_ready:
        copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+       /* The typical case for EFAULT is the socket was gracefully
+        * shut down with a FIN pkt. The other case is some error on
+        * copy_page_to_iter, which would be unexpected. On FIN,
+        * correct the return code to zero.
+        */
+       if (copied == -EFAULT) {
+               bool is_fin = is_next_msg_fin(psock);
+
+               if (is_fin) {
+                       copied = 0;
+                       seq++;
+                       goto out;
+               }
+       }
+       seq += copied;
        if (!copied) {
                long timeo;
                int data;
@@ -233,6 +306,10 @@ msg_bytes_ready:
                copied = -EAGAIN;
        }
 out:
+       WRITE_ONCE(tcp->copied_seq, seq);
+       tcp_rcv_space_adjust(sk);
+       if (copied > 0)
+               __tcp_cleanup_rbuf(sk, copied);
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied;
index 39bda2b..06d2573 100644 (file)
@@ -829,6 +829,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                                   inet_twsk(sk)->tw_priority : sk->sk_priority;
                transmit_time = tcp_transmit_time(sk);
                xfrm_sk_clone_policy(ctl_sk, sk);
+       } else {
+               ctl_sk->sk_mark = 0;
+               ctl_sk->sk_priority = 0;
        }
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -836,7 +839,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                              &arg, arg.iov[0].iov_len,
                              transmit_time);
 
-       ctl_sk->sk_mark = 0;
        xfrm_sk_free_policy(ctl_sk);
        sock_net_set(ctl_sk, &init_net);
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
@@ -935,7 +937,6 @@ static void tcp_v4_send_ack(const struct sock *sk,
                              &arg, arg.iov[0].iov_len,
                              transmit_time);
 
-       ctl_sk->sk_mark = 0;
        sock_net_set(ctl_sk, &init_net);
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        local_bh_enable();
index aa32afd..9482def 100644 (file)
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL(__skb_recv_udp);
 int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
        struct sk_buff *skb;
-       int err, copied;
+       int err;
 
 try_again:
        skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
@@ -1837,10 +1837,7 @@ try_again:
        }
 
        WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-
-       return copied;
+       return recv_actor(sk, skb);
 }
 EXPORT_SYMBOL(udp_read_skb);
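Together with the tcp_read_skb() hunk earlier, this shifts skb ownership into the recv_actor callback. A hypothetical actor under the new contract; demo_recv_actor and its delivery step are illustrative only:

	/* The caller no longer frees the skb, and for TCP no longer advances
	 * copied_seq, so the actor must consume the buffer itself when done. */
	static int demo_recv_actor(struct sock *sk, struct sk_buff *skb)
	{
		int len = skb->len;

		/* ... hand the payload to the consumer here ... */

		consume_skb(skb);	/* ownership arrived via read_skb */
		return len;
	}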
 
index e0c9cc3..56d94d2 100644 (file)
@@ -64,6 +64,8 @@ struct proto  udplite_prot = {
        .per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
 
        .sysctl_mem        = sysctl_udp_mem,
+       .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+       .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size          = sizeof(struct udp_sock),
        .h.udp_table       = &udplite_table,
 };
index da46c42..49e31e4 100644 (file)
@@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
                        optlen = 1;
                        break;
                default:
+                       if (len < 2)
+                               goto bad;
                        optlen = nh[offset + 1] + 2;
                        if (optlen > len)
                                goto bad;
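The added guard closes a one-byte over-read when an option is truncated immediately after its type byte. A stand-alone sketch of the same walk; find_tlv() here is illustrative, not the kernel's ipv6_find_tlv():

	/* Scan a TLV options area of @len bytes for @type. A generic option
	 * needs two header bytes (type, length), so the length byte at
	 * nh[offset + 1] may only be read once len >= 2 is known. */
	static int find_tlv(const unsigned char *nh, int offset, int len, int type)
	{
		while (len > 0) {
			int opttype = nh[offset];
			int optlen;

			if (opttype == type)
				return offset;

			switch (opttype) {
			case 0:			/* Pad1: one byte, no length field */
				optlen = 1;
				break;
			default:
				if (len < 2)	/* truncated: length byte missing */
					return -1;
				optlen = nh[offset + 1] + 2;
				if (optlen > len)
					return -1;
				break;
			}
			offset += optlen;
			len -= optlen;
		}
		return -1;
	}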
index 2438da5..bac768d 100644 (file)
@@ -2491,7 +2491,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
        const struct net_device *dev;
 
        if (rt->nh)
-               fib6_nh = nexthop_fib6_nh_bh(rt->nh);
+               fib6_nh = nexthop_fib6_nh(rt->nh);
 
        seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
 
@@ -2556,14 +2556,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
 
        if (tbl) {
                h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
-               node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+               node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
        } else {
                h = 0;
                node = NULL;
        }
 
        while (!node && h < FIB6_TABLE_HASHSZ) {
-               node = rcu_dereference_bh(
+               node = rcu_dereference(
                        hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
        }
        return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
@@ -2593,7 +2593,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        if (!v)
                goto iter_table;
 
-       n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+       n = rcu_dereference(((struct fib6_info *)v)->fib6_next);
        if (n)
                return n;
 
@@ -2619,12 +2619,12 @@ iter_table:
 }
 
 static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU_BH)
+       __acquires(RCU)
 {
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
 
-       rcu_read_lock_bh();
+       rcu_read_lock();
        iter->tbl = ipv6_route_seq_next_table(NULL, net);
        iter->skip = *pos;
 
@@ -2645,7 +2645,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
 }
 
 static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU_BH)
+       __releases(RCU)
 {
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
@@ -2653,7 +2653,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
        if (ipv6_route_iter_active(iter))
                fib6_walker_unlink(net, &iter->w);
 
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 }
 
 #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
index a4ecfc9..da80974 100644 (file)
@@ -1015,12 +1015,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                            ntohl(tun_id),
                                            ntohl(md->u.index), truncate,
                                            false);
+                       proto = htons(ETH_P_ERSPAN);
                } else if (md->version == 2) {
                        erspan_build_header_v2(skb,
                                               ntohl(tun_id),
                                               md->u.md2.dir,
                                               get_hwid(&md->u.md2),
                                               truncate, false);
+                       proto = htons(ETH_P_ERSPAN2);
                } else {
                        goto tx_err;
                }
@@ -1043,24 +1045,25 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                        break;
                }
 
-               if (t->parms.erspan_ver == 1)
+               if (t->parms.erspan_ver == 1) {
                        erspan_build_header(skb, ntohl(t->parms.o_key),
                                            t->parms.index,
                                            truncate, false);
-               else if (t->parms.erspan_ver == 2)
+                       proto = htons(ETH_P_ERSPAN);
+               } else if (t->parms.erspan_ver == 2) {
                        erspan_build_header_v2(skb, ntohl(t->parms.o_key),
                                               t->parms.dir,
                                               t->parms.hwid,
                                               truncate, false);
-               else
+                       proto = htons(ETH_P_ERSPAN2);
+               } else {
                        goto tx_err;
+               }
 
                fl6.daddr = t->parms.raddr;
        }
 
        /* Push GRE header. */
-       proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
-                                          : htons(ETH_P_ERSPAN2);
        gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
 
        /* TooBig packet may have updated dst->dev's mtu */
index 7d0adb6..44ee7a2 100644 (file)
@@ -793,7 +793,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
                if (!proto)
                        proto = inet->inet_num;
-               else if (proto != inet->inet_num)
+               else if (proto != inet->inet_num &&
+                        inet->inet_num != IPPROTO_RAW)
                        return -EINVAL;
 
                if (proto > 255)
index 67eaf3c..3bab0cc 100644 (file)
@@ -60,6 +60,8 @@ struct proto udplitev6_prot = {
        .per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
 
        .sysctl_mem        = sysctl_udp_mem,
+       .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+       .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size          = sizeof(struct udp6_sock),
        .h.udp_table       = &udplite_table,
 };
index a815f5a..31ab12f 100644 (file)
@@ -1940,7 +1940,8 @@ static u32 gen_reqid(struct net *net)
 }
 
 static int
-parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
+                  struct sadb_x_ipsecrequest *rq)
 {
        struct net *net = xp_net(xp);
        struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
@@ -1958,9 +1959,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
        if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
                return -EINVAL;
        t->mode = mode;
-       if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
+       if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
+               if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
+                   pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
+                       return -EINVAL;
                t->optional = 1;
-       else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
+       } else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
                t->reqid = rq->sadb_x_ipsecrequest_reqid;
                if (t->reqid > IPSEC_MANUAL_REQID_MAX)
                        t->reqid = 0;
@@ -2002,7 +2006,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
                    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
                        return -EINVAL;
 
-               if ((err = parse_ipsecrequest(xp, rq)) < 0)
+               if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
                        return err;
                len -= rq->sadb_x_ipsecrequest_len;
                rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
index 7317e4a..86b2036 100644 (file)
@@ -1578,9 +1578,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
                sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
                                  sdata);
 
-       /* abort any running channel switch */
+       /* abort any running channel switch or color change */
        mutex_lock(&local->mtx);
        link_conf->csa_active = false;
+       link_conf->color_change_active = false;
        if (link->csa_block_tx) {
                ieee80211_wake_vif_queues(local, sdata,
                                          IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3589,7 +3590,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
 EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
 
 static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
-                                         u32 *changed)
+                                         u64 *changed)
 {
        int err;
 
@@ -3632,7 +3633,7 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
 static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       u32 changed = 0;
+       u64 changed = 0;
        int err;
 
        sdata_assert_lock(sdata);
index dbc34fb..77c90ed 100644 (file)
@@ -258,7 +258,8 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
 
 static enum nl80211_chan_width
 ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
-                                         struct ieee80211_chanctx_conf *conf)
+                                         struct ieee80211_chanctx *ctx,
+                                         struct ieee80211_link_data *rsvd_for)
 {
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
        struct ieee80211_vif *vif = &sdata->vif;
@@ -267,13 +268,14 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
        rcu_read_lock();
        for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
                enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
-               struct ieee80211_bss_conf *link_conf =
-                       rcu_dereference(sdata->vif.link_conf[link_id]);
+               struct ieee80211_link_data *link =
+                       rcu_dereference(sdata->link[link_id]);
 
-               if (!link_conf)
+               if (!link)
                        continue;
 
-               if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+               if (link != rsvd_for &&
+                   rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
                        continue;
 
                switch (vif->type) {
@@ -287,7 +289,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
                         * point, so take the width from the chandef, but
                         * account also for TDLS peers
                         */
-                       width = max(link_conf->chandef.width,
+                       width = max(link->conf->chandef.width,
                                    ieee80211_get_max_required_bw(sdata, link_id));
                        break;
                case NL80211_IFTYPE_P2P_DEVICE:
@@ -296,7 +298,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_OCB:
-                       width = link_conf->chandef.width;
+                       width = link->conf->chandef.width;
                        break;
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_UNSPECIFIED:
@@ -316,7 +318,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
 
 static enum nl80211_chan_width
 ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx_conf *conf)
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for)
 {
        struct ieee80211_sub_if_data *sdata;
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
@@ -328,7 +331,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf);
+               width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
+                                                                 rsvd_for);
 
                max_bw = max(max_bw, width);
        }
@@ -336,8 +340,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
        /* use the configured bandwidth in case of monitor interface */
        sdata = rcu_dereference(local->monitor_sdata);
        if (sdata &&
-           rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == conf)
-               max_bw = max(max_bw, conf->def.width);
+           rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
+               max_bw = max(max_bw, ctx->conf.def.width);
 
        rcu_read_unlock();
 
@@ -349,8 +353,10 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
  * the max of min required widths of all the interfaces bound to this
  * channel context.
  */
-static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                            struct ieee80211_chanctx *ctx)
+static u32
+_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+                                 struct ieee80211_chanctx *ctx,
+                                 struct ieee80211_link_data *rsvd_for)
 {
        enum nl80211_chan_width max_bw;
        struct cfg80211_chan_def min_def;
@@ -370,7 +376,7 @@ static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                return 0;
        }
 
-       max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);
+       max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for);
 
        /* downgrade chandef up to max_bw */
        min_def = ctx->conf.def;
@@ -448,9 +454,10 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
  * channel context.
  */
 void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx *ctx)
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for)
 {
-       u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx);
+       u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
 
        if (!changed)
                return;
@@ -464,10 +471,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, ctx, false);
 }
 
-static void ieee80211_change_chanctx(struct ieee80211_local *local,
-                                    struct ieee80211_chanctx *ctx,
-                                    struct ieee80211_chanctx *old_ctx,
-                                    const struct cfg80211_chan_def *chandef)
+static void _ieee80211_change_chanctx(struct ieee80211_local *local,
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_chanctx *old_ctx,
+                                     const struct cfg80211_chan_def *chandef,
+                                     struct ieee80211_link_data *rsvd_for)
 {
        u32 changed;
 
@@ -492,7 +500,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, old_ctx, true);
 
        if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+               ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
                return;
        }
 
@@ -502,7 +510,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
 
        /* check if min chanctx also changed */
        changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
-                 _ieee80211_recalc_chanctx_min_def(local, ctx);
+                 _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
        drv_change_chanctx(local, ctx, changed);
 
        if (!local->use_chanctx) {
@@ -514,6 +522,14 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, old_ctx, false);
 }
 
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
+                                    struct ieee80211_chanctx *ctx,
+                                    struct ieee80211_chanctx *old_ctx,
+                                    const struct cfg80211_chan_def *chandef)
+{
+       _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+}
+
 static struct ieee80211_chanctx *
 ieee80211_find_chanctx(struct ieee80211_local *local,
                       const struct cfg80211_chan_def *chandef,
@@ -638,7 +654,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = false;
-       ieee80211_recalc_chanctx_min_def(local, ctx);
+       _ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
 
        return ctx;
 }
@@ -855,6 +871,9 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
        }
 
        if (new_ctx) {
+               /* recalc considering the link we are about to use it for */
+               ieee80211_recalc_chanctx_min_def(local, new_ctx, link);
+
                ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx);
                if (ret)
                        goto out;
@@ -873,12 +892,12 @@ out:
                ieee80211_recalc_chanctx_chantype(local, curr_ctx);
                ieee80211_recalc_smps_chanctx(local, curr_ctx);
                ieee80211_recalc_radar_chanctx(local, curr_ctx);
-               ieee80211_recalc_chanctx_min_def(local, curr_ctx);
+               ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL);
        }
 
        if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
                ieee80211_recalc_txpower(sdata, false);
-               ieee80211_recalc_chanctx_min_def(local, new_ctx);
+               ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
        }
 
        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
@@ -1270,7 +1289,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
 
        ieee80211_link_update_chandef(link, &link->reserved_chandef);
 
-       ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
+       _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
 
        vif_chsw[0].vif = &sdata->vif;
        vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1300,7 +1319,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
        if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
                ieee80211_free_chanctx(local, old_ctx);
 
-       ieee80211_recalc_chanctx_min_def(local, new_ctx);
+       ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
        ieee80211_recalc_smps_chanctx(local, new_ctx);
        ieee80211_recalc_radar_chanctx(local, new_ctx);
 
@@ -1665,7 +1684,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                ieee80211_recalc_chanctx_chantype(local, ctx);
                ieee80211_recalc_smps_chanctx(local, ctx);
                ieee80211_recalc_radar_chanctx(local, ctx);
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+               ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
 
                list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
                                         reserved_chanctx_list) {
index a0a7839..b0372e7 100644 (file)
@@ -2537,7 +2537,8 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
 void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx *ctx);
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for);
 bool ieee80211_is_radar_required(struct ieee80211_local *local);
 
 void ieee80211_dfs_cac_timer(unsigned long data);
index de5d69f..db0d013 100644 (file)
@@ -67,7 +67,7 @@
                        __entry->min_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;      \
                        __entry->min_chan_width = (c)->width;                           \
                        __entry->min_center_freq1 = (c)->center_freq1;                  \
-                       __entry->freq1_offset = (c)->freq1_offset;                      \
+                       __entry->min_freq1_offset = (c)->freq1_offset;                  \
                        __entry->min_center_freq2 = (c)->center_freq2;
 #define MIN_CHANDEF_PR_FMT     " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
 #define MIN_CHANDEF_PR_ARG     __entry->min_control_freq, __entry->min_freq_offset,    \
index 1a33274..0d9fbc8 100644 (file)
@@ -3791,6 +3791,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
        ieee80211_tx_result r;
        struct ieee80211_vif *vif = txq->vif;
        int q = vif->hw_queue[txq->ac];
+       unsigned long flags;
        bool q_stopped;
 
        WARN_ON_ONCE(softirq_count() == 0);
@@ -3799,9 +3800,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
                return NULL;
 
 begin:
-       spin_lock(&local->queue_stop_reason_lock);
+       spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
        q_stopped = local->queue_stop_reasons[q];
-       spin_unlock(&local->queue_stop_reason_lock);
+       spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
        if (unlikely(q_stopped)) {
                /* mark for waking later */
index 1527d6a..4bf7615 100644 (file)
@@ -3015,7 +3015,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
 
                chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
                                       conf);
-               ieee80211_recalc_chanctx_min_def(local, chanctx);
+               ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
        }
  unlock:
        mutex_unlock(&local->chanctx_mtx);
index d40544c..69c8c8c 100644 (file)
@@ -2976,7 +2976,9 @@ nla_put_failure:
        return -1;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const union nf_inet_addr any_addr;
+#endif
 
 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
 {
@@ -3460,10 +3462,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
        [CTA_EXPECT_NAT_DIR]    = { .type = NLA_U32 },
        [CTA_EXPECT_NAT_TUPLE]  = { .type = NLA_NESTED },
 };
+#endif
 
 static int
 ctnetlink_parse_expect_nat(const struct nlattr *attr,
index 59fb832..dc56759 100644 (file)
@@ -3865,12 +3865,10 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
        struct nft_trans *trans;
 
        list_for_each_entry(trans, &nft_net->commit_list, list) {
-               struct nft_rule *rule = nft_trans_rule(trans);
-
                if (trans->msg_type == NFT_MSG_NEWRULE &&
                    trans->ctx.chain == chain &&
                    id == nft_trans_rule_id(trans))
-                       return rule;
+                       return nft_trans_rule(trans);
        }
        return ERR_PTR(-ENOENT);
 }
index 19ea4d3..2f114aa 100644 (file)
@@ -221,7 +221,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 {
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
-       struct nft_rbtree_elem *rbe_prev;
+       struct nft_rbtree_elem *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb;
 
        gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
@@ -229,17 +229,21 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
                return -ENOMEM;
 
        /* search for expired end interval coming before this element. */
-       do {
+       while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                if (nft_rbtree_interval_end(rbe_prev))
                        break;
 
                prev = rb_prev(prev);
-       } while (prev != NULL);
+       }
+
+       if (rbe_prev) {
+               rb_erase(&rbe_prev->node, &priv->root);
+               atomic_dec(&set->nelems);
+       }
 
-       rb_erase(&rbe_prev->node, &priv->root);
        rb_erase(&rbe->node, &priv->root);
-       atomic_sub(2, &set->nelems);
+       atomic_dec(&set->nelems);
 
        nft_set_gc_batch_add(gcb, rbe);
        nft_set_gc_batch_complete(gcb);
@@ -268,7 +272,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_set_ext **ext)
 {
        struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
-       struct rb_node *node, *parent, **p, *first = NULL;
+       struct rb_node *node, *next, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
        int d, err;
@@ -307,7 +311,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
         * Values stored in the tree are in reversed order, starting from
         * highest to lowest value.
         */
-       for (node = first; node != NULL; node = rb_next(node)) {
+       for (node = first; node != NULL; node = next) {
+               next = rb_next(node);
+
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (!nft_set_elem_active(&rbe->ext, genmask))
index e9ca007..0f23e5e 100644 (file)
@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
        unsigned int nsh_len, mac_len;
        __be16 proto;
-       int nhoff;
 
        skb_reset_network_header(skb);
 
-       nhoff = skb->network_header - skb->mac_header;
        mac_len = skb->mac_len;
 
        if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        segs = skb_mac_gso_segment(skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
-                                    skb->network_header - nhoff,
-                                    mac_len);
+                                    mac_offset, mac_len);
                goto out;
        }
 
        for (skb = segs; skb; skb = skb->next) {
                skb->protocol = htons(ETH_P_NSH);
                __skb_push(skb, nsh_len);
-               skb_set_mac_header(skb, -nhoff);
+               skb->mac_header = mac_offset;
                skb->network_header = skb->mac_header + mac_len;
                skb->mac_len = mac_len;
        }
index 2f66a20..2abe45a 100644 (file)
@@ -324,9 +324,12 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
                t->pl.probe_size += SCTP_PL_BIG_STEP;
        } else if (t->pl.state == SCTP_PL_SEARCH) {
                if (!t->pl.probe_high) {
-                       t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
-                                              SCTP_MAX_PLPMTU);
-                       return false;
+                       if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
+                               t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+                                                      SCTP_MAX_PLPMTU);
+                               return false;
+                       }
+                       t->pl.probe_high = SCTP_MAX_PLPMTU;
                }
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
@@ -341,7 +344,7 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
        } else if (t->pl.state == SCTP_PL_COMPLETE) {
                /* Raise probe_size again after 30 * interval in Search Complete */
                t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-               t->pl.probe_size += SCTP_PL_MIN_STEP;
+               t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
        }
 
        return t->pl.state == SCTP_PL_COMPLETE;
index 50c38b6..538e9c6 100644 (file)
@@ -2000,8 +2000,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
                return rc;
 
        /* create send buffer and rmb */
-       if (smc_buf_create(new_smc, false))
+       if (smc_buf_create(new_smc, false)) {
+               smc_conn_abort(new_smc, ini->first_contact_local);
                return SMC_CLC_DECL_MEM;
+       }
 
        return 0;
 }
@@ -2217,8 +2219,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
        smcr_version = ini->smcr_version;
        ini->smcr_version = SMC_V2;
        rc = smc_listen_rdma_init(new_smc, ini);
-       if (!rc)
+       if (!rc) {
                rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+               if (rc)
+                       smc_conn_abort(new_smc, ini->first_contact_local);
+       }
        if (!rc)
                return;
        ini->smcr_version = smcr_version;
index 4543567..3f465fa 100644 (file)
@@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
        int i, j;
 
        /* do link balancing */
+       conn->lnk = NULL;       /* reset conn->lnk first */
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                struct smc_link *lnk = &conn->lgr->lnk[i];
 
index 212c5d5..9734e1d 100644 (file)
@@ -639,6 +639,16 @@ gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
 
        ret = write_bytes_to_xdr_buf(buf, offset, data, len);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+       /*
+        * CBC-CTS does not define an output IV but RFC 3962 defines it as the
+        * penultimate block of ciphertext, so copy that into the IV buffer
+        * before returning.
+        */
+       if (encrypt)
+               memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
+#endif
+
 out:
        kfree(data);
        return ret;
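As a stand-alone statement of the RFC 3962 rule the comment cites; the helper name and buffer layout are illustrative, and whole ciphertext blocks are assumed:

	#include <string.h>

	/* RFC 3962: after a CBC-CTS encryption, the next operation's IV is
	 * the next-to-last block of the produced ciphertext. */
	static void cts_output_iv(const unsigned char *ct, size_t ct_len,
				  unsigned char *iv, size_t blksize)
	{
		if (ct_len >= 2 * blksize)
			memcpy(iv, ct + ct_len - 2 * blksize, blksize);
	}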
index c8321de..6debf4f 100644 (file)
@@ -927,11 +927,10 @@ static void __rpc_execute(struct rpc_task *task)
                 */
                do_action = task->tk_action;
                /* Tasks with an RPC error status should exit */
-               if (do_action != rpc_exit_task &&
+               if (do_action && do_action != rpc_exit_task &&
                    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
                        task->tk_status = status;
-                       if (do_action != NULL)
-                               do_action = rpc_exit_task;
+                       do_action = rpc_exit_task;
                }
                /* Callbacks override all actions */
                if (task->tk_callback) {
index 26367cf..79967b6 100644 (file)
@@ -1052,7 +1052,7 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
        }
 
-       trace_svc_register(progname, version, protocol, port, family, error);
+       trace_svc_register(progname, version, family, protocol, port, error);
        return error;
 }
 
@@ -1416,7 +1416,7 @@ err_bad_rpc:
        /* Only RPCv2 supported */
        xdr_stream_encode_u32(xdr, RPC_VERSION);
        xdr_stream_encode_u32(xdr, RPC_VERSION);
-       goto sendit;
+       return 1;       /* don't wrap */
 
 err_bad_auth:
        dprintk("svc: authentication failed (%d)\n",
@@ -1432,7 +1432,7 @@ err_bad_auth:
 err_bad_prog:
        dprintk("svc: unknown program %d\n", rqstp->rq_prog);
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
+       *rqstp->rq_accept_statp = rpc_prog_unavail;
        goto sendit;
 
 err_bad_vers:
@@ -1440,7 +1440,12 @@ err_bad_vers:
                       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
+       *rqstp->rq_accept_statp = rpc_prog_mismatch;
+
+       /*
+        * svc_authenticate() has already added the verifier and
+        * advanced the stream just past rq_accept_statp.
+        */
        xdr_stream_encode_u32(xdr, process.mismatch.lovers);
        xdr_stream_encode_u32(xdr, process.mismatch.hivers);
        goto sendit;
@@ -1449,19 +1454,19 @@ err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
+       *rqstp->rq_accept_statp = rpc_proc_unavail;
        goto sendit;
 
 err_garbage_args:
        svc_printk(rqstp, "failed to decode RPC header\n");
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
+       *rqstp->rq_accept_statp = rpc_garbage_args;
        goto sendit;
 
 err_system_err:
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
+       *rqstp->rq_accept_statp = rpc_system_err;
        goto sendit;
 }
 
index 84e5d7d..13a1489 100644 (file)
@@ -532,13 +532,23 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 }
 EXPORT_SYMBOL_GPL(svc_reserve);
 
+static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
+{
+       if (!dr)
+               return;
+
+       xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
+       kfree(dr);
+}
+
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
        struct svc_xprt *xprt = rqstp->rq_xprt;
 
-       xprt->xpt_ops->xpo_release_rqst(rqstp);
+       xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
-       kfree(rqstp->rq_deferred);
+       free_deferred(xprt, rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;
 
        svc_rqst_release_pages(rqstp);
@@ -1054,7 +1064,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
        spin_unlock_bh(&serv->sv_lock);
 
        while ((dr = svc_deferred_dequeue(xprt)) != NULL)
-               kfree(dr);
+               free_deferred(xprt, dr);
 
        call_xpt_users(xprt);
        svc_xprt_put(xprt);
@@ -1176,8 +1186,8 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
                trace_svc_defer_drop(dr);
+               free_deferred(xprt, dr);
                svc_xprt_put(xprt);
-               kfree(dr);
                return;
        }
        dr->xprt = NULL;
@@ -1222,14 +1232,14 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
-               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
-               rqstp->rq_xprt_ctxt = NULL;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
+       dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+       rqstp->rq_xprt_ctxt = NULL;
        trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
@@ -1262,6 +1272,8 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
        rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
+
+       dr->xprt_ctxt = NULL;
        svc_xprt_received(rqstp->rq_xprt);
        return dr->argslen << 2;
 }
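The xpo_release_ctxt() hook used above takes the transport and the context explicitly, so one release path serves both a live rqstp and a parked deferred request. A minimal sketch of a conforming hook, modeled on the UDP variant further below; demo_release_ctxt is illustrative:

	static void demo_release_ctxt(struct svc_xprt *xprt, void *ctxt)
	{
		struct sk_buff *skb = ctxt;	/* whatever this transport cached */

		if (skb)
			consume_skb(skb);
	}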
index a51c9b9..63fe7a3 100644 (file)
@@ -121,27 +121,27 @@ static void svc_reclassify_socket(struct socket *sock)
 #endif
 
 /**
- * svc_tcp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_tcp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
+static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
 }
 
 /**
- * svc_udp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_udp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_udp_release_rqst(struct svc_rqst *rqstp)
+static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
-       struct sk_buff *skb = rqstp->rq_xprt_ctxt;
+       struct sk_buff *skb = ctxt;
 
-       if (skb) {
-               rqstp->rq_xprt_ctxt = NULL;
+       if (skb)
                consume_skb(skb);
-       }
 }
 
 union svc_pktinfo_u {
@@ -696,7 +696,8 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_udp_release_rqst(rqstp);
+       svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        svc_set_cmsg_data(rqstp, cmh);
 
@@ -768,7 +769,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
        .xpo_recvfrom = svc_udp_recvfrom,
        .xpo_sendto = svc_udp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_udp_release_rqst,
+       .xpo_release_ctxt = svc_udp_release_ctxt,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_udp_has_wspace,
@@ -895,6 +896,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
                trace_svcsock_accept_err(xprt, serv->sv_name, err);
                return NULL;
        }
+       if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
+               return NULL;
+
        set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
        err = kernel_getpeername(newsock, sin);
@@ -935,7 +939,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
        return &newsvsk->sk_xprt;
 
 failed:
-       sock_release(newsock);
+       sockfd_put(newsock);
        return NULL;
 }
 
@@ -1298,7 +1302,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_tcp_release_rqst(rqstp);
+       svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        atomic_inc(&svsk->sk_sendqlen);
        mutex_lock(&xprt->xpt_mutex);
@@ -1343,7 +1348,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
        .xpo_recvfrom = svc_tcp_recvfrom,
        .xpo_sendto = svc_tcp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_tcp_release_rqst,
+       .xpo_release_ctxt = svc_tcp_release_ctxt,
        .xpo_detach = svc_tcp_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_tcp_has_wspace,
@@ -1430,7 +1435,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                                                struct socket *sock,
                                                int flags)
 {
-       struct file     *filp = NULL;
        struct svc_sock *svsk;
        struct sock     *inet;
        int             pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
@@ -1439,14 +1443,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        if (!svsk)
                return ERR_PTR(-ENOMEM);
 
-       if (!sock->file) {
-               filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
-               if (IS_ERR(filp)) {
-                       kfree(svsk);
-                       return ERR_CAST(filp);
-               }
-       }
-
        inet = sock->sk;
 
        if (pmap_register) {
@@ -1456,8 +1452,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                                     inet->sk_protocol,
                                     ntohs(inet_sk(inet)->inet_sport));
                if (err < 0) {
-                       if (filp)
-                               fput(filp);
                        kfree(svsk);
                        return ERR_PTR(err);
                }
index 1c658fa..a22fe75 100644 (file)
@@ -239,21 +239,20 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 }
 
 /**
- * svc_rdma_release_rqst - Release transport-specific per-rqst resources
- * @rqstp: svc_rqst being released
+ * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
+ * @xprt: the transport which owned the context
+ * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  * Ensure that the recv_ctxt is released whether or not a Reply
  * was sent. For example, the client could close the connection,
  * or svc_process could drop an RPC, before the Reply is sent.
  */
-void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 {
-       struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
-       struct svc_xprt *xprt = rqstp->rq_xprt;
+       struct svc_rdma_recv_ctxt *ctxt = vctxt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
 
-       rqstp->rq_xprt_ctxt = NULL;
        if (ctxt)
                svc_rdma_recv_ctxt_put(rdma, ctxt);
 }
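
A sketch of how the transport-independent side invokes the renamed callback; the wrapper name is hypothetical. Passing the context pointer explicitly, instead of digging it out of the rqstp inside each transport, also lets deferred requests hand in dr->xprt_ctxt through the same hook, as the updated kernel-doc above notes.

static void example_release_ctxt(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;

	xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
	rqstp->rq_xprt_ctxt = NULL;	/* caller, not callback, clears it */
}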
index 416b298..ca04f7a 100644
@@ -80,7 +80,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_result_payload = svc_rdma_result_payload,
-       .xpo_release_rqst = svc_rdma_release_rqst,
+       .xpo_release_ctxt = svc_rdma_release_ctxt,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_has_wspace = svc_rdma_has_wspace,
index 35cac77..5388140 100644
@@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
        return mtu;
 }
 
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
+{
+       int mtu = TIPC_MIN_BEARER_MTU;
+       struct tipc_bearer *b;
+
+       rcu_read_lock();
+       b = bearer_get(net, bearer_id);
+       if (b)
+               mtu += b->encap_hlen;
+       rcu_read_unlock();
+       return mtu;
+}
+
 /* tipc_bearer_xmit_skb - sends buffer to destination over bearer
  */
 void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
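
For illustration, the per-family overhead that b->encap_hlen now records for UDP bearers (helper name hypothetical): IPv4 plus UDP is 20 + 8 = 28 bytes and IPv6 plus UDP is 40 + 8 = 48 bytes, so the floor returned by tipc_bearer_min_mtu() is TIPC_MIN_BEARER_MTU plus that amount.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>

static u16 udp_bearer_encap_hlen(bool ipv6)
{
	/* 40 + 8 = 48 bytes for IPv6, 20 + 8 = 28 bytes for IPv4 */
	return ipv6 ? sizeof(struct ipv6hdr) + sizeof(struct udphdr)
		    : sizeof(struct iphdr) + sizeof(struct udphdr);
}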
@@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
                                return -EINVAL;
                        }
 #ifdef CONFIG_TIPC_MEDIA_UDP
-                       if (tipc_udp_mtu_bad(nla_get_u32
-                                            (props[TIPC_NLA_PROP_MTU]))) {
+                       if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
+                           b->encap_hlen + TIPC_MIN_BEARER_MTU) {
                                NL_SET_ERR_MSG(info->extack,
                                               "MTU value is out-of-range");
                                return -EINVAL;
index 490ad6e..bd0cc5c 100644
@@ -146,6 +146,7 @@ struct tipc_media {
  * @identity: array index of this bearer within TIPC bearer array
  * @disc: ptr to link setup request
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @encap_hlen: encapsulation header length
  * @up: bearer up flag (bit 0)
  * @refcnt: tipc_bearer reference counter
  *
@@ -170,6 +171,7 @@ struct tipc_bearer {
        u32 identity;
        struct tipc_discoverer *disc;
        char net_plane;
+       u16 encap_hlen;
        unsigned long up;
        refcount_t refcnt;
 };
@@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
 int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
 bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
 void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
                          struct sk_buff *skb,
index b3ce248..2eff1c7 100644
@@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        struct tipc_msg *hdr = buf_msg(skb);
        struct tipc_gap_ack_blks *ga = NULL;
        bool reply = msg_probe(hdr), retransmitted = false;
-       u32 dlen = msg_data_sz(hdr), glen = 0;
+       u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
@@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        switch (mtyp) {
        case RESET_MSG:
        case ACTIVATE_MSG:
+               msg_max = msg_max_pkt(hdr);
+               if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
+                       break;
                /* Complete own link name with peer's interface name */
                if_name =  strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                l->peer_session = msg_session(hdr);
                l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
-               if (l->mtu > msg_max_pkt(hdr))
-                       l->mtu = msg_max_pkt(hdr);
+               if (l->mtu > msg_max)
+                       l->mtu = msg_max;
                break;
 
        case STATE_MSG:
index c2bb818..0a85244 100644
@@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
                        udp_conf.local_ip.s_addr = local.ipv4.s_addr;
                udp_conf.use_udp_checksums = false;
                ub->ifindex = dev->ifindex;
-               if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
-                                     sizeof(struct udphdr))) {
+               b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
+               if (tipc_mtu_bad(dev, b->encap_hlen)) {
                        err = -EINVAL;
                        goto err;
                }
@@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
                else
                        udp_conf.local_ip6 = local.ipv6;
                ub->ifindex = dev->ifindex;
+               b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
                b->mtu = 1280;
 #endif
        } else {
index 804c388..0672aca 100644
@@ -167,6 +167,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
        return ctx->strp.msg_ready;
 }
 
+static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
+{
+       return ctx->strp.mixed_decrypted;
+}
+
 #ifdef CONFIG_TLS_DEVICE
 int tls_device_init(void);
 void tls_device_cleanup(void);
index a7cc4f9..bf69c9d 100644
@@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
        struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb = tls_strp_msg(sw_ctx);
        struct strp_msg *rxm = strp_msg(skb);
-       int is_decrypted = skb->decrypted;
-       int is_encrypted = !is_decrypted;
-       struct sk_buff *skb_iter;
-       int left;
-
-       left = rxm->full_len - skb->len;
-       /* Check if all the data is decrypted already */
-       skb_iter = skb_shinfo(skb)->frag_list;
-       while (skb_iter && left > 0) {
-               is_decrypted &= skb_iter->decrypted;
-               is_encrypted &= !skb_iter->decrypted;
-
-               left -= skb_iter->len;
-               skb_iter = skb_iter->next;
+       int is_decrypted, is_encrypted;
+
+       if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
+               is_decrypted = skb->decrypted;
+               is_encrypted = !is_decrypted;
+       } else {
+               is_decrypted = 0;
+               is_encrypted = 0;
        }
 
        trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
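
A condensed sketch of the new decision, assuming CONFIG_TLS_DEVICE so that skb->decrypted exists (helper name hypothetical): when the strparser saw no mix of decrypted and undecrypted input while assembling the record, the anchor skb's decrypted bit describes the whole record; otherwise the record counts as neither fully decrypted nor fully encrypted and falls through to software decryption.

static void classify_record(const struct sk_buff *skb, bool mixed,
			    bool *fully_decrypted, bool *fully_encrypted)
{
	*fully_decrypted = !mixed && skb->decrypted;
	*fully_encrypted = !mixed && !skb->decrypted;
}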
index 955ac3e..da95abb 100644
@@ -29,34 +29,50 @@ static void tls_strp_anchor_free(struct tls_strparser *strp)
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
 
        DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
-       shinfo->frag_list = NULL;
+       if (!strp->copy_mode)
+               shinfo->frag_list = NULL;
        consume_skb(strp->anchor);
        strp->anchor = NULL;
 }
 
-/* Create a new skb with the contents of input copied to its page frags */
-static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+static struct sk_buff *
+tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
+                 int offset, int len)
 {
-       struct strp_msg *rxm;
        struct sk_buff *skb;
-       int i, err, offset;
+       int i, err;
 
-       skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
+       skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
                                   &err, strp->sk->sk_allocation);
        if (!skb)
                return NULL;
 
-       offset = strp->stm.offset;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset,
+               WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
                                           skb_frag_address(frag),
                                           skb_frag_size(frag)));
                offset += skb_frag_size(frag);
        }
 
-       skb_copy_header(skb, strp->anchor);
+       skb->len = len;
+       skb->data_len = len;
+       skb_copy_header(skb, in_skb);
+       return skb;
+}
+
+/* Create a new skb with the contents of input copied to its page frags */
+static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+{
+       struct strp_msg *rxm;
+       struct sk_buff *skb;
+
+       skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
+                               strp->stm.full_len);
+       if (!skb)
+               return NULL;
+
        rxm = strp_msg(skb);
        rxm->offset = 0;
        return skb;
@@ -180,22 +196,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i], false);
        shinfo->nr_frags = 0;
+       if (strp->copy_mode) {
+               kfree_skb_list(shinfo->frag_list);
+               shinfo->frag_list = NULL;
+       }
        strp->copy_mode = 0;
+       strp->mixed_decrypted = 0;
 }
 
-static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
-                          unsigned int offset, size_t in_len)
+static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+                               struct sk_buff *in_skb, unsigned int offset,
+                               size_t in_len)
 {
-       struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
-       struct sk_buff *skb;
-       skb_frag_t *frag;
        size_t len, chunk;
+       skb_frag_t *frag;
        int sz;
 
-       if (strp->msg_ready)
-               return 0;
-
-       skb = strp->anchor;
        frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
 
        len = in_len;
@@ -208,19 +224,26 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                                           skb_frag_size(frag),
                                           chunk));
 
-               sz = tls_rx_msg_size(strp, strp->anchor);
-               if (sz < 0) {
-                       desc->error = sz;
-                       return 0;
-               }
-
-               /* We may have over-read, sz == 0 is guaranteed under-read */
-               if (sz > 0)
-                       chunk = min_t(size_t, chunk, sz - skb->len);
-
                skb->len += chunk;
                skb->data_len += chunk;
                skb_frag_size_add(frag, chunk);
+
+               sz = tls_rx_msg_size(strp, skb);
+               if (sz < 0)
+                       return sz;
+
+               /* We may have over-read, sz == 0 is guaranteed under-read */
+               if (unlikely(sz && sz < skb->len)) {
+                       int over = skb->len - sz;
+
+                       WARN_ON_ONCE(over > chunk);
+                       skb->len -= over;
+                       skb->data_len -= over;
+                       skb_frag_size_add(frag, -over);
+
+                       chunk -= over;
+               }
+
                frag++;
                len -= chunk;
                offset += chunk;
@@ -247,15 +270,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                offset += chunk;
        }
 
-       if (strp->stm.full_len == skb->len) {
+read_done:
+       return in_len - len;
+}
+
+static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
+                              struct sk_buff *in_skb, unsigned int offset,
+                              size_t in_len)
+{
+       struct sk_buff *nskb, *first, *last;
+       struct skb_shared_info *shinfo;
+       size_t chunk;
+       int sz;
+
+       if (strp->stm.full_len)
+               chunk = strp->stm.full_len - skb->len;
+       else
+               chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+       chunk = min(chunk, in_len);
+
+       nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
+       if (!nskb)
+               return -ENOMEM;
+
+       shinfo = skb_shinfo(skb);
+       if (!shinfo->frag_list) {
+               shinfo->frag_list = nskb;
+               nskb->prev = nskb;
+       } else {
+               first = shinfo->frag_list;
+               last = first->prev;
+               last->next = nskb;
+               first->prev = nskb;
+       }
+
+       skb->len += chunk;
+       skb->data_len += chunk;
+
+       if (!strp->stm.full_len) {
+               sz = tls_rx_msg_size(strp, skb);
+               if (sz < 0)
+                       return sz;
+
+               /* We may have over-read, sz == 0 is guaranteed under-read */
+               if (unlikely(sz && sz < skb->len)) {
+                       int over = skb->len - sz;
+
+                       WARN_ON_ONCE(over > chunk);
+                       skb->len -= over;
+                       skb->data_len -= over;
+                       __pskb_trim(nskb, nskb->len - over);
+
+                       chunk -= over;
+               }
+
+               strp->stm.full_len = sz;
+       }
+
+       return chunk;
+}
+
+static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+                          unsigned int offset, size_t in_len)
+{
+       struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
+       struct sk_buff *skb;
+       int ret;
+
+       if (strp->msg_ready)
+               return 0;
+
+       skb = strp->anchor;
+       if (!skb->len)
+               skb_copy_decrypted(skb, in_skb);
+       else
+               strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
+
+       if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
+               ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
+       else
+               ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
+       if (ret < 0) {
+               desc->error = ret;
+               ret = 0;
+       }
+
+       if (strp->stm.full_len && strp->stm.full_len == skb->len) {
                desc->count = 0;
 
                strp->msg_ready = 1;
                tls_rx_msg_ready(strp);
        }
 
-read_done:
-       return in_len - len;
+       return ret;
 }
 
 static int tls_strp_read_copyin(struct tls_strparser *strp)
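
The append in tls_strp_copyin_skb() uses a common kernel idiom worth spelling out: the frag_list head's ->prev pointer doubles as a tail pointer, so appends stay O(1) without a separate tail field. A standalone sketch (function name hypothetical):

static void frag_list_append(struct skb_shared_info *shinfo,
			     struct sk_buff *nskb)
{
	if (!shinfo->frag_list) {
		shinfo->frag_list = nskb;
		nskb->prev = nskb;		/* lone node: head is also tail */
	} else {
		struct sk_buff *first = shinfo->frag_list;

		first->prev->next = nskb;	/* old tail -> new node */
		first->prev = nskb;		/* head->prev tracks the tail */
	}
}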
@@ -315,15 +422,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
        return 0;
 }
 
-static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
 {
        unsigned int len = strp->stm.offset + strp->stm.full_len;
-       struct sk_buff *skb;
+       struct sk_buff *first, *skb;
        u32 seq;
 
-       skb = skb_shinfo(strp->anchor)->frag_list;
-       seq = TCP_SKB_CB(skb)->seq;
+       first = skb_shinfo(strp->anchor)->frag_list;
+       skb = first;
+       seq = TCP_SKB_CB(first)->seq;
 
+       /* Make sure there's no duplicate data in the queue,
+        * and the decrypted status matches.
+        */
        while (skb->len < len) {
                seq += skb->len;
                len -= skb->len;
@@ -331,6 +442,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp)
 
                if (TCP_SKB_CB(skb)->seq != seq)
                        return false;
+               if (skb_cmp_decrypted(first, skb))
+                       return false;
        }
 
        return true;
@@ -411,7 +524,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
                        return tls_strp_read_copy(strp, true);
        }
 
-       if (!tls_strp_check_no_dup(strp))
+       if (!tls_strp_check_queue_ok(strp))
                return tls_strp_read_copy(strp, false);
 
        strp->msg_ready = 1;
index 635b8bf..6e6a7c3 100644
@@ -2304,10 +2304,14 @@ static void tls_data_ready(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_psock *psock;
+       gfp_t alloc_save;
 
        trace_sk_data_ready(sk);
 
+       alloc_save = sk->sk_allocation;
+       sk->sk_allocation = GFP_ATOMIC;
        tls_strp_data_ready(&ctx->strp);
+       sk->sk_allocation = alloc_save;
 
        psock = sk_psock_get(sk);
        if (psock) {
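
The sk_allocation save/restore above is a general pattern: tls_strp_data_ready() can allocate through sk->sk_allocation but may be reached from softirq context, where sleeping allocations are forbidden. A minimal sketch of the pattern with a hypothetical callback:

static void run_rx_hook_atomic(struct sock *sk, void (*hook)(struct sock *))
{
	gfp_t saved = sk->sk_allocation;

	sk->sk_allocation = GFP_ATOMIC;	/* no sleeping in the data-ready path */
	hook(sk);
	sk->sk_allocation = saved;
}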
index cc695c9..e7728b5 100644
@@ -2553,7 +2553,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
        struct unix_sock *u = unix_sk(sk);
        struct sk_buff *skb;
-       int err, copied;
+       int err;
 
        mutex_lock(&u->iolock);
        skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
@@ -2561,10 +2561,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
        if (!skb)
                return err;
 
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-
-       return copied;
+       return recv_actor(sk, skb);
 }
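
With this change the read_skb actor takes ownership of the skb, so callers like unix_read_skb() must not free it afterwards. A sketch of the resulting contract, with a hypothetical actor:

static int example_recv_actor(struct sock *sk, struct sk_buff *skb)
{
	int used = skb->len;

	/* ... hand the payload to the consumer ... */
	kfree_skb(skb);		/* the actor, not the caller, frees the skb */
	return used;
}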
 
 /*
index 413407b..efb8a09 100644
@@ -1462,7 +1462,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
                        vsock_transport_cancel_pkt(vsk);
                        vsock_remove_connected(vsk);
                        goto out_wait;
-               } else if (timeout == 0) {
+               } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
                        err = -ETIMEDOUT;
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
index e487855..b769fc2 100644
@@ -1441,7 +1441,6 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
        struct sock *sk = sk_vsock(vsk);
        struct sk_buff *skb;
        int off = 0;
-       int copied;
        int err;
 
        spin_lock_bh(&vvs->rx_lock);
@@ -1454,9 +1453,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
        if (!skb)
                return err;
 
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-       return copied;
+       return recv_actor(sk, skb);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
 
index a138225..c501db7 100644
@@ -5,7 +5,7 @@
  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2016      Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -540,6 +540,10 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
        /* skip the TBTT offset */
        pos++;
 
+       /* ignore entries with invalid BSSID */
+       if (!is_valid_ether_addr(pos))
+               return -EINVAL;
+
        memcpy(entry->bssid, pos, ETH_ALEN);
        pos += ETH_ALEN;
 
index bef28c6..408f5e5 100644
@@ -378,7 +378,7 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
                break;
        default:
                xdo->dev = NULL;
-               dev_put(dev);
+               netdev_put(dev, &xdo->dev_tracker);
                NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
                return -EINVAL;
        }
index 35279c2..1f99dc4 100644
@@ -310,52 +310,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->mark = 0;
 }
 
-static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-                      int encap_type, unsigned short family)
-{
-       struct sec_path *sp;
-
-       sp = skb_sec_path(skb);
-       if (sp && (sp->len || sp->olen) &&
-           !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
-               goto discard;
-
-       XFRM_SPI_SKB_CB(skb)->family = family;
-       if (family == AF_INET) {
-               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
-       } else {
-               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
-       }
-
-       return xfrm_input(skb, nexthdr, spi, encap_type);
-discard:
-       kfree_skb(skb);
-       return 0;
-}
-
-static int xfrmi4_rcv(struct sk_buff *skb)
-{
-       return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
-}
-
-static int xfrmi6_rcv(struct sk_buff *skb)
-{
-       return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
-                          0, 0, AF_INET6);
-}
-
-static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
-}
-
-static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
-}
-
 static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
 {
        const struct xfrm_mode *inner_mode;
@@ -991,8 +945,8 @@ static struct pernet_operations xfrmi_net_ops = {
 };
 
 static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
-       .handler        =       xfrmi6_rcv,
-       .input_handler  =       xfrmi6_input,
+       .handler        =       xfrm6_rcv,
+       .input_handler  =       xfrm_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi6_err,
        .priority       =       10,
@@ -1042,8 +996,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
 #endif
 
 static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
-       .handler        =       xfrmi4_rcv,
-       .input_handler  =       xfrmi4_input,
+       .handler        =       xfrm4_rcv,
+       .input_handler  =       xfrm_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi4_err,
        .priority       =       10,
index 5c61ec0..6d15788 100644
@@ -3312,7 +3312,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 
 static inline int
 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
-             unsigned short family)
+             unsigned short family, u32 if_id)
 {
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
@@ -3323,7 +3323,8 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
                (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
                 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
                !(x->props.mode != XFRM_MODE_TRANSPORT &&
-                 xfrm_state_addr_cmp(tmpl, x, family));
+                 xfrm_state_addr_cmp(tmpl, x, family)) &&
+               (if_id == 0 || if_id == x->if_id);
 }
 
 /*
@@ -3335,7 +3336,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
  */
 static inline int
 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
-              unsigned short family)
+              unsigned short family, u32 if_id)
 {
        int idx = start;
 
@@ -3345,7 +3346,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
-               if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
+               if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
                        return ++idx;
                if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
                        if (start == -1)
@@ -3712,12 +3713,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                }
                xfrm_nr = ti;
 
-               if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
-                   !xfrm_nr) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
-                       goto reject;
-               }
-
                if (npols > 1) {
                        xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
                        tpp = stp;
@@ -3730,7 +3725,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                 * are implied between each two transformations.
                 */
                for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
-                       k = xfrm_policy_ok(tpp[i], sp, k, family);
+                       k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
                        if (k < 0) {
                                if (k < -1)
                                        /* "-2 - errored_index" returned */
@@ -3745,9 +3740,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        goto reject;
                }
 
-               if (if_id)
-                       secpath_reset(skb);
-
                xfrm_pols_put(pols, npols);
                return 1;
        }
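
The effect of threading if_id through these helpers: a state now satisfies a template only when the policy's interface ID is the wildcard 0 or equals the state's own if_id, which replaces the old secpath_reset() workaround removed above. Boiled down to a predicate (name hypothetical):

static bool tmpl_if_id_match(u32 policy_if_id, const struct xfrm_state *x)
{
	return policy_if_id == 0 || policy_if_id == x->if_id;
}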
index d720e16..c34a2a0 100644
@@ -1770,7 +1770,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 }
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
-                        struct netlink_ext_ack *extack)
+                        int dir, struct netlink_ext_ack *extack)
 {
        u16 prev_family;
        int i;
@@ -1796,6 +1796,10 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
                switch (ut[i].mode) {
                case XFRM_MODE_TUNNEL:
                case XFRM_MODE_BEET:
+                       if (ut[i].optional && dir == XFRM_POLICY_OUT) {
+                               NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
+                               return -EINVAL;
+                       }
                        break;
                default:
                        if (ut[i].family != prev_family) {
@@ -1833,7 +1837,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
 }
 
 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
-                              struct netlink_ext_ack *extack)
+                              int dir, struct netlink_ext_ack *extack)
 {
        struct nlattr *rt = attrs[XFRMA_TMPL];
 
@@ -1844,7 +1848,7 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
                int nr = nla_len(rt) / sizeof(*utmpl);
                int err;
 
-               err = validate_tmpl(nr, utmpl, pol->family, extack);
+               err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
                if (err)
                        return err;
 
@@ -1921,7 +1925,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
        if (err)
                goto error;
 
-       if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
+       if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
                err = copy_from_user_sec_ctx(xp, attrs);
        if (err)
                goto error;
@@ -1980,6 +1984,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (err) {
                xfrm_dev_policy_delete(xp);
+               xfrm_dev_policy_free(xp);
                security_xfrm_policy_free(xp->security);
                kfree(xp);
                return err;
@@ -3499,7 +3504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                return NULL;
 
        nr = ((len - sizeof(*p)) / sizeof(*ut));
-       if (validate_tmpl(nr, ut, p->sel.family, NULL))
+       if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
                return NULL;
 
        if (p->dir > XFRM_POLICY_OUT)
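
The rule added to validate_tmpl() condenses to the predicate below (name hypothetical): an outbound policy may not carry an optional template in tunnel or BEET mode, since "optional" only makes sense when matching inbound traffic.

static bool tmpl_allowed(const struct xfrm_user_tmpl *ut, int dir)
{
	bool tunnel_like = ut->mode == XFRM_MODE_TUNNEL ||
			   ut->mode == XFRM_MODE_BEET;

	return !(tunnel_like && ut->optional && dir == XFRM_POLICY_OUT);
}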
index 6448b78..bf66277 100644
@@ -498,7 +498,6 @@ int main(int argc, char **argv)
                                        "Option -%c requires an argument.\n\n",
                                        optopt);
                case 'h':
-                       __fallthrough;
                default:
                        Usage();
                        return 0;
index 46e273b..50a6b50 100644
@@ -141,6 +141,14 @@ int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_channel,
 
 void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size);
 void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr);
+#else
+
+static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
+static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
+static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
+
+#endif
+
 snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
                                     const char *ptr, snd_pcm_uframes_t size,
                                     int in_kernel);
@@ -151,14 +159,6 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
 snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
                                     void **bufs, snd_pcm_uframes_t frames);
 
-#else
-
-static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
-static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
-static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
-
-#endif
-
 #ifdef PLUGIN_DEBUG
 #define pdprintf(fmt, args...) printk(KERN_DEBUG "plugin: " fmt, ##args)
 #else
index a15f55b..295163b 100644
@@ -259,8 +259,10 @@ int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x)
                return err;
 
        err = init_stream(dg00x, &dg00x->tx_stream);
-       if (err < 0)
+       if (err < 0) {
                destroy_stream(dg00x, &dg00x->rx_stream);
+               return err;
+       }
 
        err = amdtp_domain_init(&dg00x->domain);
        if (err < 0) {
index accc9d2..6c043fb 100644
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
 int snd_hdac_keep_power_up(struct hdac_device *codec)
 {
        if (!atomic_inc_not_zero(&codec->in_pm)) {
-               int ret = pm_runtime_get_if_in_use(&codec->dev);
+               int ret = pm_runtime_get_if_active(&codec->dev, true);
                if (!ret)
                        return -1;
                if (ret < 0)
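
The distinction being exploited, sketched with a hypothetical helper and assuming the two-argument pm_runtime_get_if_active() of this kernel: with ign_usage_count set, the call takes a reference whenever the device is RPM_ACTIVE, whereas pm_runtime_get_if_in_use() additionally requires a nonzero usage count.

static int hold_if_active(struct device *dev)
{
	int ret = pm_runtime_get_if_active(dev, true);

	if (ret <= 0)
		return ret;	/* disabled or not active: no ref taken */

	/* ... device guaranteed active here ... */
	pm_runtime_put(dev);
	return 1;
}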
index 62f4584..7d882b3 100644
@@ -531,7 +531,7 @@ static int load_firmware(struct snd_cs46xx *chip)
        return err;
 }
 
-int snd_cs46xx_download_image(struct snd_cs46xx *chip)
+static __maybe_unused int snd_cs46xx_download_image(struct snd_cs46xx *chip)
 {
        int idx, err;
        unsigned int offset = 0;
index fc114e5..dbf7aa8 100644
@@ -1155,8 +1155,8 @@ static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
        return path && path->ctls[ctl_type];
 }
 
-static const char * const channel_name[4] = {
-       "Front", "Surround", "CLFE", "Side"
+static const char * const channel_name[] = {
+       "Front", "Surround", "CLFE", "Side", "Back",
 };
 
 /* give some appropriate ctl name prefix for the given line out channel */
@@ -1182,7 +1182,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
 
        /* multi-io channels */
        if (ch >= cfg->line_outs)
-               return channel_name[ch];
+               goto fixed_name;
 
        switch (cfg->line_out_type) {
        case AUTO_PIN_SPEAKER_OUT:
@@ -1234,6 +1234,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
        if (cfg->line_outs == 1 && !spec->multi_ios)
                return "Line Out";
 
+ fixed_name:
        if (ch >= ARRAY_SIZE(channel_name)) {
                snd_BUG();
                return "PCM";
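
Why this matters: multi-io setups can hand get_line_out_pfx() a channel index past the old fixed size of 4, so sizing the table by its initializer and funnelling every lookup through the shared bounds check avoids an out-of-bounds read. The lookup reduces to this sketch (name hypothetical):

static const char *channel_prefix(unsigned int ch)
{
	if (ch >= ARRAY_SIZE(channel_name))
		return "PCM";		/* never index past the table */
	return channel_name[ch];
}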
index 099722e..748a3c4 100644
@@ -1306,6 +1306,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
        SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
        SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x3842, 0x104b, "EVGA X299 Dark", QUIRK_R3DI),
        SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
        SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
index 64a9440..5c0b1a0 100644
@@ -4589,6 +4589,11 @@ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",    patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",   patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP",        patch_gf_hdmi),
index 172ffc2..7b5f194 100644
@@ -7063,6 +7063,8 @@ enum {
        ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC295_FIXUP_DISABLE_DAC3,
        ALC285_FIXUP_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_HEADSET_MIC,
        ALC280_FIXUP_HP_HEADSET_MIC,
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
@@ -8033,6 +8035,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_THINKPAD_ACPI
        },
+       [ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC245_FIXUP_CS35L41_SPI_2
+       },
+       [ALC285_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11050 },
+                       { 0x1b, 0x03a11c30 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+       },
        [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -9363,7 +9381,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
        SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
-       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
@@ -9458,7 +9476,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
-        SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9469,8 +9487,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b77, "HP EliteBook 865 G10", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
@@ -9480,7 +9503,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8c26, "HP EliteBook 800 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9500,6 +9525,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+       SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
@@ -9522,6 +9549,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
@@ -9618,6 +9646,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x7724, "Clevo L140AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -11663,6 +11692,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
        SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
        SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+       SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
        SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
@@ -11689,6 +11719,8 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
index 0bc6e40..4406a5d 100644
@@ -311,6 +311,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "8A22"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
+               }
+       },
        {}
 };
 
index 8538e28..1e42052 100644
@@ -46,7 +46,7 @@ static const struct reg_default cs35l41_reg[] = {
        { CS35L41_DSP1_RX5_SRC,                 0x00000020 },
        { CS35L41_DSP1_RX6_SRC,                 0x00000021 },
        { CS35L41_DSP1_RX7_SRC,                 0x0000003A },
-       { CS35L41_DSP1_RX8_SRC,                 0x00000001 },
+       { CS35L41_DSP1_RX8_SRC,                 0x0000003B },
        { CS35L41_NGATE1_SRC,                   0x00000008 },
        { CS35L41_NGATE2_SRC,                   0x00000009 },
        { CS35L41_AMP_DIG_VOL_CTRL,             0x00008000 },
@@ -58,8 +58,8 @@ static const struct reg_default cs35l41_reg[] = {
        { CS35L41_IRQ1_MASK2,                   0xFFFFFFFF },
        { CS35L41_IRQ1_MASK3,                   0xFFFF87FF },
        { CS35L41_IRQ1_MASK4,                   0xFEFFFFFF },
-       { CS35L41_GPIO1_CTRL1,                  0xE1000001 },
-       { CS35L41_GPIO2_CTRL1,                  0xE1000001 },
+       { CS35L41_GPIO1_CTRL1,                  0x81000001 },
+       { CS35L41_GPIO2_CTRL1,                  0x81000001 },
        { CS35L41_MIXER_NGATE_CFG,              0x00000000 },
        { CS35L41_MIXER_NGATE_CH1_CFG,          0x00000303 },
        { CS35L41_MIXER_NGATE_CH2_CFG,          0x00000303 },
index 46762f7..d1677d7 100644
@@ -852,10 +852,11 @@ static void cs35l56_dsp_work(struct work_struct *work)
         */
        if (cs35l56->sdw_peripheral) {
                cs35l56->sdw_irq_no_unmask = true;
-               cancel_work_sync(&cs35l56->sdw_irq_work);
+               flush_work(&cs35l56->sdw_irq_work);
                sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_MASK_1, 0);
                sdw_read_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_STAT_1);
                sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_STAT_1, 0xFF);
+               flush_work(&cs35l56->sdw_irq_work);
        }
 
        ret = cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_SHUTDOWN);
index da6fcf7..de978c3 100644
@@ -746,6 +746,8 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
        struct tx_macro *tx = snd_soc_component_get_drvdata(component);
 
        val = ucontrol->value.enumerated.item[0];
+       if (val >= e->items)
+               return -EINVAL;
 
        switch (e->reg) {
        case CDC_TX_INP_MUX_ADC_MUX0_CFG0:
@@ -772,6 +774,9 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
        case CDC_TX_INP_MUX_ADC_MUX7_CFG0:
                mic_sel_reg = CDC_TX7_TX_PATH_CFG0;
                break;
+       default:
+               dev_err(component->dev, "Error in configuration!!\n");
+               return -EINVAL;
        }
 
        if (val != 0) {
index 2935c1b..5bc46b0 100644
@@ -267,7 +267,9 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
                ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
                        rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
                        | IRQF_ONESHOT, "rt5682", rt5682);
-               if (ret)
+               if (!ret)
+                       rt5682->irq = i2c->irq;
+               else
                        dev_err(&i2c->dev, "Failed to request IRQ: %d\n", ret);
        }
 
index f6c798b..5d99254 100644
@@ -2959,6 +2959,9 @@ static int rt5682_suspend(struct snd_soc_component *component)
        if (rt5682->is_sdw)
                return 0;
 
+       if (rt5682->irq)
+               disable_irq(rt5682->irq);
+
        cancel_delayed_work_sync(&rt5682->jack_detect_work);
        cancel_delayed_work_sync(&rt5682->jd_check_work);
        if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
@@ -3027,6 +3030,9 @@ static int rt5682_resume(struct snd_soc_component *component)
        mod_delayed_work(system_power_efficient_wq,
                &rt5682->jack_detect_work, msecs_to_jiffies(0));
 
+       if (rt5682->irq)
+               enable_irq(rt5682->irq);
+
        return 0;
 }
 #else
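
The ordering here follows a common quiesce pattern: disable the IRQ before cancelling its work so the handler cannot requeue jack detection mid-suspend, then re-enable only after the work has been rescheduled on resume. A compressed sketch, assuming the fields shown in the hunks above:

static void rt5682_quiesce_jack_irq(struct rt5682_priv *rt5682)
{
	if (rt5682->irq)
		disable_irq(rt5682->irq);	/* stop new detect IRQs */
	cancel_delayed_work_sync(&rt5682->jack_detect_work);
}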
index d568c69..e8efd8a 100644
@@ -1462,6 +1462,7 @@ struct rt5682_priv {
        int pll_out[RT5682_PLLS];
 
        int jack_type;
+       int irq;
        int irq_work_delay_time;
 };
 
index 00b6036..c293244 100644
@@ -53,6 +53,18 @@ static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
        { .reg = 0x09, .def = 0x0000 }
 };
 
+/*
+ * ssm2602 register patch
+ * Workaround for playback distortions after power up: activates digital
+ * core, and then powers on output, DAC, and whole chip at the same time
+ */
+
+static const struct reg_sequence ssm2602_patch[] = {
+       { SSM2602_ACTIVE, 0x01 },
+       { SSM2602_PWR,    0x07 },
+       { SSM2602_RESET,  0x00 },
+};
+
 
 /*Appending several "None"s just for OSS mixer use*/
 static const char *ssm2602_input_select[] = {
@@ -598,6 +610,9 @@ static int ssm260x_component_probe(struct snd_soc_component *component)
                return ret;
        }
 
+       regmap_register_patch(ssm2602->regmap, ssm2602_patch,
+                             ARRAY_SIZE(ssm2602_patch));
+
        /* set the update bits */
        regmap_update_bits(ssm2602->regmap, SSM2602_LINVOL,
                            LINVOL_LRIN_BOTH, LINVOL_LRIN_BOTH);
index acdf98b..399a489 100644
@@ -132,13 +132,13 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
 
                /* Error Handling: TX */
                if (isr[i] & ISR_TXFO) {
-                       dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+                       dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i);
                        irq_valid = true;
                }
 
                /* Error Handling: RX */
                if (isr[i] & ISR_RXFO) {
-                       dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+                       dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i);
                        irq_valid = true;
                }
        }
@@ -183,30 +183,6 @@ static void i2s_stop(struct dw_i2s_dev *dev,
        }
 }
 
-static int dw_i2s_startup(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *cpu_dai)
-{
-       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
-       union dw_i2s_snd_dma_data *dma_data = NULL;
-
-       if (!(dev->capability & DWC_I2S_RECORD) &&
-                       (substream->stream == SNDRV_PCM_STREAM_CAPTURE))
-               return -EINVAL;
-
-       if (!(dev->capability & DWC_I2S_PLAY) &&
-                       (substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
-               return -EINVAL;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               dma_data = &dev->play_dma_data;
-       else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-               dma_data = &dev->capture_dma_data;
-
-       snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)dma_data);
-
-       return 0;
-}
-
 static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
 {
        u32 ch_reg;
@@ -305,12 +281,6 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *dai)
-{
-       snd_soc_dai_set_dma_data(dai, substream, NULL);
-}
-
 static int dw_i2s_prepare(struct snd_pcm_substream *substream,
                          struct snd_soc_dai *dai)
 {
@@ -382,8 +352,6 @@ static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
 }
 
 static const struct snd_soc_dai_ops dw_i2s_dai_ops = {
-       .startup        = dw_i2s_startup,
-       .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
        .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
@@ -625,6 +593,14 @@ static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
 
 }
 
+static int dw_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       snd_soc_dai_init_dma_data(dai, &dev->play_dma_data, &dev->capture_dma_data);
+       return 0;
+}
+
 static int dw_i2s_probe(struct platform_device *pdev)
 {
        const struct i2s_platform_data *pdata = pdev->dev.platform_data;
@@ -643,6 +619,7 @@ static int dw_i2s_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        dw_i2s_dai->ops = &dw_i2s_dai_ops;
+       dw_i2s_dai->probe = dw_i2s_dai_probe;
 
        dev->i2s_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(dev->i2s_base))
index 94341e4..3f08082 100644
@@ -1159,7 +1159,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
        if (ret) {
                dev_err(&pdev->dev, "failed to register PCM\n");
-               return ret;
+               goto err_pm_disable;
        }
 
        fsl_micfil_dai.capture.formats = micfil->soc->formats;
@@ -1169,9 +1169,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "failed to register component %s\n",
                        fsl_micfil_component.name);
+               goto err_pm_disable;
        }
 
        return ret;
+
+err_pm_disable:
+       pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+static void fsl_micfil_remove(struct platform_device *pdev)
+{
+       pm_runtime_disable(&pdev->dev);
 }
 
 static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
@@ -1232,6 +1243,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {
 
 static struct platform_driver fsl_micfil_driver = {
        .probe = fsl_micfil_probe,
+       .remove_new = fsl_micfil_remove,
        .driver = {
                .name = "fsl-micfil-dai",
                .pm = &fsl_micfil_pm_ops,
index 02683dc..1860099 100644
@@ -169,6 +169,7 @@ static bool apl_lp_streaming(struct avs_dev *adev)
 {
        struct avs_path *path;
 
+       spin_lock(&adev->path_list_lock);
        /* Any gateway without buffer allocated in LP area disqualifies D0IX. */
        list_for_each_entry(path, &adev->path_list, node) {
                struct avs_path_pipeline *ppl;
@@ -188,11 +189,14 @@ static bool apl_lp_streaming(struct avs_dev *adev)
                                if (cfg->copier.dma_type == INVALID_OBJECT_ID)
                                        continue;
 
-                               if (!mod->gtw_attrs.lp_buffer_alloc)
+                               if (!mod->gtw_attrs.lp_buffer_alloc) {
+                                       spin_unlock(&adev->path_list_lock);
                                        return false;
+                               }
                        }
                }
        }
+       spin_unlock(&adev->path_list_lock);
 
        return true;
 }
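
An equivalent structure with a single unlock site, for comparison; the predicate is hypothetical. Keeping one exit under the lock avoids the easy-to-miss early return that this hunk had to patch up by hand.

static bool lp_streaming_locked(struct avs_dev *adev)
{
	bool ret = true;

	spin_lock(&adev->path_list_lock);
	if (path_list_has_non_lp_buffer(adev))	/* hypothetical walk */
		ret = false;
	spin_unlock(&adev->path_list_lock);

	return ret;
}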
index d7fccdc..0cf38c9 100644
@@ -283,8 +283,8 @@ void avs_release_firmwares(struct avs_dev *adev);
 
 int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
                        u8 core_id, u8 domain, void *param, u32 param_size,
-                       u16 *instance_id);
-void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+                       u8 *instance_id);
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
                           u8 ppl_instance_id, u8 core_id);
 int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
                            bool lp, u16 attributes, u8 *instance_id);
index b2823c2..60f8fb0 100644
@@ -443,7 +443,7 @@ static int avs_register_i2s_boards(struct avs_dev *adev)
        }
 
        for (mach = boards->machs; mach->id[0]; mach++) {
-               if (!acpi_dev_present(mach->id, NULL, -1))
+               if (!acpi_dev_present(mach->id, mach->uid, -1))
                        continue;
 
                if (mach->machine_quirk)
index a8b14b7..3dfa2e9 100644
@@ -21,17 +21,25 @@ static struct avs_dev *avs_get_kcontrol_adev(struct snd_kcontrol *kcontrol)
        return to_avs_dev(w->dapm->component->dev);
 }
 
-static struct avs_path_module *avs_get_kcontrol_module(struct avs_dev *adev, u32 id)
+static struct avs_path_module *avs_get_volume_module(struct avs_dev *adev, u32 id)
 {
        struct avs_path *path;
        struct avs_path_pipeline *ppl;
        struct avs_path_module *mod;
 
-       list_for_each_entry(path, &adev->path_list, node)
-               list_for_each_entry(ppl, &path->ppl_list, node)
-                       list_for_each_entry(mod, &ppl->mod_list, node)
-                               if (mod->template->ctl_id && mod->template->ctl_id == id)
+       spin_lock(&adev->path_list_lock);
+       list_for_each_entry(path, &adev->path_list, node) {
+               list_for_each_entry(ppl, &path->ppl_list, node) {
+                       list_for_each_entry(mod, &ppl->mod_list, node) {
+                               if (guid_equal(&mod->template->cfg_ext->type, &AVS_PEAKVOL_MOD_UUID)
+                                   && mod->template->ctl_id == id) {
+                                       spin_unlock(&adev->path_list_lock);
                                        return mod;
+                               }
+                       }
+               }
+       }
+       spin_unlock(&adev->path_list_lock);
 
        return NULL;
 }
@@ -49,7 +57,7 @@ int avs_control_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_va
        /* prevent access to modules while path is being constructed */
        mutex_lock(&adev->path_mutex);
 
-       active_module = avs_get_kcontrol_module(adev, ctl_data->id);
+       active_module = avs_get_volume_module(adev, ctl_data->id);
        if (active_module) {
                ret = avs_ipc_peakvol_get_volume(adev, active_module->module_id,
                                                 active_module->instance_id, &dspvols,
@@ -89,7 +97,7 @@ int avs_control_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_va
                changed = 1;
        }
 
-       active_module = avs_get_kcontrol_module(adev, ctl_data->id);
+       active_module = avs_get_volume_module(adev, ctl_data->id);
        if (active_module) {
                dspvol.channel_id = AVS_ALL_CHANNELS_MASK;
                dspvol.target_volume = *volume;
index b881100..aa03af4 100644 (file)
@@ -225,7 +225,7 @@ err:
 
 int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
                        u8 core_id, u8 domain, void *param, u32 param_size,
-                       u16 *instance_id)
+                       u8 *instance_id)
 {
        struct avs_module_entry mentry;
        bool was_loaded = false;
@@ -272,7 +272,7 @@ err_mod_entry:
        return ret;
 }
 
-void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
                           u8 ppl_instance_id, u8 core_id)
 {
        struct avs_module_entry mentry;
index d3b60ae..7f23a30 100644 (file)
@@ -619,7 +619,7 @@ enum avs_channel_config {
        AVS_CHANNEL_CONFIG_DUAL_MONO = 9,
        AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_0 = 10,
        AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_1 = 11,
-       AVS_CHANNEL_CONFIG_4_CHANNEL = 12,
+       AVS_CHANNEL_CONFIG_7_1 = 12,
        AVS_CHANNEL_CONFIG_INVALID
 };
 
index 197222c..657f7b0 100644 (file)
@@ -37,7 +37,7 @@ struct avs_path_pipeline {
 
 struct avs_path_module {
        u16 module_id;
-       u16 instance_id;
+       u8 instance_id;
        union avs_gtw_attributes gtw_attrs;
 
        struct avs_tplg_module *template;
index 31c032a..1fbb2c2 100644 (file)
@@ -468,21 +468,34 @@ static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_so
 
        host_stream = snd_hdac_ext_stream_assign(bus, substream, HDAC_EXT_STREAM_TYPE_HOST);
        if (!host_stream) {
-               kfree(data);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        data->host_stream = host_stream;
-       snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+       ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+       if (ret < 0)
+               goto err;
+
        /* avoid wrap-around with wall-clock */
-       snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
-       snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
+       ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
+       if (ret < 0)
+               goto err;
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
+       if (ret < 0)
+               goto err;
+
        snd_pcm_set_sync(substream);
 
        dev_dbg(dai->dev, "%s fe STARTUP tag %d str %p",
                __func__, hdac_stream(host_stream)->stream_tag, substream);
 
        return 0;
+
+err:
+       kfree(data);
+       return ret;
 }
 
 static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
index 70a9420..2759282 100644 (file)
@@ -18,7 +18,7 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
 {
        struct avs_probe_cfg cfg = {{0}};
        struct avs_module_entry mentry;
-       u16 dummy;
+       u8 dummy;
 
        avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
 
index 6d9cfe0..d0f6c94 100644 (file)
@@ -218,18 +218,48 @@ static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        return 0;
 }
 
+static int jz4740_i2s_get_i2sdiv(unsigned long mclk, unsigned long rate,
+                                unsigned long i2sdiv_max)
+{
+       unsigned long div, rate1, rate2, err1, err2;
+
+       div = mclk / (64 * rate);
+       if (div == 0)
+               div = 1;
+
+       rate1 = mclk / (64 * div);
+       rate2 = mclk / (64 * (div + 1));
+
+       err1 = abs(rate1 - rate);
+       err2 = abs(rate2 - rate);
+
+       /*
+        * Choose the divider that produces the smallest error in the
+        * output rate and reject dividers with a 5% or higher error.
+        * In the event that both dividers are outside the acceptable
+        * error margin, reject the rate to prevent distorted audio.
+        * (The 5% margin is arbitrary.)
+        */
+       if (div <= i2sdiv_max && err1 <= err2 && err1 < rate/20)
+               return div;
+       if (div < i2sdiv_max && err2 < rate/20)
+               return div + 1;
+
+       return -EINVAL;
+}
+
 static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
 {
        struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        struct regmap_field *div_field;
+       unsigned long i2sdiv_max;
        unsigned int sample_size;
-       uint32_t ctrl;
-       int div;
+       uint32_t ctrl, conf;
+       int div = 1;
 
        regmap_read(i2s->regmap, JZ_REG_AIC_CTRL, &ctrl);
-
-       div = clk_get_rate(i2s->clk_i2s) / (64 * params_rate(params));
+       regmap_read(i2s->regmap, JZ_REG_AIC_CONF, &conf);
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S8:
@@ -258,11 +288,27 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
                        ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;
 
                div_field = i2s->field_i2sdiv_playback;
+               i2sdiv_max = GENMASK(i2s->soc_info->field_i2sdiv_playback.msb,
+                                    i2s->soc_info->field_i2sdiv_playback.lsb);
        } else {
                ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE;
                ctrl |= FIELD_PREP(JZ_AIC_CTRL_INPUT_SAMPLE_SIZE, sample_size);
 
                div_field = i2s->field_i2sdiv_capture;
+               i2sdiv_max = GENMASK(i2s->soc_info->field_i2sdiv_capture.msb,
+                                    i2s->soc_info->field_i2sdiv_capture.lsb);
+       }
+
+       /*
+        * Only calculate I2SDIV if we're supplying the bit or frame clock.
+        * If the codec is supplying both clocks then the divider output is
+        * unused, and we don't want it to limit the allowed sample rates.
+        */
+       if (conf & (JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER)) {
+               div = jz4740_i2s_get_i2sdiv(clk_get_rate(i2s->clk_i2s),
+                                           params_rate(params), i2sdiv_max);
+               if (div < 0)
+                       return div;
        }
 
        regmap_write(i2s->regmap, JZ_REG_AIC_CTRL, ctrl);
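
For illustration, a stand-alone user-space model of the divider selection above; the 12.288 MHz master clock is hypothetical but shows why 48 kHz passes while 44.1 kHz misses every divider by more than 5% and is rejected:

#include <stdio.h>
#include <stdlib.h>

/* User-space model of jz4740_i2s_get_i2sdiv(); long arithmetic keeps the
 * error computation signed. */
static long get_i2sdiv(long mclk, long rate, long i2sdiv_max)
{
	long div = mclk / (64 * rate);
	long rate1, rate2, err1, err2;

	if (div == 0)
		div = 1;

	rate1 = mclk / (64 * div);
	rate2 = mclk / (64 * (div + 1));
	err1 = labs(rate1 - rate);
	err2 = labs(rate2 - rate);

	/* accept only dividers within the (arbitrary) 5% error margin */
	if (div <= i2sdiv_max && err1 <= err2 && err1 < rate / 20)
		return div;
	if (div < i2sdiv_max && err2 < rate / 20)
		return div + 1;
	return -1;
}

int main(void)
{
	printf("48000 Hz -> div %ld\n", get_i2sdiv(12288000, 48000, 15)); /* 4 */
	printf("44100 Hz -> div %ld\n", get_i2sdiv(12288000, 44100, 15)); /* -1 */
	return 0;
}
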
index a6b4f29..539e3a0 100644 (file)
@@ -644,9 +644,3 @@ int mt8186_init_clock(struct mtk_base_afe *afe)
 
        return 0;
 }
-
-void mt8186_deinit_clock(void *priv)
-{
-       struct mtk_base_afe *afe = priv;
-       mt8186_audsys_clk_unregister(afe);
-}
index d598871..a9d59e5 100644 (file)
@@ -81,7 +81,6 @@ enum {
 struct mtk_base_afe;
 int mt8186_set_audio_int_bus_parent(struct mtk_base_afe *afe, int clk_id);
 int mt8186_init_clock(struct mtk_base_afe *afe);
-void mt8186_deinit_clock(void *priv);
 int mt8186_afe_enable_cgs(struct mtk_base_afe *afe);
 void mt8186_afe_disable_cgs(struct mtk_base_afe *afe);
 int mt8186_afe_enable_clock(struct mtk_base_afe *afe);
index 41172a8..a868a04 100644 (file)
@@ -2848,10 +2848,6 @@ static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = devm_add_action_or_reset(dev, mt8186_deinit_clock, (void *)afe);
-       if (ret)
-               return ret;
-
        /* init memif */
        afe->memif_32bit_supported = 0;
        afe->memif_size = MT8186_MEMIF_NUM;
index 578969c..5666be6 100644 (file)
@@ -84,6 +84,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
        GATE_AUD2(CLK_AUD_ETDM_OUT1_BCLK, "aud_etdm_out1_bclk", "top_audio", 24),
 };
 
+static void mt8186_audsys_clk_unregister(void *data)
+{
+       struct mtk_base_afe *afe = data;
+       struct mt8186_afe_private *afe_priv = afe->platform_priv;
+       struct clk *clk;
+       struct clk_lookup *cl;
+       int i;
+
+       if (!afe_priv)
+               return;
+
+       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+               cl = afe_priv->lookup[i];
+               if (!cl)
+                       continue;
+
+               clk = cl->clk;
+               clk_unregister_gate(clk);
+
+               clkdev_drop(cl);
+       }
+}
+
 int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
 {
        struct mt8186_afe_private *afe_priv = afe->platform_priv;
@@ -124,27 +147,6 @@ int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
                afe_priv->lookup[i] = cl;
        }
 
-       return 0;
+       return devm_add_action_or_reset(afe->dev, mt8186_audsys_clk_unregister, afe);
 }
 
-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe)
-{
-       struct mt8186_afe_private *afe_priv = afe->platform_priv;
-       struct clk *clk;
-       struct clk_lookup *cl;
-       int i;
-
-       if (!afe_priv)
-               return;
-
-       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
-               cl = afe_priv->lookup[i];
-               if (!cl)
-                       continue;
-
-               clk = cl->clk;
-               clk_unregister_gate(clk);
-
-               clkdev_drop(cl);
-       }
-}
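
The hunks above replace a manually exported deinit function with a device-managed cleanup action registered at the end of the setup path, so teardown runs automatically both on probe failure and on driver removal. A generic sketch of that pattern follows; struct example_ctx, example_acquire() and example_release() are placeholders, not real kernel APIs:

static void example_teardown(void *data)
{
	struct example_ctx *ctx = data;

	example_release(ctx);		/* undo example_acquire() */
}

static int example_setup(struct device *dev, struct example_ctx *ctx)
{
	int ret;

	ret = example_acquire(ctx);
	if (ret)
		return ret;

	/* runs example_teardown() on probe failure and on device removal */
	return devm_add_action_or_reset(dev, example_teardown, ctx);
}
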
index b8d6a06..897a291 100644 (file)
@@ -10,6 +10,5 @@
 #define _MT8186_AUDSYS_CLK_H_
 
 int mt8186_audsys_clk_register(struct mtk_base_afe *afe);
-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe);
 
 #endif
index adb69d7..4fb1ac8 100644 (file)
@@ -2405,6 +2405,9 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
                if (!snd_soc_dpcm_be_can_update(fe, be, stream))
                        continue;
 
+               if (!snd_soc_dpcm_can_be_prepared(fe, be, stream))
+                       continue;
+
                if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
@@ -3042,3 +3045,20 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
        return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
+
+/*
+ * We can only prepare a BE DAI if none of its FEs are prepared,
+ * running or paused for the specified stream direction.
+ */
+int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
+                                struct snd_soc_pcm_runtime *be, int stream)
+{
+       const enum snd_soc_dpcm_state state[] = {
+               SND_SOC_DPCM_STATE_START,
+               SND_SOC_DPCM_STATE_PAUSED,
+               SND_SOC_DPCM_STATE_PREPARE,
+       };
+
+       return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
+}
+EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_prepared);
index 4e0c48a..749e856 100644 (file)
@@ -209,7 +209,12 @@ int acp_sof_ipc_msg_data(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sp
                acp_mailbox_read(sdev, offset, p, sz);
        } else {
                struct snd_pcm_substream *substream = sps->substream;
-               struct acp_dsp_stream *stream = substream->runtime->private_data;
+               struct acp_dsp_stream *stream;
+
+               if (!substream || !substream->runtime)
+                       return -ESTRPIPE;
+
+               stream = substream->runtime->private_data;
 
                if (!stream)
                        return -ESTRPIPE;
index b42b598..d547318 100644 (file)
@@ -438,8 +438,8 @@ void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg)
                /* should we prevent DSP entering D3 ? */
                if (!sdev->ipc_dump_printed)
                        dev_info(sdev->dev,
-                                "preventing DSP entering D3 state to preserve context\n");
-               pm_runtime_get_noresume(sdev->dev);
+                                "Attempting to prevent DSP from entering D3 state to preserve context\n");
+               pm_runtime_get_if_in_use(sdev->dev);
        }
 
        /* dump vital information to the logs */
index 775582a..b7cbf66 100644 (file)
@@ -19,6 +19,9 @@
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_MLINK)
 
+/* worst-case number of sublinks is used for sublink refcount array allocation only */
+#define HDAML_MAX_SUBLINKS (AZX_ML_LCTL_CPA_SHIFT - AZX_ML_LCTL_SPA_SHIFT)
+
 /**
  * struct hdac_ext2_link - HDAudio extended+alternate link
  *
@@ -33,6 +36,7 @@
  * @leptr:             extended link pointer
  * @eml_lock:          mutual exclusion to access shared registers e.g. CPA/SPA bits
  * in LCTL register
+ * @sublink_ref_count: array of refcounts, required to power-manage sublinks independently
  * @base_ptr:          pointer to shim/ip/shim_vs space
  * @instance_offset:   offset between each of @slcount instances managed by link
  * @shim_offset:       offset to SHIM register base
@@ -53,6 +57,7 @@ struct hdac_ext2_link {
        u32 leptr;
 
        struct mutex eml_lock; /* prevent concurrent access to e.g. CPA/SPA */
+       int sublink_ref_count[HDAML_MAX_SUBLINKS];
 
        /* internal values computed from LCAP contents */
        void __iomem *base_ptr;
@@ -68,6 +73,7 @@ struct hdac_ext2_link {
 #define AZX_REG_SDW_SHIM_OFFSET                                0x0
 #define AZX_REG_SDW_IP_OFFSET                          0x100
 #define AZX_REG_SDW_VS_SHIM_OFFSET                     0x6000
+#define AZX_REG_SDW_SHIM_PCMSyCM(y)                    (0x16 + 0x4 * (y))
 
 /* only one instance supported */
 #define AZX_REG_INTEL_DMIC_SHIM_OFFSET                 0x0
@@ -91,7 +97,7 @@ struct hdac_ext2_link {
  */
 
 static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
-                         void __iomem *ml_addr, int link_idx)
+                         void __iomem *remap_addr, void __iomem *ml_addr, int link_idx)
 {
        struct hdac_ext_link *hlink = &h2link->hext_link;
        u32 base_offset;
@@ -126,15 +132,16 @@ static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
                link_idx, h2link->slcount);
 
        /* find IP ID and offsets */
-       h2link->leptr = readl(hlink->ml_addr + AZX_REG_ML_LEPTR);
+       h2link->leptr = readl(ml_addr + AZX_REG_ML_LEPTR);
 
        h2link->elid = FIELD_GET(AZX_REG_ML_LEPTR_ID, h2link->leptr);
 
        base_offset = FIELD_GET(AZX_REG_ML_LEPTR_PTR, h2link->leptr);
-       h2link->base_ptr = hlink->ml_addr + base_offset;
+       h2link->base_ptr = remap_addr + base_offset;
 
        switch (h2link->elid) {
        case AZX_REG_ML_LEPTR_ID_SDW:
+               h2link->instance_offset = AZX_REG_SDW_INSTANCE_OFFSET;
                h2link->shim_offset = AZX_REG_SDW_SHIM_OFFSET;
                h2link->ip_offset = AZX_REG_SDW_IP_OFFSET;
                h2link->shim_vs_offset = AZX_REG_SDW_VS_SHIM_OFFSET;
@@ -149,6 +156,7 @@ static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
                        link_idx, base_offset);
                break;
        case AZX_REG_ML_LEPTR_ID_INTEL_SSP:
+               h2link->instance_offset = AZX_REG_INTEL_SSP_INSTANCE_OFFSET;
                h2link->shim_offset = AZX_REG_INTEL_SSP_SHIM_OFFSET;
                h2link->ip_offset = AZX_REG_INTEL_SSP_IP_OFFSET;
                h2link->shim_vs_offset = AZX_REG_INTEL_SSP_VS_SHIM_OFFSET;
@@ -333,6 +341,21 @@ static void hdaml_link_set_lsdiid(u32 __iomem *lsdiid, int dev_num)
        writel(val, lsdiid);
 }
 
+static void hdaml_shim_map_stream_ch(u16 __iomem *pcmsycm, int lchan, int hchan,
+                                    int stream_id, int dir)
+{
+       u16 val;
+
+       val = readw(pcmsycm);
+
+       u16p_replace_bits(&val, lchan, GENMASK(3, 0));
+       u16p_replace_bits(&val, hchan, GENMASK(7, 4));
+       u16p_replace_bits(&val, stream_id, GENMASK(13, 8));
+       u16p_replace_bits(&val, dir, BIT(15));
+
+       writew(val, pcmsycm);
+}
+
 static void hdaml_lctl_offload_enable(u32 __iomem *lctl, bool enable)
 {
        u32 val = readl(lctl);
@@ -364,7 +387,7 @@ static int hda_ml_alloc_h2link(struct hdac_bus *bus, int index)
        hlink->bus = bus;
        hlink->ml_addr = bus->mlcap + AZX_ML_BASE + (AZX_ML_INTERVAL * index);
 
-       ret = hdaml_lnk_enum(bus->dev, h2link, hlink->ml_addr, index);
+       ret = hdaml_lnk_enum(bus->dev, h2link, bus->remap_addr, hlink->ml_addr, index);
        if (ret < 0) {
                kfree(h2link);
                return ret;
@@ -641,8 +664,13 @@ static int hdac_bus_eml_power_up_base(struct hdac_bus *bus, bool alt, int elid,
        if (eml_lock)
                mutex_lock(&h2link->eml_lock);
 
-       if (++hlink->ref_count > 1)
-               goto skip_init;
+       if (!alt) {
+               if (++hlink->ref_count > 1)
+                       goto skip_init;
+       } else {
+               if (++h2link->sublink_ref_count[sublink] > 1)
+                       goto skip_init;
+       }
 
        ret = hdaml_link_init(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
 
@@ -684,9 +712,13 @@ static int hdac_bus_eml_power_down_base(struct hdac_bus *bus, bool alt, int elid
        if (eml_lock)
                mutex_lock(&h2link->eml_lock);
 
-       if (--hlink->ref_count > 0)
-               goto skip_shutdown;
-
+       if (!alt) {
+               if (--hlink->ref_count > 0)
+                       goto skip_shutdown;
+       } else {
+               if (--h2link->sublink_ref_count[sublink] > 0)
+                       goto skip_shutdown;
+       }
        ret = hdaml_link_shutdown(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
 
 skip_shutdown:
@@ -740,6 +772,40 @@ int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num)
        return 0;
 } EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_set_lsdiid, SND_SOC_SOF_HDA_MLINK);
 
+/*
+ * The 'y' parameter comes from the PCMSyCM hardware register naming; it refers
+ * to the PDI index, i.e. the FIFO used for RX or TX.
+ */
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                                  int channel_mask, int stream_id, int dir)
+{
+       struct hdac_ext2_link *h2link;
+       u16 __iomem *pcmsycm;
+       u16 val;
+
+       h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+       if (!h2link)
+               return -ENODEV;
+
+       pcmsycm = h2link->base_ptr + h2link->shim_offset +
+               h2link->instance_offset * sublink +
+               AZX_REG_SDW_SHIM_PCMSyCM(y);
+
+       mutex_lock(&h2link->eml_lock);
+
+       hdaml_shim_map_stream_ch(pcmsycm, 0, hweight32(channel_mask),
+                                stream_id, dir);
+
+       mutex_unlock(&h2link->eml_lock);
+
+       val = readw(pcmsycm);
+
+       dev_dbg(bus->dev, "channel_mask %#x stream_id %d dir %d pcmscm %#x\n",
+               channel_mask, stream_id, dir, val);
+
+       return 0;
+} EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_map_stream_ch, SND_SOC_SOF_HDA_MLINK);
+
 void hda_bus_ml_put_all(struct hdac_bus *bus)
 {
        struct hdac_ext_link *hlink;
@@ -836,6 +902,18 @@ struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus)
 }
 EXPORT_SYMBOL_NS(hdac_bus_eml_dmic_get_hlink, SND_SOC_SOF_HDA_MLINK);
 
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus)
+{
+       struct hdac_ext2_link *h2link;
+
+       h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+       if (!h2link)
+               return NULL;
+
+       return &h2link->hext_link;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_get_hlink, SND_SOC_SOF_HDA_MLINK);
+
 int hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable)
 {
        struct hdac_ext2_link *h2link;
index fc1eb8e..ba4ef29 100644 (file)
@@ -2103,10 +2103,13 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
         * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is.
         */
 
-       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+               /* Clear stale command */
+               config->flags &= ~SOF_DAI_CONFIG_FLAGS_CMD_MASK;
                config->flags |= flags;
-       else
+       } else {
                config->flags = flags;
+       }
 
        /* only send the IPC if the widget is set up in the DSP */
        if (swidget->use_count > 0) {
index 059eebf..5abe616 100644 (file)
@@ -59,7 +59,7 @@ static const struct sof_topology_token ipc4_in_audio_format_tokens[] = {
                audio_fmt.interleaving_style)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
-       {SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+       {SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, pin_index)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_IBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, buffer_size)},
@@ -79,7 +79,7 @@ static const struct sof_topology_token ipc4_out_audio_format_tokens[] = {
                audio_fmt.interleaving_style)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
-       {SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+       {SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, pin_index)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_OBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, buffer_size)},
index 567db32..d0ab6f3 100644 (file)
@@ -643,16 +643,17 @@ static int sof_pcm_probe(struct snd_soc_component *component)
                                       "%s/%s",
                                       plat_data->tplg_filename_prefix,
                                       plat_data->tplg_filename);
-       if (!tplg_filename)
-               return -ENOMEM;
+       if (!tplg_filename) {
+               ret = -ENOMEM;
+               goto pm_error;
+       }
 
        ret = snd_sof_load_topology(component, tplg_filename);
-       if (ret < 0) {
+       if (ret < 0)
                dev_err(component->dev, "error: failed to load DSP topology %d\n",
                        ret);
-               return ret;
-       }
 
+pm_error:
        pm_runtime_mark_last_busy(component->dev);
        pm_runtime_put_autosuspend(component->dev);
 
index 2fdbc53..2b23244 100644 (file)
@@ -164,7 +164,7 @@ static int sof_resume(struct device *dev, bool runtime_resume)
                ret = tplg_ops->set_up_all_pipelines(sdev, false);
                if (ret < 0) {
                        dev_err(sdev->dev, "Failed to restore pipeline after resume %d\n", ret);
-                       return ret;
+                       goto setup_fail;
                }
        }
 
@@ -178,6 +178,18 @@ static int sof_resume(struct device *dev, bool runtime_resume)
                        dev_err(sdev->dev, "ctx_restore IPC error during resume: %d\n", ret);
        }
 
+setup_fail:
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+       if (ret < 0) {
+               /*
+                * Debugfs cannot be read in runtime suspend, so cache
+                * the contents upon failure. This makes it possible
+                * to capture any DSP coredump information.
+                */
+               sof_cache_debugfs(sdev);
+       }
+#endif
+
        return ret;
 }
 
index fff1268..8d9e9d5 100644 (file)
@@ -218,12 +218,7 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
 
        ret = ipc->points_info(cdev, &desc, &num_desc);
        if (ret < 0)
-               goto exit;
-
-       pm_runtime_mark_last_busy(dev);
-       err = pm_runtime_put_autosuspend(dev);
-       if (err < 0)
-               dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
+               goto pm_error;
 
        for (i = 0; i < num_desc; i++) {
                offset = strlen(buf);
@@ -241,6 +236,13 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
        ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
 
        kfree(desc);
+
+pm_error:
+       pm_runtime_mark_last_busy(dev);
+       err = pm_runtime_put_autosuspend(dev);
+       if (err < 0)
+               dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
+
 exit:
        kfree(buf);
        return ret;
index d3d536b..f160dc4 100644 (file)
@@ -586,6 +586,10 @@ static int sof_copy_tuples(struct snd_sof_dev *sdev, struct snd_soc_tplg_vendor_
                                if (*num_copied_tuples == tuples_size)
                                        return 0;
                        }
+
+                       /* stop when we've found the required token instances */
+                       if (found == num_tokens * token_instance_num)
+                               return 0;
                }
 
                /* next array */
@@ -1261,7 +1265,7 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
                if (num_sets > 1) {
                        struct snd_sof_tuple *new_tuples;
 
-                       num_tuples += token_list[object_token_list[i]].count * num_sets;
+                       num_tuples += token_list[object_token_list[i]].count * (num_sets - 1);
                        new_tuples = krealloc(swidget->tuples,
                                              sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
                        if (!new_tuples) {
index 4b1c5ba..ab5fed9 100644 (file)
@@ -423,6 +423,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
        case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
        case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
        case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+       case USB_ID(0x0e41, 0x424b): /* Line6 Pod Go */
        case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
                return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
        }
index f8129c6..f7ddd73 100644 (file)
@@ -198,6 +198,15 @@ struct kvm_arm_copy_mte_tags {
        __u64 reserved[2];
 };
 
+/*
+ * Counter/Timer offset structure. Describes the virtual/physical offset.
+ * To be used with KVM_ARM_SET_COUNTER_OFFSET.
+ */
+struct kvm_arm_counter_offset {
+       __u64 counter_offset;
+       __u64 reserved;
+};
+
 #define KVM_ARM_TAGS_TO_GUEST          0
 #define KVM_ARM_TAGS_FROM_GUEST                1
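
A hedged user-space sketch of driving this from a VMM; availability should first be checked via KVM_CAP_COUNTER_OFFSET, and the exact counter semantics are those of the KVM API documentation, not guaranteed by this sketch:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: vm_fd is an open KVM VM file descriptor. */
static int set_counter_offset(int vm_fd, __u64 cycles)
{
	struct kvm_arm_counter_offset off = {
		.counter_offset = cycles,	/* cycles subtracted from the guest's counter views */
		.reserved = 0,			/* must be zero */
	};

	return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
}
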
 
@@ -372,6 +381,10 @@ enum {
 #endif
 };
 
+/* Device Control API on vm fd */
+#define KVM_ARM_VM_SMCCC_CTRL          0
+#define   KVM_ARM_VM_SMCCC_FILTER      0
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@@ -411,6 +424,8 @@ enum {
 #define KVM_ARM_VCPU_TIMER_CTRL                1
 #define   KVM_ARM_VCPU_TIMER_IRQ_VTIMER                0
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
+#define   KVM_ARM_VCPU_TIMER_IRQ_HVTIMER       2
+#define   KVM_ARM_VCPU_TIMER_IRQ_HPTIMER       3
 #define KVM_ARM_VCPU_PVTIME_CTRL       2
 #define   KVM_ARM_VCPU_PVTIME_IPA      0
 
@@ -469,6 +484,27 @@ enum {
 /* run->fail_entry.hardware_entry_failure_reason codes. */
 #define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED    (1ULL << 0)
 
+enum kvm_smccc_filter_action {
+       KVM_SMCCC_FILTER_HANDLE = 0,
+       KVM_SMCCC_FILTER_DENY,
+       KVM_SMCCC_FILTER_FWD_TO_USER,
+
+#ifdef __KERNEL__
+       NR_SMCCC_FILTER_ACTIONS
+#endif
+};
+
+struct kvm_smccc_filter {
+       __u32 base;
+       __u32 nr_functions;
+       __u8 action;
+       __u8 pad[15];
+};
+
+/* arm64-specific KVM_EXIT_HYPERCALL flags */
+#define KVM_HYPERCALL_EXIT_SMC         (1U << 0)
+#define KVM_HYPERCALL_EXIT_16BIT       (1U << 1)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
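
The filter is presumably installed through the VM device-attribute interface declared above (KVM_ARM_VM_SMCCC_CTRL / KVM_ARM_VM_SMCCC_FILTER); a hedged sketch denying an arbitrary function-ID range:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: deny nr_functions SMCCC function IDs starting at base. */
static int deny_smccc_range(int vm_fd, __u32 base, __u32 nr_functions)
{
	struct kvm_smccc_filter filter = {
		.base = base,
		.nr_functions = nr_functions,
		.action = KVM_SMCCC_FILTER_DENY,
	};
	struct kvm_device_attr attr = {
		.group = KVM_ARM_VM_SMCCC_CTRL,
		.attr = KVM_ARM_VM_SMCCC_FILTER,
		.addr = (__u64)(unsigned long)&filter,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}
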
index b890058..cb8ca46 100644 (file)
@@ -97,7 +97,7 @@
 #define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2         ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+/* FREE, was #define X86_FEATURE_LFENCE_RDTSC          ( 3*32+18) "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_ALWAYS             ( 3*32+21) /* "" Always-present feature */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI               ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT                        ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID               ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 1) /* Intel FlexPriority */
+#define X86_FEATURE_EPT                        ( 8*32+ 2) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID               ( 8*32+ 3) /* Intel Virtual Processor ID */
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
 #define X86_FEATURE_SGX_EDECCSSA       (11*32+18) /* "" SGX EDECCSSA user leaf function */
 #define X86_FEATURE_CALL_DEPTH         (11*32+19) /* "" Call depth tracking for RSB stuffing */
 #define X86_FEATURE_MSR_TSX_CTRL       (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+#define X86_FEATURE_SMBA               (11*32+21) /* "" Slow Memory Bandwidth Allocation */
+#define X86_FEATURE_BMEC               (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16                (12*32+ 5) /* AVX512 BFLOAT16 instructions */
 #define X86_FEATURE_CMPCCXADD           (12*32+ 7) /* "" CMPccXADD instructions */
+#define X86_FEATURE_ARCH_PERFMON_EXT   (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
+#define X86_FEATURE_FZRM               (12*32+10) /* "" Fast zero-length REP MOVSB */
+#define X86_FEATURE_FSRS               (12*32+11) /* "" Fast short REP STOSB */
+#define X86_FEATURE_FSRC               (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
 #define X86_FEATURE_LKGS               (12*32+18) /* "" Load "kernel" (userspace) GS */
 #define X86_FEATURE_AMX_FP16           (12*32+21) /* "" AMX fp16 Support */
 #define X86_FEATURE_AVX_IFMA            (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
+#define X86_FEATURE_LAM                        (12*32+26) /* Linear Address Masking */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC               (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD            (13*32+28) /* "" Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO             (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS                        (13*32+31) /* Branch Sampling available */
 
 #define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
 #define X86_FEATURE_X2AVIC             (15*32+18) /* Virtual x2apic */
 #define X86_FEATURE_V_SPEC_CTRL                (15*32+20) /* Virtual SPEC_CTRL */
+#define X86_FEATURE_VNMI               (15*32+25) /* Virtual NMI */
 #define X86_FEATURE_SVME_ADDR_CHK      (15*32+28) /* "" SVME addr check */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
 #define X86_FEATURE_V_TSC_AUX          (19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT       (19*32+10) /* "" AMD hardware-enforced cache coherency */
 
+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
+#define X86_FEATURE_NO_NESTED_DATA_BP  (20*32+ 0) /* "" No Nested Data Breakpoints */
+#define X86_FEATURE_LFENCE_RDTSC       (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_NULL_SEL_CLR_BASE  (20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS           (20*32+ 8) /* "" Automatic IBRS */
+#define X86_FEATURE_NO_SMM_CTL_MSR     (20*32+ 9) /* "" SMM_CTL MSR is not present */
+
 /*
  * BUG word(s)
  */
 #define X86_BUG_MMIO_UNKNOWN           X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
 #define X86_BUG_RETBLEED               X86_BUG(27) /* CPU is affected by RETBleed */
 #define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_SMT_RSB                        X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 5dfa4fb..fafe9be 100644 (file)
 # define DISABLE_CALL_DEPTH_TRACKING   (1 << (X86_FEATURE_CALL_DEPTH & 31))
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+# define DISABLE_LAM           0
+#else
+# define DISABLE_LAM           (1 << (X86_FEATURE_LAM & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD                0
 #else
 #define DISABLED_MASK10        0
 #define DISABLED_MASK11        (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
                         DISABLE_CALL_DEPTH_TRACKING)
-#define DISABLED_MASK12        0
+#define DISABLED_MASK12        (DISABLE_LAM)
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
index ad35355..3aedae6 100644 (file)
 
 /* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
 #define MSR_INTEGRITY_CAPS                     0x000002d9
+#define MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT      2
+#define MSR_INTEGRITY_CAPS_ARRAY_BIST          BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT   4
 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST       BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
 
index 7f467fe..1a6a1f9 100644 (file)
@@ -559,4 +559,7 @@ struct kvm_pmu_event_filter {
 #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
 #define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
 
+/* x86-specific KVM_EXIT_HYPERCALL flags. */
+#define KVM_EXIT_HYPERCALL_LONG_MODE   BIT(0)
+
 #endif /* _ASM_X86_KVM_H */
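
In a VMM's exit handler the new flag would be consumed roughly as below; a sketch, assuming run points at the mmap'ed struct kvm_run after a KVM_EXIT_HYPERCALL:

#include <stdbool.h>
#include <linux/kvm.h>

static bool hypercall_from_long_mode(const struct kvm_run *run)
{
	/* replaces the old run->hypercall.longmode field */
	return run->hypercall.flags & KVM_EXIT_HYPERCALL_LONG_MODE;
}
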
index 500b96e..e8d7ebb 100644 (file)
 #define ARCH_GET_XCOMP_GUEST_PERM      0x1024
 #define ARCH_REQ_XCOMP_GUEST_PERM      0x1025
 
+#define ARCH_XCOMP_TILECFG             17
+#define ARCH_XCOMP_TILEDATA            18
+
 #define ARCH_MAP_VDSO_X32              0x2001
 #define ARCH_MAP_VDSO_32               0x2002
 #define ARCH_MAP_VDSO_64               0x2003
 
+#define ARCH_GET_UNTAG_MASK            0x4001
+#define ARCH_ENABLE_TAGGED_ADDR                0x4002
+#define ARCH_GET_MAX_TAG_BITS          0x4003
+#define ARCH_FORCE_TAGGED_SVA          0x4004
+
 #endif /* _ASM_X86_PRCTL_H */
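
A hedged sketch of a process opting in to x86 Linear Address Masking via the new calls; the 6-bit request (LAM_U57) and the return conventions are assumptions based on the interface above:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/prctl.h>

int main(void)
{
	unsigned long untag_mask = 0;

	/* request 6 tag bits (LAM_U57); the kernel is free to refuse */
	if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6UL))
		perror("ARCH_ENABLE_TAGGED_ADDR");

	if (!syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &untag_mask))
		printf("untag mask: %#lx\n", untag_mask);
	return 0;
}
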
index b8ddfc4..bc48a4d 100644 (file)
@@ -2,6 +2,9 @@
 #ifndef __NR_fork
 #define __NR_fork 2
 #endif
+#ifndef __NR_execve
+#define __NR_execve 11
+#endif
 #ifndef __NR_getppid
 #define __NR_getppid 64
 #endif
index a91ac66..d055b82 100644 (file)
 .section .noinstr.text, "ax"
 
 /*
- * We build a jump to memcpy_orig by default which gets NOPped out on
- * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
- * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
- * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
- */
-
-/*
  * memcpy - Copy a memory block.
  *
  * Input:
  *
  * Output:
  * rax original destination
+ *
+ * The FSRM alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep movsb' itself is small enough to replace the call, but the
+ * two register moves blow up the code. And one of them is "needed"
 * only for the return value that is the same as the destination input,
+ * which the compiler could/should do much better anyway.
  */
 SYM_TYPED_FUNC_START(__memcpy)
-       ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
-                     "jmp memcpy_erms", X86_FEATURE_ERMS
+       ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM
 
        movq %rdi, %rax
        movq %rdx, %rcx
-       shrq $3, %rcx
-       andl $7, %edx
-       rep movsq
-       movl %edx, %ecx
        rep movsb
        RET
 SYM_FUNC_END(__memcpy)
@@ -45,17 +42,6 @@ EXPORT_SYMBOL(__memcpy)
 SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
-/*
- * memcpy_erms() - enhanced fast string memcpy. This is faster and
- * simpler than memcpy. Use memcpy_erms when possible.
- */
-SYM_FUNC_START_LOCAL(memcpy_erms)
-       movq %rdi, %rax
-       movq %rdx, %rcx
-       rep movsb
-       RET
-SYM_FUNC_END(memcpy_erms)
-
 SYM_FUNC_START_LOCAL(memcpy_orig)
        movq %rdi, %rax
 
index 6143b1a..7c59a70 100644 (file)
  * rdx   count (bytes)
  *
  * rax   original destination
+ *
+ * The FSRS alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep stosb' itself is small enough to replace the call, but all
+ * the register moves blow up the code. And two of them are "needed"
 * only for the return value that is the same as the destination input,
+ * which the compiler could/should do much better anyway.
  */
 SYM_FUNC_START(__memset)
-       /*
-        * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
-        * to use it when possible. If not available, use fast string instructions.
-        *
-        * Otherwise, use original memset function.
-        */
-       ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
-                     "jmp memset_erms", X86_FEATURE_ERMS
+       ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
 
        movq %rdi,%r9
+       movb %sil,%al
        movq %rdx,%rcx
-       andl $7,%edx
-       shrq $3,%rcx
-       /* expand byte value  */
-       movzbl %sil,%esi
-       movabs $0x0101010101010101,%rax
-       imulq %rsi,%rax
-       rep stosq
-       movl %edx,%ecx
        rep stosb
        movq %r9,%rax
        RET
@@ -48,26 +43,6 @@ EXPORT_SYMBOL(__memset)
 SYM_FUNC_ALIAS(memset, __memset)
 EXPORT_SYMBOL(memset)
 
-/*
- * ISO C memset - set a memory block to a byte value. This function uses
- * enhanced rep stosb to override the fast string function.
- * The code is simpler and shorter than the fast string function as well.
- *
- * rdi   destination
- * rsi   value (char)
- * rdx   count (bytes)
- *
- * rax   original destination
- */
-SYM_FUNC_START_LOCAL(memset_erms)
-       movq %rdi,%r9
-       movb %sil,%al
-       movq %rdx,%rcx
-       rep stosb
-       movq %r9,%rax
-       RET
-SYM_FUNC_END(memset_erms)
-
 SYM_FUNC_START_LOCAL(memset_orig)
        movq %rdi,%r10
 
index b54bd86..7ce02a2 100644 (file)
@@ -4,7 +4,6 @@
 
 /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
 
-#define altinstruction_entry #
-#define ALTERNATIVE_2 #
+#define ALTERNATIVE #
 
 #endif
index 6428085..a87bbbb 100644 (file)
@@ -972,6 +972,19 @@ extern "C" {
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION          DRM_IOWR(0x07, struct drm_set_version)
 #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns fresh new GEM handles even if an existing GEM handle
+ * already refers to the same memory object before the IOCTL is performed.
+ */
 #define DRM_IOCTL_GEM_CLOSE            DRM_IOW (0x09, struct drm_gem_close)
 #define DRM_IOCTL_GEM_FLINK            DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN             DRM_IOWR(0x0b, struct drm_gem_open)
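
A minimal user-space sketch of the close path described in the comment above; since handles are not reference-counted, the caller must arrange to reach this exactly once per handle:

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: drm_fd is an open DRM file description. */
static int gem_close(int drm_fd, __u32 handle)
{
	struct drm_gem_close args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;

	return ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &args);
}
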
@@ -1012,7 +1025,37 @@ extern "C" {
 #define DRM_IOCTL_UNLOCK               DRM_IOW( 0x2b, struct drm_lock)
 #define DRM_IOCTL_FINISH               DRM_IOW( 0x2c, struct drm_lock)
 
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
 #define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
 #define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
 
 #define DRM_IOCTL_AGP_ACQUIRE          DRM_IO(  0x30)
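
A hedged round-trip through the two PRIME ioctls documented above; the DRM_RDWR | DRM_CLOEXEC export flags are one plausible choice, not a requirement:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative: export a GEM handle as a DMA-BUF file descriptor. */
static int prime_export(int drm_fd, __u32 handle, int *out_fd)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC | DRM_RDWR,
	};
	int ret = ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);

	if (!ret)
		*out_fd = args.fd;
	return ret;
}

/* Importing the same fd twice yields the same handle; see the GEM_CLOSE note. */
static int prime_import(int drm_fd, int dmabuf_fd, __u32 *out_handle)
{
	struct drm_prime_handle args = { .fd = dmabuf_fd };
	int ret = ioctl(drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args);

	if (!ret)
		*out_handle = args.handle;
	return ret;
}
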
@@ -1104,8 +1147,13 @@ extern "C" {
  * struct as the output.
  *
  * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
- * will be filled with GEM buffer handles. Planes are valid until one has a
- * zero handle -- this can be used to compute the number of planes.
+ * will be filled with GEM buffer handles. Fresh new GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes in case they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
  *
  * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
  * until one has a zero &drm_mode_fb_cmd2.pitches.
@@ -1113,6 +1161,11 @@ extern "C" {
  * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
  * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
  * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
+ * double-close handles which are specified multiple times in the array.
  */
 #define DRM_IOCTL_MODE_GETFB2          DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
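
The export-then-close dance recommended above might look roughly like this, reusing prime_export() and gem_close() from the earlier sketches; the four-plane bound follows the drm_mode_fb_cmd2 layout:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative: export every plane of a GETFB2 result as a DMA-BUF, then
 * close each unique GEM handle exactly once (no leaks, no double-close). */
static void export_fb_planes(int drm_fd, const struct drm_mode_fb_cmd2 *fb,
			     int fds[4])
{
	int i, j;

	/* export first, while every handle is still valid */
	for (i = 0; i < 4 && fb->handles[i]; i++)
		prime_export(drm_fd, fb->handles[i], &fds[i]);

	/* then close, skipping handles already closed for an earlier plane */
	for (i = 0; i < 4 && fb->handles[i]; i++) {
		bool dup = false;

		for (j = 0; j < i; j++)
			dup |= (fb->handles[j] == fb->handles[i]);
		if (!dup)
			gem_close(drm_fd, fb->handles[i]);
	}
}
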
 
index 8df261c..dba7c5a 100644 (file)
@@ -2491,7 +2491,7 @@ struct i915_context_param_engines {
 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
 #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
 #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
-       struct i915_engine_class_instance engines[0];
+       struct i915_engine_class_instance engines[];
 } __attribute__((packed));
 
 #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@@ -2676,6 +2676,10 @@ enum drm_i915_oa_format {
        I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
        I915_OA_FORMAT_A24u40_A14u32_B8_C8,
 
+       /* MTL OAM */
+       I915_OAM_FORMAT_MPEC8u64_B8_C8,
+       I915_OAM_FORMAT_MPEC8u32_B8_C8,
+
        I915_OA_FORMAT_MAX          /* non-ABI */
 };
 
@@ -2758,6 +2762,25 @@ enum drm_i915_perf_property_id {
         */
        DRM_I915_PERF_PROP_POLL_OA_PERIOD,
 
+       /**
+        * Multiple engines may be mapped to the same OA unit. The OA unit is
+        * identified by class:instance of any engine mapped to it.
+        *
+        * This parameter specifies the engine class and must be passed along
+        * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
+        *
+        * This property is available in perf revision 6.
+        */
+       DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
+
+       /**
+        * This parameter specifies the engine instance and must be passed along
+        * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
+        *
+        * This property is available in perf revision 6.
+        */
+       DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
+
        DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
 
index af2a44c..a429381 100644 (file)
@@ -28,7 +28,7 @@
 #define _BITUL(x)      (_UL(1) << (x))
 #define _BITULL(x)     (_ULL(1) << (x))
 
-#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
 #define __ALIGN_KERNEL_MASK(x, mask)   (((x) + (mask)) & ~(mask))
 
 #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
index 07a4cb1..4b7f2df 100644 (file)
@@ -162,6 +162,7 @@ struct in_addr {
 #define MCAST_MSFILTER                 48
 #define IP_MULTICAST_ALL               49
 #define IP_UNICAST_IF                  50
+#define IP_LOCAL_PORT_RANGE            51
 
 #define MCAST_EXCLUDE  0
 #define MCAST_INCLUDE  1
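
A hedged example of the new socket option; packing the low port into bits 15:0 and the high port into bits 31:16 is my reading of the interface and should be checked against the ip(7) documentation:

#include <stdint.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_LOCAL_PORT_RANGE
#define IP_LOCAL_PORT_RANGE 51	/* from the header change above */
#endif

/* Illustrative: restrict this socket's ephemeral ports to [low, high]. */
static int set_local_port_range(int sock, uint16_t low, uint16_t high)
{
	uint32_t range = ((uint32_t)high << 16) | low;

	return setsockopt(sock, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
			  &range, sizeof(range));
}
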
index 4003a16..737318b 100644 (file)
@@ -341,8 +341,13 @@ struct kvm_run {
                        __u64 nr;
                        __u64 args[6];
                        __u64 ret;
-                       __u32 longmode;
-                       __u32 pad;
+
+                       union {
+#ifndef __KERNEL__
+                               __u32 longmode;
+#endif
+                               __u64 flags;
+                       };
                } hypercall;
                /* KVM_EXIT_TPR_ACCESS */
                struct {
@@ -1184,6 +1189,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1543,6 +1549,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_PMU_EVENT_FILTER  _IOW(KVMIO,  0xb2, struct kvm_pmu_event_filter)
 #define KVM_PPC_SVM_OFF                  _IO(KVMIO,  0xb3)
 #define KVM_ARM_MTE_COPY_TAGS    _IOR(KVMIO,  0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO,  0xb5, struct kvm_arm_counter_offset)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
index 759b3f5..f23d9a1 100644 (file)
@@ -290,6 +290,8 @@ struct prctl_mm_map {
 #define PR_SET_VMA             0x53564d41
 # define PR_SET_VMA_ANON_NAME          0
 
+#define PR_GET_AUXV                    0x41555856
+
 #define PR_SET_MEMORY_MERGE            67
 #define PR_GET_MEMORY_MERGE            68
 #endif /* _LINUX_PRCTL_H */
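
A hedged sketch of the new prctl; the assumed semantics — copy up to arg3 bytes of the auxiliary vector into arg2 and return the vector's full size — follow the naming, not verified documentation:

#include <stdio.h>
#include <elf.h>
#include <sys/prctl.h>

#ifndef PR_GET_AUXV
#define PR_GET_AUXV 0x41555856	/* from the header change above */
#endif

int main(void)
{
	Elf64_auxv_t auxv[64];
	long size = prctl(PR_GET_AUXV, (unsigned long)auxv, sizeof(auxv), 0, 0);

	if (size > 0)
		printf("auxv occupies %ld bytes\n", size);
	return 0;
}
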
index de6810e..0aa955a 100644 (file)
@@ -429,9 +429,14 @@ struct snd_pcm_sw_params {
        snd_pcm_uframes_t avail_min;            /* min avail frames for wakeup */
        snd_pcm_uframes_t xfer_align;           /* obsolete: xfer size need to be a multiple */
        snd_pcm_uframes_t start_threshold;      /* min hw_avail frames for automatic start */
-       snd_pcm_uframes_t stop_threshold;       /* min avail frames for automatic stop */
-       snd_pcm_uframes_t silence_threshold;    /* min distance from noise for silence filling */
-       snd_pcm_uframes_t silence_size;         /* silence block size */
+       /*
+        * The following two thresholds alleviate playback buffer underruns; when
+        * hw_avail drops below the threshold, the respective action is triggered:
+        */
+       snd_pcm_uframes_t stop_threshold;       /* - stop playback */
+       snd_pcm_uframes_t silence_threshold;    /* - pre-fill buffer with silence */
+       snd_pcm_uframes_t silence_size;         /* max size of silence pre-fill; when >= boundary,
+                                                * fill played area with silence immediately */
        snd_pcm_uframes_t boundary;             /* pointers wrap point */
        unsigned int proto;                     /* protocol version */
        unsigned int tstamp_type;               /* timestamp type (req. proto >= 2.0.12) */
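
In user space these fields are normally set through alsa-lib; a sketch, assuming pcm is an open playback handle whose hw params are already installed and the threshold values are merely plausible defaults:

#include <alsa/asoundlib.h>

static int configure_underrun_handling(snd_pcm_t *pcm,
				       snd_pcm_uframes_t buffer_size)
{
	snd_pcm_sw_params_t *sw;
	int err;

	snd_pcm_sw_params_alloca(&sw);
	err = snd_pcm_sw_params_current(pcm, sw);
	if (err < 0)
		return err;

	/* trigger an XRUN stop once the entire buffer has drained */
	err = snd_pcm_sw_params_set_stop_threshold(pcm, sw, buffer_size);
	if (err < 0)
		return err;

	/* pre-fill with up to a quarter buffer of silence when running low */
	err = snd_pcm_sw_params_set_silence_threshold(pcm, sw, buffer_size / 4);
	if (err < 0)
		return err;
	err = snd_pcm_sw_params_set_silence_size(pcm, sw, buffer_size / 4);
	if (err < 0)
		return err;

	return snd_pcm_sw_params(pcm, sw);
}
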
@@ -570,7 +575,8 @@ struct __snd_pcm_mmap_status64 {
 struct __snd_pcm_mmap_control64 {
        __pad_before_uframe __pad1;
        snd_pcm_uframes_t appl_ptr;      /* RW: appl ptr (0...boundary-1) */
-       __pad_before_uframe __pad2;
+       __pad_before_uframe __pad2;      // This should be __pad_after_uframe, but binary
+                                        // backwards compatibility constraints prevent a fix.
 
        __pad_before_uframe __pad3;
        snd_pcm_uframes_t  avail_min;    /* RW: min available frames for wakeup */
index 4884520..7026844 100644 (file)
@@ -216,6 +216,12 @@ ifeq ($(call get-executable,$(BISON)),)
   dummy := $(error Error: $(BISON) is missing on this system, please install it)
 endif
 
+ifeq ($(BUILD_BPF_SKEL),1)
+  ifeq ($(call get-executable,$(CLANG)),)
+    dummy := $(error $(CLANG) is missing on this system, please install it to be able to build with BUILD_BPF_SKEL=1)
+  endif
+endif
+
 ifneq ($(OUTPUT),)
   ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
     BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
index a42a6a9..1593c5d 100644 (file)
@@ -1057,14 +1057,32 @@ $(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_
 
 ifdef BUILD_BPF_SKEL
 BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
-BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE)
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyways.
+define get_sys_includes
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
+       | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES)
+TOOLS_UAPI_INCLUDE := -I$(srctree)/tools/include/uapi
 
 $(BPFTOOL): | $(SKEL_TMP_OUT)
        $(Q)CFLAGS= $(MAKE) -C ../bpf/bpftool \
                OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
 
 $(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
-       $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
+       $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
          -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@
 
 $(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
index 77cb03e..9ca040b 100644 (file)
@@ -78,9 +78,9 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
        char path[PATH_MAX];
        int err;
        u32 val;
-       u64 contextid =
-               evsel->core.attr.config &
-               (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
+       u64 contextid = evsel->core.attr.config &
+               (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid") |
+                perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
                 perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2"));
 
        if (!contextid)
@@ -114,8 +114,7 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
                 *  0b00100 Maximum of 32-bit Context ID size.
                 *  All other values are reserved.
                 */
-               val = BMVAL(val, 5, 9);
-               if (!val || val != 0x4) {
+               if (BMVAL(val, 5, 9) != 0x4) {
                        pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n",
                               CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
                        return -EINVAL;
index d730666..80b9f62 100644 (file)
@@ -29,8 +29,8 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
                char path[PATH_MAX];
                FILE *file;
 
-               scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
-                               sysfs, cpus->map[cpu]);
+               scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR,
+                         sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu);
 
                file = fopen(path, "r");
                if (!file) {
index fa143ac..ef1ed64 100644 (file)
@@ -18,7 +18,7 @@ static struct perf_pmu *pmu__find_core_pmu(void)
                 * The cpumap should cover all CPUs. Otherwise, some CPUs may
                 * not support some events or have different event IDs.
                 */
-               if (pmu->cpus->nr != cpu__max_cpu().cpu)
+               if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
                        return NULL;
 
                return pmu;
index 7991476..b68f475 100644 (file)
 444  common    landlock_create_ruleset sys_landlock_create_ruleset     sys_landlock_create_ruleset
 445  common    landlock_add_rule       sys_landlock_add_rule           sys_landlock_add_rule
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
-# 447 reserved for memfd_secret
+447  common    memfd_secret            sys_memfd_secret                sys_memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
 449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
 450  common    set_mempolicy_home_node sys_set_mempolicy_home_node     sys_set_mempolicy_home_node
index 50ae8bd..6188e19 100644 (file)
@@ -7,7 +7,3 @@ MEMCPY_FN(memcpy_orig,
 MEMCPY_FN(__memcpy,
        "x86-64-movsq",
        "movsq-based memcpy() in arch/x86/lib/memcpy_64.S")
-
-MEMCPY_FN(memcpy_erms,
-       "x86-64-movsb",
-       "movsb-based memcpy() in arch/x86/lib/memcpy_64.S")
index 6eb45a2..1b9fef7 100644 (file)
@@ -2,7 +2,7 @@
 
 /* Various wrappers to make the kernel .S file build in user-space: */
 
-// memcpy_orig and memcpy_erms are being defined as SYM_L_LOCAL but we need it
+// memcpy_orig is being defined as SYM_L_LOCAL but we need it
 #define SYM_FUNC_START_LOCAL(name)                      \
         SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
 #define memcpy MEMCPY /* don't hide glibc's memcpy() */
index dac6d2b..247c72f 100644 (file)
@@ -7,7 +7,3 @@ MEMSET_FN(memset_orig,
 MEMSET_FN(__memset,
        "x86-64-stosq",
        "movsq-based memset() in arch/x86/lib/memset_64.S")
-
-MEMSET_FN(memset_erms,
-       "x86-64-stosb",
-       "movsb-based memset() in arch/x86/lib/memset_64.S")
index 6f093c4..abd26c9 100644
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-// memset_orig and memset_erms are being defined as SYM_L_LOCAL but we need it
+// memset_orig is defined as SYM_L_LOCAL but we need it global
 #define SYM_FUNC_START_LOCAL(name)                      \
         SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
 #define memset MEMSET /* don't hide glibc's memset() */
index 006f522..c57be48 100644
@@ -3647,6 +3647,13 @@ static int process_stat_config_event(struct perf_session *session __maybe_unused
                                     union perf_event *event)
 {
        perf_event__read_stat_config(&stat_config, &event->stat_config);
+
+       /*
+        * Aggregation modes are not used; post-processing scripts are
+        * expected to handle any aggregation themselves.
+        */
+       stat_config.aggr_mode = AGGR_NONE;
+
        return 0;
 }
 
index cc9fa48..b9ad32f 100644
@@ -667,6 +667,13 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
                        evsel_list->core.threads->err_thread = -1;
                        return COUNTER_RETRY;
                }
+       } else if (counter->skippable) {
+               if (verbose > 0)
+                       ui__warning("skipping event %s that kernel failed to open.\n",
+                                   evsel__name(counter));
+               counter->supported = false;
+               counter->errored = true;
+               return COUNTER_SKIP;
        }
 
        evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
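The new branch gives events that perf added on its own behalf, rather than at the user's explicit request, a soft-failure path: the counter is marked unsupported and skipped instead of aborting the whole run. A self-contained sketch of the pattern, with a hypothetical struct counter standing in for evsel:

    #include <stdio.h>
    #include <stdbool.h>

    /* hypothetical stand-in for struct evsel */
    struct counter {
            const char *name;
            bool skippable, supported, errored;
    };

    /* returns 0 when the run may continue, -1 on a hard failure */
    static int handle_open_failure(struct counter *c)
    {
            if (!c->skippable)
                    return -1;      /* user asked for this event */
            fprintf(stderr, "skipping event %s that kernel failed to open\n",
                    c->name);
            c->supported = false;
            c->errored = true;
            return 0;               /* opportunistic event: soft skip */
    }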
@@ -1890,15 +1897,28 @@ static int add_default_attributes(void)
                 * caused by exposing latent bugs. This is fixed properly in:
                 * https://lore.kernel.org/lkml/bff481ba-e60a-763f-0aa0-3ee53302c480@linux.intel.com/
                 */
-               if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid() &&
-                   metricgroup__parse_groups(evsel_list, "TopdownL1",
-                                           /*metric_no_group=*/false,
-                                           /*metric_no_merge=*/false,
-                                           /*metric_no_threshold=*/true,
-                                           stat_config.user_requested_cpu_list,
-                                           stat_config.system_wide,
-                                           &stat_config.metric_events) < 0)
-                       return -1;
+               if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid()) {
+                       struct evlist *metric_evlist = evlist__new();
+                       struct evsel *metric_evsel;
+
+                       if (!metric_evlist)
+                               return -1;
+
+                       if (metricgroup__parse_groups(metric_evlist, "TopdownL1",
+                                                       /*metric_no_group=*/false,
+                                                       /*metric_no_merge=*/false,
+                                                       /*metric_no_threshold=*/true,
+                                                       stat_config.user_requested_cpu_list,
+                                                       stat_config.system_wide,
+                                                       &stat_config.metric_events) < 0)
+                               return -1;
+
+                       evlist__for_each_entry(metric_evlist, metric_evsel) {
+                               metric_evsel->skippable = true;
+                       }
+                       evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
+                       evlist__delete(metric_evlist);
+               }
 
                /* Platform specific attrs */
                if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
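The restructured hunk parses the TopdownL1 metrics into a scratch evlist first, marks every resulting evsel skippable, and only then splices the entries into the real list, so a metric event the kernel rejects degrades gracefully instead of failing the default perf stat run. A toy sketch of that mark-then-splice ownership dance, with stand-in types (splice_skippable is illustrative, not a perf API):

    #include <stdlib.h>
    #include <stdbool.h>

    /* toy stand-ins for evsel/evlist */
    struct evsel  { bool skippable; struct evsel *next; };
    struct evlist { struct evsel *head, *tail; };

    /* flag every scratch entry, move them all onto the tail of the real
     * list, then drop the now-empty scratch list */
    static void splice_skippable(struct evlist *dst, struct evlist *src)
    {
            for (struct evsel *e = src->head; e; e = e->next)
                    e->skippable = true;
            if (src->head) {
                    if (dst->tail)
                            dst->tail->next = src->head;
                    else
                            dst->head = src->head;
                    dst->tail = src->tail;
                    src->head = src->tail = NULL;
            }
            free(src);      /* assumes the scratch list was heap-allocated */
    }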
index 75d80e7..1f90475 100644
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.   The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound.   The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound_aux",
         "MetricThreshold": "tma_backend_bound_aux > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that UOPS must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.  All of these subevents count backend stalls, in slots, due to a resource limitation.   These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based.  These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_base",
         "MetricThreshold": "tma_base > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_ms_uops",
         "MetricThreshold": "tma_ms_uops > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS).  This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
         "MetricName": "tma_resource_bound",
         "MetricThreshold": "tma_resource_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.75",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
index 1a85d93..0402adb 100644
@@ -98,6 +98,7 @@
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.   The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound.   The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound_aux",
         "MetricThreshold": "tma_backend_bound_aux > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that UOPS must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.  All of these subevents count backend stalls, in slots, due to a resource limitation.   These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based.  These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_base",
         "MetricThreshold": "tma_base > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_ms_uops",
         "MetricThreshold": "tma_ms_uops > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS).  This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
         "MetricName": "tma_resource_bound",
         "MetricThreshold": "tma_resource_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.75",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%"
     },
     {
index 51cf856..f9e2316 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index fb57c73..e9c46d3 100644
@@ -97,6 +97,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
index 65ec0c9..437b986 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 8f7dc72..875c766 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 2528418..9570a88 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 11f152c..a522202 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index f45ae34..1a2154f 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
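
Each entry's "MetricThreshold" is a boolean expression over other metric values, with '&' and '|' acting as logical and/or. Below is a minimal sketch of evaluating one such threshold under that assumed reading of the operators; perf has its own expression parser, so this helper is only illustrative:

    # Evaluate a MetricThreshold string against sample metric values by
    # mapping '&'/'|' onto Python's and/or (assumed, illustrative semantics).
    def threshold_fires(expr, values):
        py = expr.replace("&", " and ").replace("|", " or ")
        return bool(eval(py, {"__builtins__": {}}, dict(values)))

    vals = {"tma_fetch_bandwidth": 0.22, "tma_frontend_bound": 0.18,
            "tma_info_ipc": 2.1}
    print(threshold_fires("tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15"
                          " & tma_info_ipc / 5 > 0.35", vals))   # -> True
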
index 0f9b174..1ef772b 100644
@@ -80,6 +80,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
@@ -89,6 +90,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
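
A "ScaleUnit" of "100%" indicates the metric's raw value is a fraction of pipeline slots that should be multiplied by 100 and shown as a percentage. A small sketch of that display step (assumed formatting, for illustration only):

    # Hedged sketch: split a ScaleUnit like "100%" into its numeric scale
    # and unit suffix, then render a raw fraction for display.
    def render(value, scale_unit):
        num = "".join(c for c in scale_unit if c.isdigit() or c == ".")
        suffix = scale_unit[len(num):]
        return f"{value * float(num):.1f}{suffix}"

    print(render(0.237, "100%"))   # -> "23.7%"
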
index 5247f69..11080cc 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
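Each hunk above makes the same mechanical change: every Topdown level-1 or level-2 metric gains a "MetricgroupNoGroup" value naming its own Topdown level. A minimal consistency-check sketch for that pattern follows; the file path is a placeholder, and the assumption that the field must always mirror the TopdownL1/TopdownL2 entry in "MetricGroup" is ours, not something the commit states.

#!/usr/bin/env python3
# Sketch: verify that every TopdownL1/TopdownL2 metric in a perf vendor
# JSON file carries a matching "MetricgroupNoGroup" value -- the
# invariant the hunks above establish. The path is a placeholder.
import json
import sys

def check(path):
    with open(path) as f:
        metrics = json.load(f)
    ok = True
    for m in metrics:
        groups = set(m.get("MetricGroup", "").split(";"))
        for level in ("TopdownL1", "TopdownL2"):
            if level in groups and m.get("MetricgroupNoGroup") != level:
                print(f"{m.get('MetricName')}: expected MetricgroupNoGroup={level}")
                ok = False
    return ok

if __name__ == "__main__":
    sys.exit(0 if check(sys.argv[1]) else 1)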
index 89469b1..65a46d6 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index e8f4e5c..66a6f65 100644
@@ -76,6 +76,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
@@ -85,6 +86,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
@@ -95,6 +97,7 @@
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 4a99fe5..4b8bc19 100644
@@ -76,6 +76,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
@@ -85,6 +86,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
@@ -95,6 +97,7 @@
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
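The same field is applied file by file, so a quick way to audit what changed between two revisions of one of these JSON files is to compare at the metric level rather than textually. A hedged sketch follows; the old.json/new.json paths and the two-revision setup are illustrative, not part of this commit.

# Sketch: report metrics that gained "MetricgroupNoGroup" between an old
# and a new revision of a perf vendor metrics file. Paths are placeholders.
import json

def added_key(old_path, new_path, key="MetricgroupNoGroup"):
    old = {m["MetricName"]: m for m in json.load(open(old_path)) if "MetricName" in m}
    new = {m["MetricName"]: m for m in json.load(open(new_path)) if "MetricName" in m}
    for name, metric in sorted(new.items()):
        if key in metric and key not in old.get(name, {}):
            yield name, metric[key]

for name, level in added_key("old.json", "new.json"):
    print(f"{name}: +{level}")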
index 126300b..620fc5b 100644
@@ -87,6 +87,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
@@ -96,6 +97,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
index a6d212b..21ef6c9 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
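To exercise these metrics end to end, the Topdown metric groups can be requested directly from perf stat; a small wrapper sketch follows. It assumes a perf build that already knows the TopdownL1 metric group, and "sleep 1" is purely a placeholder workload.

# Sketch: run the TopdownL1 metric group under perf stat and show the
# resulting table. perf stat writes its report to stderr.
import subprocess

result = subprocess.run(
    ["perf", "stat", "-M", "TopdownL1", "--", "sleep", "1"],
    capture_output=True, text=True,
)
print(result.stderr)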
index fa2f7f1..eb6f12c 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 4c80d6b..b442ed4 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
index ca99b9c..f57a8f2 100755 (executable)
@@ -52,7 +52,8 @@ _json_event_attributes = [
 # Attributes that are in pmu_metric rather than pmu_event.
 _json_metric_attributes = [
     'metric_name', 'metric_group', 'metric_expr', 'metric_threshold', 'desc',
-    'long_desc', 'unit', 'compat', 'aggr_mode', 'event_grouping'
+    'long_desc', 'unit', 'compat', 'metricgroup_no_group', 'aggr_mode',
+    'event_grouping'
 ]
 # Attributes that are bools or enum int values, encoded as '0', '1',...
 _json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
@@ -303,6 +304,7 @@ class JsonEvent:
     self.deprecated = jd.get('Deprecated')
     self.metric_name = jd.get('MetricName')
     self.metric_group = jd.get('MetricGroup')
+    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
     self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
     self.metric_expr = None
     if 'MetricExpr' in jd:
index b7dff8f..8034968 100644 (file)
@@ -59,6 +59,7 @@ struct pmu_metric {
        const char *compat;
        const char *desc;
        const char *long_desc;
+       const char *metricgroup_no_group;
        enum aggr_mode_class aggr_mode;
        enum metric_event_groups event_grouping;
 };
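
A hand-written entry using the extended struct might look like the sketch below. It is purely illustrative: the real tables are generated by jevents.py, most fields are elided, and the name/group/threshold members are assumed from how pm-> fields are referenced elsewhere in this series. The comment on the new field reflects how metricgroup.c consumes it further down.

    /* Illustrative only: a minimal pmu_metric entry showing where the new
     * string lands. Real entries come from the generated pmu-events tables. */
    static const struct pmu_metric example_metric = {
            .metric_name          = "tma_frontend_bound",
            .metric_group         = "PGO;TmaL1;TopdownL1;tma_L1_group",
            .metric_threshold     = "tma_frontend_bound > 0.15",
            /* When the metric is requested through a group named here, event
             * grouping is relaxed as if --metric-no-group had been passed. */
            .metricgroup_no_group = "TopdownL1",
    };
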
index ccfef86..e890c26 100644 (file)
@@ -152,7 +152,7 @@ def parse_version(version):
 #   - expected values assignments
 class Test(object):
     def __init__(self, path, options):
-        parser = configparser.SafeConfigParser()
+        parser = configparser.ConfigParser()
         parser.read(path)
 
         log.warning("running '%s'" % path)
@@ -247,7 +247,7 @@ class Test(object):
         return True
 
     def load_events(self, path, events):
-        parser_event = configparser.SafeConfigParser()
+        parser_event = configparser.ConfigParser()
         parser_event.read(path)
 
         # The event record section header contains 'event' word,
@@ -261,7 +261,7 @@ class Test(object):
             # Read parent event if there's any
             if (':' in section):
                 base = section[section.index(':') + 1:]
-                parser_base = configparser.SafeConfigParser()
+                parser_base = configparser.ConfigParser()
                 parser_base.read(self.test_dir + '/' + base)
                 base_items = parser_base.items('event')
 
index a21fb65..fccd8ec 100644 (file)
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=0
 comm=0
index d8ea6a8..a1e2da0 100644 (file)
@@ -40,7 +40,6 @@ fd=6
 type=0
 config=7
 optional=1
-
 # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
 [event7:base-stat]
 fd=7
@@ -89,79 +88,98 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
index b656ab9..1c52cb0 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
index 9762509..7e961d2 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
@@ -211,8 +230,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event24:base-stat]
-fd=24
+[event29:base-stat]
+fd=29
 type=3
 config=1
 optional=1
@@ -221,8 +240,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event25:base-stat]
-fd=25
+[event30:base-stat]
+fd=30
 type=3
 config=65537
 optional=1
@@ -231,8 +250,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event26:base-stat]
-fd=26
+[event31:base-stat]
+fd=31
 type=3
 config=3
 optional=1
@@ -241,8 +260,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event27:base-stat]
-fd=27
+[event32:base-stat]
+fd=32
 type=3
 config=65539
 optional=1
@@ -251,8 +270,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event28:base-stat]
-fd=28
+[event33:base-stat]
+fd=33
 type=3
 config=4
 optional=1
@@ -261,8 +280,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event29:base-stat]
-fd=29
+[event34:base-stat]
+fd=34
 type=3
 config=65540
 optional=1
index d555042..e50535f 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
@@ -211,8 +230,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event24:base-stat]
-fd=24
+[event29:base-stat]
+fd=29
 type=3
 config=1
 optional=1
@@ -221,8 +240,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event25:base-stat]
-fd=25
+[event30:base-stat]
+fd=30
 type=3
 config=65537
 optional=1
@@ -231,8 +250,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event26:base-stat]
-fd=26
+[event31:base-stat]
+fd=31
 type=3
 config=3
 optional=1
@@ -241,8 +260,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event27:base-stat]
-fd=27
+[event32:base-stat]
+fd=32
 type=3
 config=65539
 optional=1
@@ -251,8 +270,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event28:base-stat]
-fd=28
+[event33:base-stat]
+fd=33
 type=3
 config=4
 optional=1
@@ -261,8 +280,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event29:base-stat]
-fd=29
+[event34:base-stat]
+fd=34
 type=3
 config=65540
 optional=1
@@ -271,8 +290,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event30:base-stat]
-fd=30
+[event35:base-stat]
+fd=35
 type=3
 config=512
 optional=1
@@ -281,8 +300,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event31:base-stat]
-fd=31
+[event36:base-stat]
+fd=36
 type=3
 config=66048
 optional=1
index cbf0e0c..733ead1 100644 (file)
@@ -120,7 +120,8 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
 
        p = "FOO/0";
        ret = expr__parse(&val, ctx, p);
-       TEST_ASSERT_VAL("division by zero", ret == -1);
+       TEST_ASSERT_VAL("division by zero", ret == 0);
+       TEST_ASSERT_VAL("division by zero", isnan(val));
 
        p = "BAR/";
        ret = expr__parse(&val, ctx, p);
index 1185b79..c05148e 100644 (file)
@@ -38,6 +38,7 @@ static void load_runtime_stat(struct evlist *evlist, struct value *vals)
        evlist__alloc_aggr_stats(evlist, 1);
        evlist__for_each_entry(evlist, evsel) {
                count = find_value(evsel->name, vals);
+               evsel->supported = true;
                evsel->stats->aggr->counts.val = count;
                if (evsel__name_is(evsel, "duration_time"))
                        update_stats(&walltime_nsecs_stats, count);
index 2c1d3f7..b154fbb 100755 (executable)
@@ -28,6 +28,18 @@ test_stat_record_report() {
   echo "stat record and report test [Success]"
 }
 
+test_stat_record_script() {
+  echo "stat record and script test"
+  if ! perf stat record -o - true | perf script -i - 2>&1 | \
+    grep -E -q "CPU[[:space:]]+THREAD[[:space:]]+VAL[[:space:]]+ENA[[:space:]]+RUN[[:space:]]+TIME[[:space:]]+EVENT"
+  then
+    echo "stat record and script test [Failed]"
+    err=1
+    return
+  fi
+  echo "stat record and script test [Success]"
+}
+
 test_stat_repeat_weak_groups() {
   echo "stat repeat weak groups test"
   if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
@@ -93,6 +105,7 @@ test_topdown_weak_groups() {
 
 test_default_stat
 test_stat_record_report
+test_stat_record_script
 test_stat_repeat_weak_groups
 test_topdown_groups
 test_topdown_weak_groups
index 4ddb17c..3a8b9bf 100755 (executable)
@@ -506,6 +506,13 @@ test_sample()
                echo "perf record failed with --aux-sample"
                return 1
        fi
+       # Check with event with PMU name
+       if perf_record_no_decode -o "${perfdatafile}" -e br_misp_retired.all_branches:u uname ; then
+               if ! perf_record_no_decode -o "${perfdatafile}" -e '{intel_pt//,br_misp_retired.all_branches/aux-sample-size=8192/}:u' uname ; then
+                       echo "perf record failed with --aux-sample-size"
+                       return 1
+               fi
+       fi
        echo OK
        return 0
 }
index 90cea88..499539d 100755 (executable)
@@ -56,7 +56,7 @@ if [ $? -ne 0 ]; then
        exit 1
 fi
 
-if ! perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
+if ! DEBUGINFOD_URLS='' perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
        echo "Fail to inject samples"
        exit 1
 fi
index fe022ca..a211348 100644 (file)
 
 static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_1, "ARCH_", x86_arch_prctl_codes_1_offset);
 static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_2, "ARCH_", x86_arch_prctl_codes_2_offset);
+static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_3, "ARCH_", x86_arch_prctl_codes_3_offset);
 
 static struct strarray *x86_arch_prctl_codes[] = {
        &strarray__x86_arch_prctl_codes_1,
        &strarray__x86_arch_prctl_codes_2,
+       &strarray__x86_arch_prctl_codes_3,
 };
 
 static DEFINE_STRARRAYS(x86_arch_prctl_codes);
index 57fa6aa..fd5c740 100755 (executable)
@@ -24,3 +24,4 @@ print_range () {
 
 print_range 1 0x1 0x1001
 print_range 2 0x2 0x2001
+print_range 3 0x4 0x4001
index 8d3cfbb..1d48226 100644 (file)
@@ -416,6 +416,8 @@ int contention_end(u64 *ctx)
        return 0;
 }
 
+struct rq {};
+
 extern struct rq runqueues __ksym;
 
 struct rq___old {
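
The empty struct rq {} definition is the usual BPF CO-RE stub: the program only needs the type to exist so the __ksym extern compiles, while real member offsets are relocated against the running kernel's BTF at load time. The bare pattern, with nothing specific to lock contention, looks like this:

    /* Stub the kernel type with no members; the real layout is supplied by
     * the kernel's BTF when the object is loaded, not by this definition. */
    struct rq {};

    /* Per-CPU kernel symbol, resolved at load time; typically dereferenced
     * through bpf_per_cpu_ptr() rather than directly. */
    extern struct rq runqueues __ksym;
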
index 449b1ea..c7ed51b 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __VMLINUX_H
 #define __VMLINUX_H
 
+#include <linux/stddef.h> // for the __always_inline definition
 #include <linux/bpf.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
index 356c07f..2f5910b 100644 (file)
@@ -290,6 +290,7 @@ void evsel__init(struct evsel *evsel,
        evsel->per_pkg_mask  = NULL;
        evsel->collect_stat  = false;
        evsel->pmu_name      = NULL;
+       evsel->skippable     = false;
 }
 
 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -828,26 +829,26 @@ bool evsel__name_is(struct evsel *evsel, const char *name)
 
 const char *evsel__group_pmu_name(const struct evsel *evsel)
 {
-       const struct evsel *leader;
+       struct evsel *leader = evsel__leader(evsel);
+       struct evsel *pos;
 
-       /* If the pmu_name is set use it. pmu_name isn't set for CPU and software events. */
-       if (evsel->pmu_name)
-               return evsel->pmu_name;
        /*
         * Software events may be in a group with other uncore PMU events. Use
-        * the pmu_name of the group leader to avoid breaking the software event
-        * out of the group.
+        * the pmu_name of the first non-software event to avoid breaking the
+        * software event out of the group.
         *
         * Aux event leaders, like intel_pt, expect a group with events from
         * other PMUs, so substitute the AUX event's PMU in this case.
         */
-       leader  = evsel__leader(evsel);
-       if ((evsel->core.attr.type == PERF_TYPE_SOFTWARE || evsel__is_aux_event(leader)) &&
-           leader->pmu_name) {
-               return leader->pmu_name;
+       if (evsel->core.attr.type == PERF_TYPE_SOFTWARE || evsel__is_aux_event(leader)) {
+               /* Starting with the leader, find the first event with a named PMU. */
+               for_each_group_evsel(pos, leader) {
+                       if (pos->pmu_name)
+                               return pos->pmu_name;
+               }
        }
 
-       return "cpu";
+       return evsel->pmu_name ?: "cpu";
 }
 
 const char *evsel__metric_id(const struct evsel *evsel)
@@ -1725,9 +1726,13 @@ static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
                return -1;
 
        fd = FD(leader, cpu_map_idx, thread);
-       BUG_ON(fd == -1);
+       BUG_ON(fd == -1 && !leader->skippable);
 
-       return fd;
+       /*
+        * When the leader has been skipped, return -2 to distinguish from no
+        * group leader case.
+        */
+       return fd == -1 ? -2 : fd;
 }
 
 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
@@ -2109,6 +2114,12 @@ retry_open:
 
                        group_fd = get_group_fd(evsel, idx, thread);
 
+                       if (group_fd == -2) {
+                               pr_debug("broken group leader for %s\n", evsel->name);
+                               err = -EINVAL;
+                               goto out_close;
+                       }
+
                        test_attr__ready();
 
                        /* Debug message used by test scripts */
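
get_group_fd() previously treated a missing leader fd as fatal; with skippable leaders it now needs two sentinels, -1 for "not in a group" and -2 for "leader was skipped", so the open path can fail the member with a clear message instead of hitting BUG_ON(). A stripped-down sketch of the sentinel split (names are illustrative, not perf's):

    #include <stdbool.h>

    #define NO_GROUP        (-1)    /* event has no group leader */
    #define LEADER_SKIPPED  (-2)    /* leader was skippable and failed to open */

    static int group_fd(int leader_fd, bool is_leader, bool leader_skippable)
    {
            if (is_leader)
                    return NO_GROUP;   /* leaders pass -1 to perf_event_open() */
            if (leader_fd == -1)       /* the real code BUG()s if !skippable */
                    return LEADER_SKIPPED;
            return leader_fd;          /* normal case: join the leader's group */
    }
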
index d575390..df89287 100644 (file)
@@ -95,6 +95,7 @@ struct evsel {
                bool                    weak_group;
                bool                    bpf_counter;
                bool                    use_config_name;
+               bool                    skippable;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
                struct list_head        config_terms;
index 250e444..4ce931c 100644 (file)
@@ -225,7 +225,11 @@ expr: NUMBER
 {
        if (fpclassify($3.val) == FP_ZERO) {
                pr_debug("division by zero\n");
-               YYABORT;
+               assert($3.ids == NULL);
+               if (compute_ids)
+                       ids__free($1.ids);
+               $$.val = NAN;
+               $$.ids = NULL;
        } else if (!compute_ids || (is_const($1.val) && is_const($3.val))) {
                assert($1.ids == NULL);
                assert($3.ids == NULL);
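
Returning NaN instead of aborting the parse mirrors IEEE 754: the bad value poisons only the expressions that consume it, and callers can test for it with isnan(), as the updated expr test above does. The behavior is easy to reproduce standalone:

    #include <assert.h>
    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double denom = 0.0;

            /* fpclassify() catches both +0.0 and -0.0, as in the grammar action. */
            double val = (fpclassify(denom) == FP_ZERO) ? NAN : 1.0 / denom;

            assert(isnan(val));         /* the parse succeeds, the value is NaN */
            assert(isnan(val + 42.0));  /* NaN propagates through later arithmetic */
            printf("FOO/0 -> %f\n", val);
            return 0;
    }
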
index c566c68..5e9c657 100644 (file)
@@ -1144,12 +1144,12 @@ static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
        struct metricgroup__add_metric_data *data = vdata;
        int ret = 0;
 
-       if (pm->metric_expr &&
-               (match_metric(pm->metric_group, data->metric_name) ||
-                match_metric(pm->metric_name, data->metric_name))) {
+       if (pm->metric_expr && match_pm_metric(pm, data->metric_name)) {
+               bool metric_no_group = data->metric_no_group ||
+                       match_metric(data->metric_name, pm->metricgroup_no_group);
 
                data->has_match = true;
-               ret = add_metric(data->list, pm, data->modifier, data->metric_no_group,
+               ret = add_metric(data->list, pm, data->modifier, metric_no_group,
                                 data->metric_no_threshold, data->user_requested_cpu_list,
                                 data->system_wide, /*root_metric=*/NULL,
                                 /*visited_metrics=*/NULL, table);
@@ -1672,7 +1672,7 @@ static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
 {
        unsigned int *max_level = data;
        unsigned int level;
-       const char *p = strstr(pm->metric_group, "TopdownL");
+       const char *p = strstr(pm->metric_group ?: "", "TopdownL");
 
        if (!p || p[8] == '\0')
                return 0;
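
Since metric_group can now legitimately be NULL, the strstr() haystack is guarded with GCC's a ?: b extension, which falls back to the right operand without evaluating the left side twice. A self-contained version of the same level probe:

    #include <stdio.h>
    #include <string.h>

    static unsigned int topdown_level(const char *metric_group)
    {
            /* GNU "elvis" operator: fall back to "" when metric_group is NULL,
             * so strstr() never sees a NULL haystack. */
            const char *p = strstr(metric_group ?: "", "TopdownL");

            if (!p || p[8] == '\0')
                    return 0;
            return p[8] - '0';
    }

    int main(void)
    {
            printf("%u\n", topdown_level("TmaL2;TopdownL2;tma_L2_group")); /* 2 */
            printf("%u\n", topdown_level(NULL));                           /* 0 */
            return 0;
    }
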
index d71019d..34ba840 100644 (file)
@@ -2140,25 +2140,32 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list
        int *leader_idx = state;
        int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
        const char *lhs_pmu_name, *rhs_pmu_name;
+       bool lhs_has_group = false, rhs_has_group = false;
 
        /*
         * First sort by grouping/leader. Read the leader idx only if the evsel
         * is part of a group, as -1 indicates no group.
         */
-       if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1)
+       if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
+               lhs_has_group = true;
                lhs_leader_idx = lhs_core->leader->idx;
-       if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1)
+       }
+       if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
+               rhs_has_group = true;
                rhs_leader_idx = rhs_core->leader->idx;
+       }
 
        if (lhs_leader_idx != rhs_leader_idx)
                return lhs_leader_idx - rhs_leader_idx;
 
-       /* Group by PMU. Groups can't span PMUs. */
-       lhs_pmu_name = evsel__group_pmu_name(lhs);
-       rhs_pmu_name = evsel__group_pmu_name(rhs);
-       ret = strcmp(lhs_pmu_name, rhs_pmu_name);
-       if (ret)
-               return ret;
+       /* Group by PMU if there is a group. Groups can't span PMUs. */
+       if (lhs_has_group && rhs_has_group) {
+               lhs_pmu_name = evsel__group_pmu_name(lhs);
+               rhs_pmu_name = evsel__group_pmu_name(rhs);
+               ret = strcmp(lhs_pmu_name, rhs_pmu_name);
+               if (ret)
+                       return ret;
+       }
 
        /* Architecture specific sorting. */
        return arch_evlist__cmp(lhs, rhs);
index 73b2ff2..bf5a6c1 100644 (file)
@@ -431,7 +431,7 @@ static void print_metric_json(struct perf_stat_config *config __maybe_unused,
        struct outstate *os = ctx;
        FILE *out = os->fh;
 
-       fprintf(out, "\"metric-value\" : %f, ", val);
+       fprintf(out, "\"metric-value\" : \"%f\", ", val);
        fprintf(out, "\"metric-unit\" : \"%s\"", unit);
        if (!config->metric_only)
                fprintf(out, "}");
index eeccab6..1566a20 100644 (file)
@@ -403,12 +403,25 @@ static int prepare_metric(struct evsel **metric_events,
                        if (!aggr)
                                break;
 
-                       /*
-                        * If an event was scaled during stat gathering, reverse
-                        * the scale before computing the metric.
-                        */
-                       val = aggr->counts.val * (1.0 / metric_events[i]->scale);
-                       source_count = evsel__source_count(metric_events[i]);
+                        if (!metric_events[i]->supported) {
+                               /*
+                                * Not supported events will have a count of 0,
+                                * which can be confusing in a
+                                * metric. Explicitly set the value to NAN. Not
+                                * counted events (enable time of 0) are read as
+                                * 0.
+                                */
+                               val = NAN;
+                               source_count = 0;
+                       } else {
+                               /*
+                                * If an event was scaled during stat gathering,
+                                * reverse the scale before computing the
+                                * metric.
+                                */
+                               val = aggr->counts.val * (1.0 / metric_events[i]->scale);
+                               source_count = evsel__source_count(metric_events[i]);
+                       }
                }
                n = strdup(evsel__metric_id(metric_events[i]));
                if (!n)
index 0ce29ee..a7a59c6 100644 (file)
@@ -40,25 +40,34 @@ static int sysfs_get_enabled(char *path, int *mode)
 {
        int fd;
        char yes_no;
+       int ret = 0;
 
        *mode = 0;
 
        fd = open(path, O_RDONLY);
-       if (fd == -1)
-               return -1;
+       if (fd == -1) {
+               ret = -1;
+               goto out;
+       }
 
        if (read(fd, &yes_no, 1) != 1) {
-               close(fd);
-               return -1;
+               ret = -1;
+               goto out_close;
        }
 
        if (yes_no == '1') {
                *mode = 1;
-               return 0;
+               goto out_close;
        } else if (yes_no == '0') {
-               return 0;
+               goto out_close;
+       } else {
+               ret = -1;
+               goto out_close;
        }
-       return -1;
+out_close:
+       close(fd);
+out:
+       return ret;
 }
 
 int powercap_get_enabled(int *mode)
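
The rewrite trades early returns for a single-exit goto ladder so every path releases the file descriptor, the conventional kernel-style cleanup shape. The same idiom reduced to its skeleton (a sketch, not the powercap code itself):

    #include <fcntl.h>
    #include <unistd.h>

    static int read_one_char(const char *path, char *out)
    {
            int ret = 0;
            int fd;

            fd = open(path, O_RDONLY);
            if (fd == -1) {
                    ret = -1;
                    goto out;               /* nothing opened yet */
            }

            if (read(fd, out, 1) != 1)
                    ret = -1;               /* still reaches the close below */

            close(fd);                      /* the one exit that releases fd */
    out:
            return ret;
    }
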
index e7d48cb..ae6af35 100644 (file)
@@ -70,8 +70,8 @@ static int max_freq_mode;
  */
 static unsigned long max_frequency;
 
-static unsigned long long tsc_at_measure_start;
-static unsigned long long tsc_at_measure_end;
+static unsigned long long *tsc_at_measure_start;
+static unsigned long long *tsc_at_measure_end;
 static unsigned long long *mperf_previous_count;
 static unsigned long long *aperf_previous_count;
 static unsigned long long *mperf_current_count;
@@ -169,7 +169,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
        aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
 
        if (max_freq_mode == MAX_FREQ_TSC_REF) {
-               tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+               tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
                *percent = 100.0 * mperf_diff / tsc_diff;
                dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
                       mperf_cstates[id].name, mperf_diff, tsc_diff);
@@ -206,7 +206,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
 
        if (max_freq_mode == MAX_FREQ_TSC_REF) {
                /* Calculate max_freq from TSC count */
-               tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+               tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
                time_diff = timespec_diff_us(time_start, time_end);
                max_frequency = tsc_diff / time_diff;
        }
@@ -225,33 +225,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
 static int mperf_start(void)
 {
        int cpu;
-       unsigned long long dbg;
 
        clock_gettime(CLOCK_REALTIME, &time_start);
-       mperf_get_tsc(&tsc_at_measure_start);
 
-       for (cpu = 0; cpu < cpu_count; cpu++)
+       for (cpu = 0; cpu < cpu_count; cpu++) {
+               mperf_get_tsc(&tsc_at_measure_start[cpu]);
                mperf_init_stats(cpu);
+       }
 
-       mperf_get_tsc(&dbg);
-       dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
        return 0;
 }
 
 static int mperf_stop(void)
 {
-       unsigned long long dbg;
        int cpu;
 
-       for (cpu = 0; cpu < cpu_count; cpu++)
+       for (cpu = 0; cpu < cpu_count; cpu++) {
                mperf_measure_stats(cpu);
+               mperf_get_tsc(&tsc_at_measure_end[cpu]);
+       }
 
-       mperf_get_tsc(&tsc_at_measure_end);
        clock_gettime(CLOCK_REALTIME, &time_end);
-
-       mperf_get_tsc(&dbg);
-       dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
-
        return 0;
 }
 
@@ -353,7 +347,8 @@ struct cpuidle_monitor *mperf_register(void)
        aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
        mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
        aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
-
+       tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+       tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
        mperf_monitor.name_len = strlen(mperf_monitor.name);
        return &mperf_monitor;
 }
@@ -364,6 +359,8 @@ void mperf_unregister(void)
        free(aperf_previous_count);
        free(mperf_current_count);
        free(aperf_current_count);
+       free(tsc_at_measure_start);
+       free(tsc_at_measure_end);
        free(is_valid);
 }
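
Promoting the two TSC timestamps to per-CPU arrays matches the existing mperf/aperf buffers and stops one global snapshot from being attributed to every CPU. The allocation and teardown pairing, reduced to placeholder names:

    #include <stdlib.h>

    static unsigned long long *tsc_start, *tsc_end;

    static int monitor_alloc(int cpu_count)
    {
            /* calloc() zero-fills, so an unread slot reads as 0, not junk. */
            tsc_start = calloc(cpu_count, sizeof(*tsc_start));
            tsc_end   = calloc(cpu_count, sizeof(*tsc_end));
            if (!tsc_start || !tsc_end)
                    return -1;
            return 0;
    }

    static void monitor_free(void)
    {
            free(tsc_start);        /* free(NULL) is a no-op, safe on failure */
            free(tsc_end);
    }
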
 
index c49e540..28d2c77 100644 (file)
@@ -197,7 +197,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r
 
 $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
        $(call msg,SIGN-FILE,,$@)
-       $(Q)$(CC) $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) \
+       $(Q)$(CC) $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) \
                  $< -o $@ \
                  $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
 
index 0ce25a9..064cc5e 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright (c) 2020 Cloudflare
 #include <error.h>
 #include <netinet/tcp.h>
+#include <sys/epoll.h>
 
 #include "test_progs.h"
 #include "test_skmsg_load_helpers.skel.h"
@@ -9,8 +10,12 @@
 #include "test_sockmap_invalid_update.skel.h"
 #include "test_sockmap_skb_verdict_attach.skel.h"
 #include "test_sockmap_progs_query.skel.h"
+#include "test_sockmap_pass_prog.skel.h"
+#include "test_sockmap_drop_prog.skel.h"
 #include "bpf_iter_sockmap.skel.h"
 
+#include "sockmap_helpers.h"
+
 #define TCP_REPAIR             19      /* TCP sock is under repair right now */
 
 #define TCP_REPAIR_ON          1
@@ -350,6 +355,126 @@ out:
        test_sockmap_progs_query__destroy(skel);
 }
 
+#define MAX_EVENTS 10
+static void test_sockmap_skb_verdict_shutdown(void)
+{
+       struct epoll_event ev, events[MAX_EVENTS];
+       int n, err, map, verdict, s, c1, p1;
+       struct test_sockmap_pass_prog *skel;
+       int epollfd;
+       int zero = 0;
+       char b;
+
+       skel = test_sockmap_pass_prog__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "open_and_load"))
+               return;
+
+       verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+       map = bpf_map__fd(skel->maps.sock_map_rx);
+
+       err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+       if (!ASSERT_OK(err, "bpf_prog_attach"))
+               goto out;
+
+       s = socket_loopback(AF_INET, SOCK_STREAM);
+       if (s < 0)
+               goto out;
+       err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
+       if (err < 0)
+               goto out;
+
+       err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+       if (err < 0)
+               goto out_close;
+
+       shutdown(p1, SHUT_WR);
+
+       ev.events = EPOLLIN;
+       ev.data.fd = c1;
+
+       epollfd = epoll_create1(0);
+       if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
+               goto out_close;
+       err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
+       if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
+               goto out_close;
+       err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
+       if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
+               goto out_close;
+
+       n = recv(c1, &b, 1, SOCK_NONBLOCK);
+       ASSERT_EQ(n, 0, "recv_timeout(fin)");
+out_close:
+       close(c1);
+       close(p1);
+out:
+       test_sockmap_pass_prog__destroy(skel);
+}
+
+static void test_sockmap_skb_verdict_fionread(bool pass_prog)
+{
+       int expected, zero = 0, sent, recvd, avail;
+       int err, map, verdict, s, c0, c1, p0, p1;
+       struct test_sockmap_pass_prog *pass;
+       struct test_sockmap_drop_prog *drop;
+       char buf[256] = "0123456789";
+
+       if (pass_prog) {
+               pass = test_sockmap_pass_prog__open_and_load();
+               if (!ASSERT_OK_PTR(pass, "open_and_load"))
+                       return;
+               verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+               map = bpf_map__fd(pass->maps.sock_map_rx);
+               expected = sizeof(buf);
+       } else {
+               drop = test_sockmap_drop_prog__open_and_load();
+               if (!ASSERT_OK_PTR(drop, "open_and_load"))
+                       return;
+               verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
+               map = bpf_map__fd(drop->maps.sock_map_rx);
+               /* On drop, data is consumed immediately and copied_seq is advanced */
+               expected = 0;
+       }
+
+
+       err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+       if (!ASSERT_OK(err, "bpf_prog_attach"))
+               goto out;
+
+       s = socket_loopback(AF_INET, SOCK_STREAM);
+       if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
+               goto out;
+       err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
+       if (!ASSERT_OK(err, "create_socket_pairs(s)"))
+               goto out;
+
+       err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+       if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+               goto out_close;
+
+       sent = xsend(p1, &buf, sizeof(buf), 0);
+       ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
+       err = ioctl(c1, FIONREAD, &avail);
+       ASSERT_OK(err, "ioctl(FIONREAD) error");
+       ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
+       /* On DROP test there will be no data to read */
+       if (pass_prog) {
+               recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
+               ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
+       }
+
+out_close:
+       close(c0);
+       close(p0);
+       close(c1);
+       close(p1);
+out:
+       if (pass_prog)
+               test_sockmap_pass_prog__destroy(pass);
+       else
+               test_sockmap_drop_prog__destroy(drop);
+}
+
 void test_sockmap_basic(void)
 {
        if (test__start_subtest("sockmap create_update_free"))
@@ -384,4 +509,10 @@ void test_sockmap_basic(void)
                test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
        if (test__start_subtest("sockmap skb_verdict progs query"))
                test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
+       if (test__start_subtest("sockmap skb_verdict shutdown"))
+               test_sockmap_skb_verdict_shutdown();
+       if (test__start_subtest("sockmap skb_verdict fionread"))
+               test_sockmap_skb_verdict_fionread(true);
+       if (test__start_subtest("sockmap skb_verdict fionread on drop"))
+               test_sockmap_skb_verdict_fionread(false);
 }
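
The FIONREAD subtest hinges on the ioctl reporting how many bytes are queued for reading: sizeof(buf) when the verdict program passes traffic through, 0 when it drops (the data was consumed and copied_seq advanced). Outside the test harness, the probe itself is just:

    #include <sys/ioctl.h>

    /* Returns the number of bytes currently readable on sockfd, or -1. */
    static int queued_bytes(int sockfd)
    {
            int avail;

            if (ioctl(sockfd, FIONREAD, &avail) < 0)
                    return -1;
            return avail;
    }
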
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
new file mode 100644 (file)
index 0000000..d126654
--- /dev/null
@@ -0,0 +1,390 @@
+#ifndef __SOCKMAP_HELPERS__
+#define __SOCKMAP_HELPERS__
+
+#include <linux/vm_sockets.h>
+
+#define IO_TIMEOUT_SEC 30
+#define MAX_STRERR_LEN 256
+#define MAX_TEST_NAME 80
+
+/* workaround for older vm_sockets.h */
+#ifndef VMADDR_CID_LOCAL
+#define VMADDR_CID_LOCAL 1
+#endif
+
+#define __always_unused        __attribute__((__unused__))
+
+#define _FAIL(errnum, fmt...)                                                  \
+       ({                                                                     \
+               error_at_line(0, (errnum), __func__, __LINE__, fmt);           \
+               CHECK_FAIL(true);                                              \
+       })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg)                                                  \
+       ({                                                                     \
+               char __buf[MAX_STRERR_LEN];                                    \
+               libbpf_strerror((err), __buf, sizeof(__buf));                  \
+               FAIL("%s: %s", (msg), __buf);                                  \
+       })
+
+/* Wrappers that fail the test on error and report it. */
+
+#define xaccept_nonblock(fd, addr, len)                                        \
+       ({                                                                     \
+               int __ret =                                                    \
+                       accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC);   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("accept");                                  \
+               __ret;                                                         \
+       })
+
+#define xbind(fd, addr, len)                                                   \
+       ({                                                                     \
+               int __ret = bind((fd), (addr), (len));                         \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("bind");                                    \
+               __ret;                                                         \
+       })
+
+#define xclose(fd)                                                             \
+       ({                                                                     \
+               int __ret = close((fd));                                       \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("close");                                   \
+               __ret;                                                         \
+       })
+
+#define xconnect(fd, addr, len)                                                \
+       ({                                                                     \
+               int __ret = connect((fd), (addr), (len));                      \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("connect");                                 \
+               __ret;                                                         \
+       })
+
+#define xgetsockname(fd, addr, len)                                            \
+       ({                                                                     \
+               int __ret = getsockname((fd), (addr), (len));                  \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("getsockname");                             \
+               __ret;                                                         \
+       })
+
+#define xgetsockopt(fd, level, name, val, len)                                 \
+       ({                                                                     \
+               int __ret = getsockopt((fd), (level), (name), (val), (len));   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("getsockopt(" #name ")");                   \
+               __ret;                                                         \
+       })
+
+#define xlisten(fd, backlog)                                                   \
+       ({                                                                     \
+               int __ret = listen((fd), (backlog));                           \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("listen");                                  \
+               __ret;                                                         \
+       })
+
+#define xsetsockopt(fd, level, name, val, len)                                 \
+       ({                                                                     \
+               int __ret = setsockopt((fd), (level), (name), (val), (len));   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("setsockopt(" #name ")");                   \
+               __ret;                                                         \
+       })
+
+#define xsend(fd, buf, len, flags)                                             \
+       ({                                                                     \
+               ssize_t __ret = send((fd), (buf), (len), (flags));             \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("send");                                    \
+               __ret;                                                         \
+       })
+
+#define xrecv_nonblock(fd, buf, len, flags)                                    \
+       ({                                                                     \
+               ssize_t __ret = recv_timeout((fd), (buf), (len), (flags),      \
+                                            IO_TIMEOUT_SEC);                  \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("recv");                                    \
+               __ret;                                                         \
+       })
+
+#define xsocket(family, sotype, flags)                                         \
+       ({                                                                     \
+               int __ret = socket(family, sotype, flags);                     \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("socket");                                  \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_delete_elem(fd, key)                                          \
+       ({                                                                     \
+               int __ret = bpf_map_delete_elem((fd), (key));                  \
+               if (__ret < 0)                                                 \
+                       FAIL_ERRNO("map_delete");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_lookup_elem(fd, key, val)                                     \
+       ({                                                                     \
+               int __ret = bpf_map_lookup_elem((fd), (key), (val));           \
+               if (__ret < 0)                                                 \
+                       FAIL_ERRNO("map_lookup");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_update_elem(fd, key, val, flags)                              \
+       ({                                                                     \
+               int __ret = bpf_map_update_elem((fd), (key), (val), (flags));  \
+               if (__ret < 0)                                                 \
+                       FAIL_ERRNO("map_update");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_prog_attach(prog, target, type, flags)                            \
+       ({                                                                     \
+               int __ret =                                                    \
+                       bpf_prog_attach((prog), (target), (type), (flags));    \
+               if (__ret < 0)                                                 \
+                       FAIL_ERRNO("prog_attach(" #type ")");                  \
+               __ret;                                                         \
+       })
+
+#define xbpf_prog_detach2(prog, target, type)                                  \
+       ({                                                                     \
+               int __ret = bpf_prog_detach2((prog), (target), (type));        \
+               if (__ret < 0)                                                 \
+                       FAIL_ERRNO("prog_detach2(" #type ")");                 \
+               __ret;                                                         \
+       })
+
+#define xpthread_create(thread, attr, func, arg)                               \
+       ({                                                                     \
+               int __ret = pthread_create((thread), (attr), (func), (arg));   \
+               errno = __ret;                                                 \
+               if (__ret)                                                     \
+                       FAIL_ERRNO("pthread_create");                          \
+               __ret;                                                         \
+       })
+
+#define xpthread_join(thread, retval)                                          \
+       ({                                                                     \
+               int __ret = pthread_join((thread), (retval));                  \
+               errno = __ret;                                                 \
+               if (__ret)                                                     \
+                       FAIL_ERRNO("pthread_join");                            \
+               __ret;                                                         \
+       })
+
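+/* Block until fd is readable or timeout_sec elapses; -1 with errno = ETIME on timeout */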
+static inline int poll_read(int fd, unsigned int timeout_sec)
+{
+       struct timeval timeout = { .tv_sec = timeout_sec };
+       fd_set rfds;
+       int r;
+
+       FD_ZERO(&rfds);
+       FD_SET(fd, &rfds);
+
+       r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+       if (r == 0)
+               errno = ETIME;
+
+       return r == 1 ? 0 : -1;
+}
+
+static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+                                unsigned int timeout_sec)
+{
+       if (poll_read(fd, timeout_sec))
+               return -1;
+
+       return accept(fd, addr, len);
+}
+
+static inline int recv_timeout(int fd, void *buf, size_t len, int flags,
+                              unsigned int timeout_sec)
+{
+       if (poll_read(fd, timeout_sec))
+               return -1;
+
+       return recv(fd, buf, len, flags);
+}
+
+static inline void init_addr_loopback4(struct sockaddr_storage *ss,
+                                      socklen_t *len)
+{
+       struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+       addr4->sin_family = AF_INET;
+       addr4->sin_port = 0;
+       addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+       *len = sizeof(*addr4);
+}
+
+static inline void init_addr_loopback6(struct sockaddr_storage *ss,
+                                      socklen_t *len)
+{
+       struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+       addr6->sin6_family = AF_INET6;
+       addr6->sin6_port = 0;
+       addr6->sin6_addr = in6addr_loopback;
+       *len = sizeof(*addr6);
+}
+
+static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss,
+                                           socklen_t *len)
+{
+       struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
+
+       addr->svm_family = AF_VSOCK;
+       addr->svm_port = VMADDR_PORT_ANY;
+       addr->svm_cid = VMADDR_CID_LOCAL;
+       *len = sizeof(*addr);
+}
+
+static inline void init_addr_loopback(int family, struct sockaddr_storage *ss,
+                                     socklen_t *len)
+{
+       switch (family) {
+       case AF_INET:
+               init_addr_loopback4(ss, len);
+               return;
+       case AF_INET6:
+               init_addr_loopback6(ss, len);
+               return;
+       case AF_VSOCK:
+               init_addr_loopback_vsock(ss, len);
+               return;
+       default:
+               FAIL("unsupported address family %d", family);
+       }
+}
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+       return (struct sockaddr *)ss;
+}
+
+static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
+{
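+       /* sockmap values are socket FDs; the map accepts 4- or 8-byte values */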
+       u64 value;
+       u32 key;
+       int err;
+
+       key = 0;
+       value = fd1;
+       err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+       if (err)
+               return err;
+
+       key = 1;
+       value = fd2;
+       return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+}
+
+static inline int create_pair(int s, int family, int sotype, int *c, int *p)
+{
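+       /* Connect a new client to listener s and accept its peer; 0 or positive errno */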
+       struct sockaddr_storage addr;
+       socklen_t len;
+       int err = 0;
+
+       len = sizeof(addr);
+       err = xgetsockname(s, sockaddr(&addr), &len);
+       if (err)
+               return err;
+
+       *c = xsocket(family, sotype, 0);
+       if (*c < 0)
+               return errno;
+       err = xconnect(*c, sockaddr(&addr), len);
+       if (err) {
+               err = errno;
+               goto close_cli0;
+       }
+
+       *p = xaccept_nonblock(s, NULL, NULL);
+       if (*p < 0) {
+               err = errno;
+               goto close_cli0;
+       }
+       return err;
+close_cli0:
+       close(*c);
+       return err;
+}
+
+static inline int create_socket_pairs(int s, int family, int sotype,
+                                     int *c0, int *c1, int *p0, int *p1)
+{
+       int err;
+
+       err = create_pair(s, family, sotype, c0, p0);
+       if (err)
+               return err;
+
+       err = create_pair(s, family, sotype, c1, p1);
+       if (err) {
+               close(*c0);
+               close(*p0);
+       }
+       return err;
+}
+
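+/* Mark s SO_REUSEPORT and attach progfd to steer reuseport socket selection */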
+static inline int enable_reuseport(int s, int progfd)
+{
+       int err, one = 1;
+
+       err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+       if (err)
+               return -1;
+       err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+                         sizeof(progfd));
+       if (err)
+               return -1;
+
+       return 0;
+}
+
+static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
+{
+       struct sockaddr_storage addr;
+       socklen_t len;
+       int err, s;
+
+       init_addr_loopback(family, &addr, &len);
+
+       s = xsocket(family, sotype, 0);
+       if (s == -1)
+               return -1;
+
+       if (progfd >= 0)
+               enable_reuseport(s, progfd);
+
+       err = xbind(s, sockaddr(&addr), len);
+       if (err)
+               goto close;
+
+       if (sotype & SOCK_DGRAM)
+               return s;
+
+       err = xlisten(s, SOMAXCONN);
+       if (err)
+               goto close;
+
+       return s;
+close:
+       xclose(s);
+       return -1;
+}
+
+static inline int socket_loopback(int family, int sotype)
+{
+       return socket_loopback_reuseport(family, sotype, -1);
+}
+
+#endif /* __SOCKMAP_HELPERS__ */
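
A usage sketch (illustrative only, not part of this series) of how a test can
drive the shared helpers above; every name comes from sockmap_helpers.h:

	static void __always_unused example_loopback_roundtrip(void)
	{
		int s, c0, c1, p0, p1;
		char b;

		s = socket_loopback(AF_INET, SOCK_STREAM);
		if (s < 0)
			return;
		if (!create_socket_pairs(s, AF_INET, SOCK_STREAM,
					 &c0, &c1, &p0, &p1)) {
			xsend(c0, "a", 1, 0);	/* c0 pairs with peer p0 */
			recv_timeout(p0, &b, 1, 0, IO_TIMEOUT_SEC);
			xclose(c0);
			xclose(c1);
			xclose(p0);
			xclose(p1);
		}
		xclose(s);
	}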
index 141c1e5..b4f6f3a 100644 (file)
 #include <unistd.h>
 #include <linux/vm_sockets.h>
 
-/* workaround for older vm_sockets.h */
-#ifndef VMADDR_CID_LOCAL
-#define VMADDR_CID_LOCAL 1
-#endif
-
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 
 #include "test_progs.h"
 #include "test_sockmap_listen.skel.h"
 
-#define IO_TIMEOUT_SEC 30
-#define MAX_STRERR_LEN 256
-#define MAX_TEST_NAME 80
-
-#define __always_unused        __attribute__((__unused__))
-
-#define _FAIL(errnum, fmt...)                                                  \
-       ({                                                                     \
-               error_at_line(0, (errnum), __func__, __LINE__, fmt);           \
-               CHECK_FAIL(true);                                              \
-       })
-#define FAIL(fmt...) _FAIL(0, fmt)
-#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
-#define FAIL_LIBBPF(err, msg)                                                  \
-       ({                                                                     \
-               char __buf[MAX_STRERR_LEN];                                    \
-               libbpf_strerror((err), __buf, sizeof(__buf));                  \
-               FAIL("%s: %s", (msg), __buf);                                  \
-       })
-
-/* Wrappers that fail the test on error and report it. */
-
-#define xaccept_nonblock(fd, addr, len)                                        \
-       ({                                                                     \
-               int __ret =                                                    \
-                       accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC);   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("accept");                                  \
-               __ret;                                                         \
-       })
-
-#define xbind(fd, addr, len)                                                   \
-       ({                                                                     \
-               int __ret = bind((fd), (addr), (len));                         \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("bind");                                    \
-               __ret;                                                         \
-       })
-
-#define xclose(fd)                                                             \
-       ({                                                                     \
-               int __ret = close((fd));                                       \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("close");                                   \
-               __ret;                                                         \
-       })
-
-#define xconnect(fd, addr, len)                                                \
-       ({                                                                     \
-               int __ret = connect((fd), (addr), (len));                      \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("connect");                                 \
-               __ret;                                                         \
-       })
-
-#define xgetsockname(fd, addr, len)                                            \
-       ({                                                                     \
-               int __ret = getsockname((fd), (addr), (len));                  \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("getsockname");                             \
-               __ret;                                                         \
-       })
-
-#define xgetsockopt(fd, level, name, val, len)                                 \
-       ({                                                                     \
-               int __ret = getsockopt((fd), (level), (name), (val), (len));   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("getsockopt(" #name ")");                   \
-               __ret;                                                         \
-       })
-
-#define xlisten(fd, backlog)                                                   \
-       ({                                                                     \
-               int __ret = listen((fd), (backlog));                           \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("listen");                                  \
-               __ret;                                                         \
-       })
-
-#define xsetsockopt(fd, level, name, val, len)                                 \
-       ({                                                                     \
-               int __ret = setsockopt((fd), (level), (name), (val), (len));   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("setsockopt(" #name ")");                   \
-               __ret;                                                         \
-       })
-
-#define xsend(fd, buf, len, flags)                                             \
-       ({                                                                     \
-               ssize_t __ret = send((fd), (buf), (len), (flags));             \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("send");                                    \
-               __ret;                                                         \
-       })
-
-#define xrecv_nonblock(fd, buf, len, flags)                                    \
-       ({                                                                     \
-               ssize_t __ret = recv_timeout((fd), (buf), (len), (flags),      \
-                                            IO_TIMEOUT_SEC);                  \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("recv");                                    \
-               __ret;                                                         \
-       })
-
-#define xsocket(family, sotype, flags)                                         \
-       ({                                                                     \
-               int __ret = socket(family, sotype, flags);                     \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("socket");                                  \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_delete_elem(fd, key)                                          \
-       ({                                                                     \
-               int __ret = bpf_map_delete_elem((fd), (key));                  \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_delete");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_lookup_elem(fd, key, val)                                     \
-       ({                                                                     \
-               int __ret = bpf_map_lookup_elem((fd), (key), (val));           \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_lookup");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_update_elem(fd, key, val, flags)                              \
-       ({                                                                     \
-               int __ret = bpf_map_update_elem((fd), (key), (val), (flags));  \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_update");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_prog_attach(prog, target, type, flags)                            \
-       ({                                                                     \
-               int __ret =                                                    \
-                       bpf_prog_attach((prog), (target), (type), (flags));    \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("prog_attach(" #type ")");                  \
-               __ret;                                                         \
-       })
-
-#define xbpf_prog_detach2(prog, target, type)                                  \
-       ({                                                                     \
-               int __ret = bpf_prog_detach2((prog), (target), (type));        \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("prog_detach2(" #type ")");                 \
-               __ret;                                                         \
-       })
-
-#define xpthread_create(thread, attr, func, arg)                               \
-       ({                                                                     \
-               int __ret = pthread_create((thread), (attr), (func), (arg));   \
-               errno = __ret;                                                 \
-               if (__ret)                                                     \
-                       FAIL_ERRNO("pthread_create");                          \
-               __ret;                                                         \
-       })
-
-#define xpthread_join(thread, retval)                                          \
-       ({                                                                     \
-               int __ret = pthread_join((thread), (retval));                  \
-               errno = __ret;                                                 \
-               if (__ret)                                                     \
-                       FAIL_ERRNO("pthread_join");                            \
-               __ret;                                                         \
-       })
-
-static int poll_read(int fd, unsigned int timeout_sec)
-{
-       struct timeval timeout = { .tv_sec = timeout_sec };
-       fd_set rfds;
-       int r;
-
-       FD_ZERO(&rfds);
-       FD_SET(fd, &rfds);
-
-       r = select(fd + 1, &rfds, NULL, NULL, &timeout);
-       if (r == 0)
-               errno = ETIME;
-
-       return r == 1 ? 0 : -1;
-}
-
-static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
-                         unsigned int timeout_sec)
-{
-       if (poll_read(fd, timeout_sec))
-               return -1;
-
-       return accept(fd, addr, len);
-}
-
-static int recv_timeout(int fd, void *buf, size_t len, int flags,
-                       unsigned int timeout_sec)
-{
-       if (poll_read(fd, timeout_sec))
-               return -1;
-
-       return recv(fd, buf, len, flags);
-}
-
-static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
-
-       addr4->sin_family = AF_INET;
-       addr4->sin_port = 0;
-       addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       *len = sizeof(*addr4);
-}
-
-static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
-
-       addr6->sin6_family = AF_INET6;
-       addr6->sin6_port = 0;
-       addr6->sin6_addr = in6addr_loopback;
-       *len = sizeof(*addr6);
-}
-
-static void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
-
-       addr->svm_family = AF_VSOCK;
-       addr->svm_port = VMADDR_PORT_ANY;
-       addr->svm_cid = VMADDR_CID_LOCAL;
-       *len = sizeof(*addr);
-}
-
-static void init_addr_loopback(int family, struct sockaddr_storage *ss,
-                              socklen_t *len)
-{
-       switch (family) {
-       case AF_INET:
-               init_addr_loopback4(ss, len);
-               return;
-       case AF_INET6:
-               init_addr_loopback6(ss, len);
-               return;
-       case AF_VSOCK:
-               init_addr_loopback_vsock(ss, len);
-               return;
-       default:
-               FAIL("unsupported address family %d", family);
-       }
-}
-
-static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
-{
-       return (struct sockaddr *)ss;
-}
-
-static int enable_reuseport(int s, int progfd)
-{
-       int err, one = 1;
-
-       err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
-       if (err)
-               return -1;
-       err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
-                         sizeof(progfd));
-       if (err)
-               return -1;
-
-       return 0;
-}
-
-static int socket_loopback_reuseport(int family, int sotype, int progfd)
-{
-       struct sockaddr_storage addr;
-       socklen_t len;
-       int err, s;
-
-       init_addr_loopback(family, &addr, &len);
-
-       s = xsocket(family, sotype, 0);
-       if (s == -1)
-               return -1;
-
-       if (progfd >= 0)
-               enable_reuseport(s, progfd);
-
-       err = xbind(s, sockaddr(&addr), len);
-       if (err)
-               goto close;
-
-       if (sotype & SOCK_DGRAM)
-               return s;
-
-       err = xlisten(s, SOMAXCONN);
-       if (err)
-               goto close;
-
-       return s;
-close:
-       xclose(s);
-       return -1;
-}
-
-static int socket_loopback(int family, int sotype)
-{
-       return socket_loopback_reuseport(family, sotype, -1);
-}
+#include "sockmap_helpers.h"
 
 static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
                                int family, int sotype, int mapfd)
@@ -984,31 +671,12 @@ static const char *redir_mode_str(enum redir_mode mode)
        }
 }
 
-static int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
-{
-       u64 value;
-       u32 key;
-       int err;
-
-       key = 0;
-       value = fd1;
-       err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
-       if (err)
-               return err;
-
-       key = 1;
-       value = fd2;
-       return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
-}
-
 static void redir_to_connected(int family, int sotype, int sock_mapfd,
                               int verd_mapfd, enum redir_mode mode)
 {
        const char *log_prefix = redir_mode_str(mode);
-       struct sockaddr_storage addr;
        int s, c0, c1, p0, p1;
        unsigned int pass;
-       socklen_t len;
        int err, n;
        u32 key;
        char b;
@@ -1019,36 +687,13 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (s < 0)
                return;
 
-       len = sizeof(addr);
-       err = xgetsockname(s, sockaddr(&addr), &len);
+       err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1);
        if (err)
                goto close_srv;
 
-       c0 = xsocket(family, sotype, 0);
-       if (c0 < 0)
-               goto close_srv;
-       err = xconnect(c0, sockaddr(&addr), len);
-       if (err)
-               goto close_cli0;
-
-       p0 = xaccept_nonblock(s, NULL, NULL);
-       if (p0 < 0)
-               goto close_cli0;
-
-       c1 = xsocket(family, sotype, 0);
-       if (c1 < 0)
-               goto close_peer0;
-       err = xconnect(c1, sockaddr(&addr), len);
-       if (err)
-               goto close_cli1;
-
-       p1 = xaccept_nonblock(s, NULL, NULL);
-       if (p1 < 0)
-               goto close_cli1;
-
        err = add_to_sockmap(sock_mapfd, p0, p1);
        if (err)
-               goto close_peer1;
+               goto close;
 
        n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
        if (n < 0)
@@ -1056,12 +701,12 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (n == 0)
                FAIL("%s: incomplete write", log_prefix);
        if (n < 1)
-               goto close_peer1;
+               goto close;
 
        key = SK_PASS;
        err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
        if (err)
-               goto close_peer1;
+               goto close;
        if (pass != 1)
                FAIL("%s: want pass count 1, have %d", log_prefix, pass);
        n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC);
@@ -1070,13 +715,10 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (n == 0)
                FAIL("%s: incomplete recv", log_prefix);
 
-close_peer1:
+close:
        xclose(p1);
-close_cli1:
        xclose(c1);
-close_peer0:
        xclose(p0);
-close_cli0:
        xclose(c0);
 close_srv:
        xclose(s);
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
new file mode 100644 (file)
index 0000000..2931480
--- /dev/null
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_msg SEC(".maps");
+
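+/* Drop every skb: data is consumed immediately, so nothing is queued for recv() */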
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+       return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
index baf9ebc..99d2ea9 100644 (file)
@@ -191,7 +191,7 @@ SEC("sockops")
 int bpf_sockmap(struct bpf_sock_ops *skops)
 {
        __u32 lport, rport;
-       int op, err, ret;
+       int op, ret;
 
        op = (int) skops->op;
 
@@ -203,10 +203,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                if (lport == 10000) {
                        ret = 1;
 #ifdef SOCKMAP
-                       err = bpf_sock_map_update(skops, &sock_map, &ret,
+                       bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
 #else
-                       err = bpf_sock_hash_update(skops, &sock_map, &ret,
+                       bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
 #endif
                }
@@ -218,10 +218,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                if (bpf_ntohl(rport) == 10001) {
                        ret = 10;
 #ifdef SOCKMAP
-                       err = bpf_sock_map_update(skops, &sock_map, &ret,
+                       bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
 #else
-                       err = bpf_sock_hash_update(skops, &sock_map, &ret,
+                       bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
 #endif
                }
@@ -230,8 +230,6 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                break;
        }
 
-       __sink(err);
-
        return 0;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
new file mode 100644 (file)
index 0000000..1d86a71
--- /dev/null
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_msg SEC(".maps");
+
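+/* Pass every skb: the payload stays queued, so FIONREAD sees the full send */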
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+       return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
index d6e106f..a1e955d 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 all:
 
-TEST_PROGS := ftracetest
+TEST_PROGS_EXTENDED := ftracetest
+TEST_PROGS := ftracetest-ktap
 TEST_FILES := test.d settings
 EXTRA_CLEAN := $(OUTPUT)/logs/*
 
index c3311c8..2506621 100755 (executable)
@@ -13,6 +13,7 @@ echo "Usage: ftracetest [options] [testcase(s)] [testcase-directory(s)]"
 echo " Options:"
 echo "         -h|--help  Show help message"
 echo "         -k|--keep  Keep passed test logs"
+echo "         -K|--ktap  Output in KTAP format"
 echo "         -v|--verbose Increase verbosity of test messages"
 echo "         -vv        Alias of -v -v (Show all results in stdout)"
 echo "         -vvv       Alias of -v -v -v (Show all commands immediately)"
@@ -85,6 +86,10 @@ parse_opts() { # opts
       KEEP_LOG=1
       shift 1
     ;;
+    --ktap|-K)
+      KTAP=1
+      shift 1
+    ;;
     --verbose|-v|-vv|-vvv)
       if [ $VERBOSE -eq -1 ]; then
        usage "--console can not use with --verbose"
@@ -178,6 +183,7 @@ TEST_DIR=$TOP_DIR/test.d
 TEST_CASES=`find_testcases $TEST_DIR`
 LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
 KEEP_LOG=0
+KTAP=0
 DEBUG=0
 VERBOSE=0
 UNSUPPORTED_RESULT=0
@@ -229,7 +235,7 @@ prlog() { # messages
     newline=
     shift
   fi
-  printf "$*$newline"
+  [ "$KTAP" != "1" ] && printf "$*$newline"
   [ "$LOG_FILE" ] && printf "$*$newline" | strip_esc >> $LOG_FILE
 }
 catlog() { #file
@@ -260,11 +266,11 @@ TOTAL_RESULT=0
 
 INSTANCE=
 CASENO=0
+CASENAME=
 
 testcase() { # testfile
   CASENO=$((CASENO+1))
-  desc=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
-  prlog -n "[$CASENO]$INSTANCE$desc"
+  CASENAME=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
 }
 
 checkreq() { # testfile
@@ -277,40 +283,68 @@ test_on_instance() { # testfile
   grep -q "^#[ \t]*flags:.*instance" $1
 }
 
+ktaptest() { # result comment
+  if [ "$KTAP" != "1" ]; then
+    return
+  fi
+
+  local result=
+  if [ "$1" = "1" ]; then
+    result="ok"
+  else
+    result="not ok"
+  fi
+  shift
+
+  local comment=$*
+  if [ "$comment" != "" ]; then
+    comment="# $comment"
+  fi
+
+  echo $CASENO $result $INSTANCE$CASENAME $comment
+}
+
 eval_result() { # sigval
   case $1 in
     $PASS)
       prlog "  [${color_green}PASS${color_reset}]"
+      ktaptest 1
       PASSED_CASES="$PASSED_CASES $CASENO"
       return 0
     ;;
     $FAIL)
       prlog "  [${color_red}FAIL${color_reset}]"
+      ktaptest 0
       FAILED_CASES="$FAILED_CASES $CASENO"
       return 1 # this is a bug.
     ;;
     $UNRESOLVED)
       prlog "  [${color_blue}UNRESOLVED${color_reset}]"
+      ktaptest 0 UNRESOLVED
       UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
       return $UNRESOLVED_RESULT # depends on use case
     ;;
     $UNTESTED)
       prlog "  [${color_blue}UNTESTED${color_reset}]"
+      ktaptest 1 SKIP
       UNTESTED_CASES="$UNTESTED_CASES $CASENO"
       return 0
     ;;
     $UNSUPPORTED)
       prlog "  [${color_blue}UNSUPPORTED${color_reset}]"
+      ktaptest 1 SKIP
       UNSUPPORTED_CASES="$UNSUPPORTED_CASES $CASENO"
       return $UNSUPPORTED_RESULT # depends on use case
     ;;
     $XFAIL)
       prlog "  [${color_green}XFAIL${color_reset}]"
+      ktaptest 1 XFAIL
       XFAILED_CASES="$XFAILED_CASES $CASENO"
       return 0
     ;;
     *)
       prlog "  [${color_blue}UNDEFINED${color_reset}]"
+      ktaptest 0 error
       UNDEFINED_CASES="$UNDEFINED_CASES $CASENO"
       return 1 # this must be a test bug
     ;;
@@ -371,6 +405,7 @@ __run_test() { # testfile
 run_test() { # testfile
   local testname=`basename $1`
   testcase $1
+  prlog -n "[$CASENO]$INSTANCE$CASENAME"
   if [ ! -z "$LOG_FILE" ] ; then
     local testlog=`mktemp $LOG_DIR/${CASENO}-${testname}-log.XXXXXX`
   else
@@ -405,6 +440,17 @@ run_test() { # testfile
 # load in the helper functions
 . $TEST_DIR/functions
 
+if [ "$KTAP" = "1" ]; then
+  echo "TAP version 13"
+
+  casecount=`echo $TEST_CASES | wc -w`
+  for t in $TEST_CASES; do
+    test_on_instance $t || continue
+    casecount=$((casecount+1))
+  done
+  echo "1..${casecount}"
+fi
+
 # Main loop
 for t in $TEST_CASES; do
   run_test $t
@@ -439,6 +485,17 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
 
+if [ "$KTAP" = "1" ]; then
+  echo -n "# Totals:"
+  echo -n " pass:"`echo $PASSED_CASES | wc -w`
+  echo -n " faii:"`echo $FAILED_CASES | wc -w`
+  echo -n " xfail:"`echo $XFAILED_CASES | wc -w`
+  echo -n " xpass:0"
+  echo -n " skip:"`echo $UNTESTED_CASES $UNSUPPORTED_CASES | wc -w`
+  echo -n " error:"`echo $UNRESOLVED_CASES $UNDEFINED_CASES | wc -w`
+  echo
+fi
+
 cleanup
 
 # if no error, return 0
diff --git a/tools/testing/selftests/ftrace/ftracetest-ktap b/tools/testing/selftests/ftrace/ftracetest-ktap
new file mode 100755 (executable)
index 0000000..b328467
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ftracetest-ktap: Wrapper to integrate ftracetest with the kselftest runner
+#
+# Copyright (C) Arm Ltd., 2023
+
+./ftracetest -K
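
With -K, the harness prints plain KTAP on stdout. An illustrative (invented)
two-case run would look like:

	TAP version 13
	1..2
	1 ok Basic trace file check
	2 not ok event tracing - enable/disable with event level files # UNRESOLVED
	# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:1

Note the result lines use the script's "$CASENO $result $CASENAME" ordering.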
index a47b26a..0f5e88c 100755 (executable)
@@ -2283,7 +2283,7 @@ EOF
 ################################################################################
 # main
 
-while getopts :t:pP46hv:w: o
+while getopts :t:pP46hvw: o
 do
        case $o in
                t) TESTS=$OPTARG;;
index 7da8ec8..35d89df 100755 (executable)
@@ -68,7 +68,7 @@ setup()
 cleanup()
 {
        $IP link del dev dummy0 &> /dev/null
-       ip netns del ns1
+       ip netns del ns1 &> /dev/null
        ip netns del ns2 &> /dev/null
 }
 
index 1003119..f962823 100755 (executable)
@@ -232,10 +232,14 @@ setup_rt_networking()
        local nsname=rt-${rt}
 
        ip netns add ${nsname}
+
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
        ip link set veth-rt-${rt} netns ${nsname}
        ip -netns ${nsname} link set veth-rt-${rt} name veth0
 
-       ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0
+       ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
        ip -netns ${nsname} link set veth0 up
        ip -netns ${nsname} link set lo up
 
@@ -254,6 +258,12 @@ setup_hs()
 
        # set the networking for the host
        ip netns add ${hsname}
+
+       # disable the rp_filter otherwise the kernel gets confused about how
+       # to route decap ipv4 packets.
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
        ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
        ip -netns ${hsname} link set ${rtveth} netns ${rtname}
        ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
@@ -272,11 +282,6 @@ setup_hs()
 
        ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
 
-       # disable the rp_filter otherwise the kernel gets confused about how
-       # to route decap ipv4 packets.
-       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
-
        ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
 }
 
index 75af864..50aab6b 100644 (file)
@@ -17,6 +17,7 @@ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
               -fno-stack-protector -mrdrnd $(INCLUDES)
 
 TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+TEST_FILES := $(OUTPUT)/test_encl.elf
 
 ifeq ($(CAN_BUILD_X86_64), 1)
 all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
index cb5c13e..479802a 100644 (file)
@@ -3962,18 +3962,19 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
-       r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
-       BUG_ON(r == -EBUSY);
+       r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
        if (r)
                goto unlock_vcpu_destroy;
 
        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
-       if (r < 0) {
-               xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
-               kvm_put_kvm_no_destroy(kvm);
-               goto unlock_vcpu_destroy;
+       if (r < 0)
+               goto kvm_put_xa_release;
+
+       if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
+               r = -EINVAL;
+               goto kvm_put_xa_release;
        }
 
        /*
@@ -3988,6 +3989,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        kvm_create_vcpu_debugfs(vcpu);
        return r;
 
+kvm_put_xa_release:
+       kvm_put_kvm_no_destroy(kvm);
+       xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
 unlock_vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_dirty_ring_free(&vcpu->dirty_ring);
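
The vcpu_array conversion is the standard xarray reserve-then-publish pattern:
reserve the index while failure is still private to the creator, and publish
only once nothing can fail anymore. A generic sketch of the pattern
(illustrative, not KVM code; make_visible() is a hypothetical step that may
still fail):

	#include <linux/xarray.h>

	static int publish(struct xarray *xa, unsigned long idx, void *obj)
	{
		int err = xa_reserve(xa, idx, GFP_KERNEL);

		if (err)
			return err;

		if (make_visible(obj)) {	/* hypothetical failure point */
			xa_release(xa, idx);	/* nobody ever saw the slot */
			return -EINVAL;
		}

		/* storing into a reserved slot needs no allocation, cannot fail */
		xa_store(xa, idx, obj, 0);
		return 0;
	}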
@@ -5184,7 +5188,20 @@ static void hardware_disable_all(void)
 static int hardware_enable_all(void)
 {
        atomic_t failed = ATOMIC_INIT(0);
-       int r = 0;
+       int r;
+
+       /*
+        * Do not enable hardware virtualization if the system is going down.
+        * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+        * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
+        * after kvm_reboot() is called.  Note, this relies on system_state
+        * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
+        * hook instead of registering a dedicated reboot notifier (the latter
+        * runs before system_state is updated).
+        */
+       if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+           system_state == SYSTEM_RESTART)
+               return -EBUSY;
 
        /*
         * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
@@ -5197,6 +5214,8 @@ static int hardware_enable_all(void)
        cpus_read_lock();
        mutex_lock(&kvm_lock);
 
+       r = 0;
+
        kvm_usage_count++;
        if (kvm_usage_count == 1) {
                on_each_cpu(hardware_enable_nolock, &failed, 1);
@@ -5213,26 +5232,24 @@ static int hardware_enable_all(void)
        return r;
 }
 
-static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
-                     void *v)
+static void kvm_shutdown(void)
 {
        /*
-        * Some (well, at least mine) BIOSes hang on reboot if
-        * in vmx root mode.
-        *
-        * And Intel TXT required VMX off for all cpu when system shutdown.
+        * Disable hardware virtualization and set kvm_rebooting to indicate
+        * that KVM has asynchronously disabled hardware virtualization, i.e.
+        * that relevant errors and exceptions aren't entirely unexpected.
+        * Some flavors of hardware virtualization need to be disabled before
+        * transferring control to firmware (to perform shutdown/reboot), e.g.
+        * on x86, virtualization can block INIT interrupts, which are used by
+        * firmware to pull APs back under firmware control.  Note, this path
+        * is used for both shutdown and reboot scenarios, i.e. neither name is
+        * 100% comprehensive.
         */
        pr_info("kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable_nolock, NULL, 1);
-       return NOTIFY_OK;
 }
 
-static struct notifier_block kvm_reboot_notifier = {
-       .notifier_call = kvm_reboot,
-       .priority = 0,
-};
-
 static int kvm_suspend(void)
 {
        /*
@@ -5263,6 +5280,7 @@ static void kvm_resume(void)
 static struct syscore_ops kvm_syscore_ops = {
        .suspend = kvm_suspend,
        .resume = kvm_resume,
+       .shutdown = kvm_shutdown,
 };
 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
 static int hardware_enable_all(void)
@@ -5967,7 +5985,6 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
        if (r)
                return r;
 
-       register_reboot_notifier(&kvm_reboot_notifier);
        register_syscore_ops(&kvm_syscore_ops);
 #endif
 
@@ -6039,7 +6056,6 @@ err_cpu_kick_mask:
 err_vcpu_cache:
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
-       unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
        return r;
@@ -6065,7 +6081,6 @@ void kvm_exit(void)
        kvm_async_pf_deinit();
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
-       unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
        kvm_irqfd_exit();