Merge tag 'nfs-for-6.6-4' of git://git.linux-nfs.org/projects/anna/linux-nfs
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 Oct 2023 21:04:53 +0000 (14:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 20 Oct 2023 21:04:53 +0000 (14:04 -0700)
Pull NFS client fixes from Anna Schumaker:
 "Stable Fix:
   - Fix a pNFS hang in nfs4_evict_inode()

  Fixes:
   - Force update of suid/sgid bits after an NFS v4.2 ALLOCATE op
   - Fix a potential oops in nfs_inode_remove_request()
   - Check the validity of the layout pointer in ff_layout_mirror_prepare_stats()
   - Fix incorrectly marking the pNFS MDS with USE_PNFS_DS in some cases"

* tag 'nfs-for-6.6-4' of git://git.linux-nfs.org/projects/anna/linux-nfs:
  NFSv4.1: fixup use EXCHGID4_FLAG_USE_PNFS_DS for DS server
  pNFS/flexfiles: Check the layout validity in ff_layout_mirror_prepare_stats
  pNFS: Fix a hang in nfs4_evict_inode()
  NFS: Fix potential oops in nfs_inode_remove_request()
  nfs42: client needs to strip file mode's suid/sgid bit after ALLOCATE op
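
As a side note on the last fix above: the NFSv4.2 ALLOCATE operation is what an
NFS client sends for fallocate() preallocation, so the suid/sgid stripping can be
observed from userspace. The following is a minimal sketch only, not part of this
pull; the mount point and file path are assumptions, and it must run without
CAP_FSETID (i.e. as a regular user) for the kernel to be expected to strip the bit.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* Assumed NFSv4.2 mount; the path is hypothetical. */
	const char *path = "/mnt/nfs/suid-test";
	struct stat st;
	int fd = open(path, O_CREAT | O_RDWR, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Set the suid bit, then preallocate; fallocate() maps to NFS ALLOCATE. */
	fchmod(fd, 04755);
	if (fallocate(fd, 0, 0, 4096) < 0)
		perror("fallocate");
	fstat(fd, &st);
	printf("mode after fallocate: %04o (suid %s)\n",
	       (unsigned)(st.st_mode & 07777),
	       (st.st_mode & S_ISUID) ? "still set" : "cleared");
	close(fd);
	return 0;
}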

543 files changed:
Documentation/ABI/testing/sysfs-class-firmware
Documentation/core-api/workqueue.rst
Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dma-1.0.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7292.yaml
Documentation/devicetree/bindings/iio/light/rohm,bu27010.yaml
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
Documentation/devicetree/bindings/interrupt-controller/renesas,rzg2l-irqc.yaml
Documentation/devicetree/bindings/mmc/sdhci-msm.yaml
Documentation/devicetree/bindings/sound/cirrus,cs42l43.yaml
Documentation/devicetree/bindings/sound/fsl,micfil.yaml
Documentation/devicetree/bindings/sound/rockchip-spdif.yaml
Documentation/filesystems/overlayfs.rst
Documentation/netlink/specs/devlink.yaml
Documentation/networking/representors.rst
Documentation/process/embargoed-hardware-issues.rst
Documentation/trace/fprobe.rst
Documentation/translations/zh_CN/core-api/workqueue.rst
MAINTAINERS
Makefile
arch/arm64/boot/dts/freescale/imx93.dtsi
arch/arm64/boot/dts/mediatek/mt7622.dtsi
arch/arm64/boot/dts/mediatek/mt7986a.dtsi
arch/arm64/boot/dts/mediatek/mt8195-demo.dts
arch/arm64/boot/dts/mediatek/mt8195.dtsi
arch/arm64/boot/dts/qcom/sm8150.dtsi
arch/arm64/include/asm/kvm_arm.h
arch/arm64/kvm/arch_timer.c
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/pmu.c
arch/arm64/kvm/sys_regs.c
arch/ia64/include/asm/cpu.h
arch/ia64/kernel/topology.c
arch/loongarch/include/asm/io.h
arch/loongarch/include/asm/linkage.h
arch/loongarch/include/asm/pgtable-bits.h
arch/loongarch/kernel/entry.S
arch/loongarch/kernel/genex.S
arch/loongarch/kernel/setup.c
arch/loongarch/mm/init.c
arch/loongarch/mm/tlbex.S
arch/mips/kvm/mmu.c
arch/powerpc/include/asm/nohash/32/pte-8xx.h
arch/powerpc/include/asm/nohash/64/pgtable.h
arch/powerpc/include/asm/nohash/pgtable.h
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/head_85xx.S
arch/powerpc/platforms/pseries/hvCall.S
arch/riscv/Makefile
arch/riscv/errata/andes/Makefile
arch/riscv/include/asm/ftrace.h
arch/riscv/include/asm/kprobes.h
arch/riscv/include/asm/uprobes.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/traps.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/kvm/interrupt.c
arch/s390/net/bpf_jit_comp.c
arch/x86/boot/compressed/sev.c
arch/x86/events/utils.c
arch/x86/hyperv/hv_init.c
arch/x86/hyperv/hv_vtl.c
arch/x86/include/asm/cpu.h
arch/x86/include/asm/fpu/api.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mshyperv.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/svm.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/fpu/xstate.h
arch/x86/kernel/sev-shared.c
arch/x86/kernel/sev.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/topology.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/avic.c
arch/x86/kvm/svm/nested.c
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/x86.c
block/fops.c
block/sed-opal.c
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_fw.c
drivers/accel/ivpu/ivpu_gem.h
drivers/accel/ivpu/ivpu_hw.h
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/accel/ivpu/ivpu_mmu_context.c
drivers/accel/ivpu/ivpu_pm.c
drivers/acpi/acpi_processor.c
drivers/acpi/bus.c
drivers/acpi/ec.c
drivers/acpi/irq.c
drivers/acpi/resource.c
drivers/android/binder.c
drivers/ata/pata_parport/fit3.c
drivers/ata/pata_parport/pata_parport.c
drivers/base/regmap/regmap.c
drivers/bluetooth/btrtl.c
drivers/bluetooth/hci_vhci.c
drivers/counter/counter-chrdev.c
drivers/counter/microchip-tcb-capture.c
drivers/dma-buf/dma-fence-unwrap.c
drivers/dma-buf/sync_file.c
drivers/dma/fsl-edma-common.c
drivers/dma/fsl-edma-common.h
drivers/dma/fsl-edma-main.c
drivers/dma/idxd/device.c
drivers/dma/mediatek/mtk-uart-apdma.c
drivers/dma/ste_dma40.c
drivers/dma/stm32-dma.c
drivers/dma/stm32-mdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/i915/display/intel_cx0_phy.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_link.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
drivers/gpu/drm/panel/panel-edp.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tiny/simpledrm.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/imx8qxp-adc.c
drivers/iio/addac/Kconfig
drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
drivers/iio/dac/ad3552r.c
drivers/iio/frequency/admv1013.c
drivers/iio/imu/bno055/Kconfig
drivers/iio/light/vcnl4000.c
drivers/iio/pressure/bmp280-core.c
drivers/iio/pressure/dps310.c
drivers/iio/pressure/ms5611_core.c
drivers/iio/proximity/irsd200.c
drivers/input/joystick/xpad.c
drivers/input/misc/powermate.c
drivers/input/mouse/elantech.c
drivers/input/mouse/psmouse-smbus.c
drivers/input/mouse/synaptics.c
drivers/input/rmi4/rmi_smbus.c
drivers/input/serio/i8042-acpipnpio.h
drivers/input/touchscreen/goodix.c
drivers/irqchip/irq-gic-common.h
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-renesas-rzg2l.c
drivers/irqchip/irq-riscv-intc.c
drivers/irqchip/irq-stm32-exti.c
drivers/irqchip/qcom-pdc.c
drivers/mcb/mcb-core.c
drivers/mcb/mcb-parse.c
drivers/md/dm-crypt.c
drivers/media/i2c/ov8858.c
drivers/media/pci/intel/ipu-bridge.c
drivers/media/platform/xilinx/xilinx-vipp.c
drivers/media/v4l2-core/v4l2-subdev.c
drivers/mmc/core/block.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sdio.c
drivers/mmc/host/mtk-sd.c
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci-sprd.c
drivers/mtd/maps/physmap-core.c
drivers/mtd/nand/raw/arasan-nand-controller.c
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/nand_jedec.c
drivers/mtd/nand/raw/nand_onfi.c
drivers/mtd/nand/raw/pl35x-nand-controller.c
drivers/mtd/nand/raw/qcom_nandc.c
drivers/mtd/nand/spi/micron.c
drivers/net/bonding/bond_main.c
drivers/net/can/Kconfig
drivers/net/can/flexcan/flexcan-core.c
drivers/net/can/flexcan/flexcan.h
drivers/net/can/m_can/tcan4x5x-core.c
drivers/net/can/sja1000/sja1000.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/qca/qca8k-8xxx.c
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/ice/ice_lag.c
drivers/net/ethernet/intel/ice/ice_lag.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
drivers/net/ethernet/netronome/nfp/flower/cmsg.c
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
drivers/net/ethernet/netronome/nfp/flower/main.h
drivers/net/ethernet/netronome/nfp/flower/metadata.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
drivers/net/ethernet/qlogic/qed/qed_ll2.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/Makefile
drivers/net/ethernet/ti/icssg/icssg_config.c
drivers/net/ethernet/ti/icssg/icssg_stats.c
drivers/net/ethernet/ti/k3-cppi-desc-pool.c
drivers/net/ieee802154/ca8210.c
drivers/net/macsec.c
drivers/net/mdio/mdio-mux.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/mscc/mscc_macsec.c
drivers/net/tun.c
drivers/net/usb/dm9601.c
drivers/net/usb/smsc95xx.c
drivers/net/virtio_net.c
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem.h
drivers/net/wwan/iosm/iosm_ipc_pcie.c
drivers/net/wwan/iosm/iosm_ipc_port.c
drivers/net/wwan/iosm/iosm_ipc_trace.c
drivers/net/wwan/iosm/iosm_ipc_wwan.c
drivers/net/xen-netback/interface.c
drivers/nvme/host/auth.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/target/fabrics-cmd-auth.c
drivers/nvme/target/tcp.c
drivers/perf/riscv_pmu.c
drivers/perf/riscv_pmu_sbi.c
drivers/phy/freescale/phy-fsl-lynx-28g.c
drivers/pinctrl/nuvoton/pinctrl-wpcm450.c
drivers/pinctrl/pinctrl-lantiq.h
drivers/pinctrl/qcom/pinctrl-lpass-lpi.c
drivers/pinctrl/renesas/Kconfig
drivers/pinctrl/starfive/pinctrl-starfive-jh7110-aon.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7110-sys.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.h
drivers/pinctrl/tegra/pinctrl-tegra.c
drivers/pinctrl/tegra/pinctrl-tegra.h
drivers/pmdomain/imx/scu-pd.c
drivers/power/supply/qcom_battmgr.c
drivers/s390/net/Kconfig
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/scsi_scan.c
drivers/soc/renesas/Kconfig
drivers/soundwire/Makefile
drivers/soundwire/bus.c
drivers/soundwire/bus_type.c
drivers/soundwire/irq.c [new file with mode: 0644]
drivers/soundwire/irq.h [new file with mode: 0644]
drivers/spi/spi-npcm-fiu.c
drivers/tee/amdtee/core.c
drivers/thunderbolt/icm.c
drivers/thunderbolt/switch.c
drivers/thunderbolt/tmu.c
drivers/thunderbolt/xdomain.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/serial_core.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdnsp-gadget.c
drivers/usb/cdns3/core.h
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/dwc3/core.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/misc/onboard_usb_hub.h
drivers/usb/musb/musb_debugfs.c
drivers/usb/musb/musb_host.c
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
drivers/usb/typec/ucsi/psy.c
drivers/usb/typec/ucsi/ucsi.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/core/cfbcopyarea.c
drivers/video/fbdev/core/syscopyarea.c
drivers/video/fbdev/mmp/hw/mmp_ctrl.h
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/sa1100fb.c
drivers/video/fbdev/uvesafb.c
drivers/xen/events/events_base.c
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/super.c
fs/btrfs/volumes.c
fs/ceph/crypto.c
fs/ceph/file.c
fs/ceph/inode.c
fs/fs_context.c
fs/namei.c
fs/notify/fanotify/fanotify_user.c
fs/ntfs3/attrib.c
fs/ntfs3/attrlist.c
fs/ntfs3/bitmap.c
fs/ntfs3/dir.c
fs/ntfs3/file.c
fs/ntfs3/frecord.c
fs/ntfs3/fslog.c
fs/ntfs3/fsntfs.c
fs/ntfs3/index.c
fs/ntfs3/inode.c
fs/ntfs3/namei.c
fs/ntfs3/ntfs.h
fs/ntfs3/ntfs_fs.h
fs/ntfs3/record.c
fs/ntfs3/super.c
fs/ntfs3/xattr.c
fs/overlayfs/params.c
fs/quota/dquot.c
fs/smb/client/cached_dir.c
fs/smb/client/cached_dir.h
fs/smb/server/smb2pdu.c
fs/smb/server/vfs_cache.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/scrub/xfile.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_notify_failure.c
include/acpi/processor.h
include/asm-generic/mshyperv.h
include/drm/gpu_scheduler.h
include/kvm/arm_arch_timer.h
include/linux/cgroup-defs.h
include/linux/cpu.h
include/linux/dma-fence.h
include/linux/fs.h
include/linux/fs_context.h
include/linux/mcb.h
include/linux/mtd/jedec.h
include/linux/mtd/onfi.h
include/linux/mtd/rawnand.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/skbuff.h
include/linux/virtio_net.h
include/net/bluetooth/hci_mon.h
include/net/macsec.h
include/net/netns/xfrm.h
include/net/sock.h
include/net/tcp.h
include/sound/soc-dapm.h
include/sound/soc.h
include/trace/events/neigh.h
include/uapi/drm/nouveau_drm.h
include/uapi/linux/if_packet.h
include/video/mmp_disp.h
include/video/uvesafb.h
io_uring/io_uring.c
kernel/auditsc.c
kernel/bpf/mprog.c
kernel/bpf/syscall.c
kernel/bpf/tcx.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/printk/printk.c
kernel/sched/fair.c
kernel/trace/fprobe.c
kernel/workqueue.c
mm/slab_common.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sock.c
net/bluetooth/hci_sync.c
net/can/isotp.c
net/ceph/messenger.c
net/core/dev.c
net/core/dev.h
net/core/pktgen.c
net/core/rtnetlink.c
net/core/stream.c
net/devlink/health.c
net/ipv4/af_inet.c
net/ipv4/esp4.c
net/ipv4/fib_semantics.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/tcp_recovery.c
net/ipv6/esp6.c
net/ipv6/xfrm6_policy.c
net/mac80211/key.c
net/mctp/route.c
net/mptcp/protocol.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_inner.c
net/netfilter/nft_payload.c
net/netfilter/nft_set_pipapo.h
net/netfilter/nft_set_rbtree.c
net/nfc/llcp_core.c
net/nfc/nci/core.c
net/nfc/nci/spi.c
net/packet/af_packet.c
net/rfkill/core.c
net/rfkill/rfkill-gpio.c
net/sched/cls_u32.c
net/sched/sch_hfsc.c
net/smc/Kconfig
net/smc/af_smc.c
net/smc/smc_ib.c
net/smc/smc_ib.h
net/smc/smc_stats.h
net/tls/tls_main.c
net/tls/tls_sw.c
net/wireless/core.c
net/xdp/xsk_queue.c
net/xfrm/xfrm_interface_core.c
net/xfrm/xfrm_policy.c
security/keys/trusted-keys/trusted_core.c
sound/pci/hda/cs35l41_hda.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs35l56.c
sound/soc/codecs/cs42l42-sdw.c
sound/soc/codecs/cs42l43-jack.c
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/lpass-wsa-macro.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5682-i2c.c
sound/soc/codecs/tas2780.c
sound/soc/codecs/tlv320adc3xxx.c
sound/soc/codecs/wcd938x-sdw.c
sound/soc/codecs/wcd938x.c
sound/soc/dwc/dwc-i2s.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_sai.c
sound/soc/generic/simple-card-utils.c
sound/soc/generic/simple-card.c
sound/soc/intel/boards/sof_es8336.c
sound/soc/intel/boards/sof_sdw.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/intel/common/soc-acpi-intel-mtl-match.c
sound/soc/pxa/pxa-ssp.c
sound/soc/soc-component.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/soc-generic-dmaengine-pcm.c
sound/soc/sof/amd/pci-rmb.c
sound/soc/ti/ams-delta.c
sound/usb/mixer.c
sound/usb/quirks.c
tools/arch/x86/include/uapi/asm/unistd_32.h
tools/hv/hv_kvp_daemon.c
tools/hv/hv_set_ifconfig.sh
tools/net/ynl/generated/devlink-user.c
tools/testing/selftests/bpf/prog_tests/tc_helpers.h
tools/testing/selftests/bpf/prog_tests/tc_links.c
tools/testing/selftests/bpf/prog_tests/tc_opts.c
tools/testing/selftests/bpf/prog_tests/timer.c
tools/testing/selftests/bpf/progs/timer_failure.c [new file with mode: 0644]
tools/testing/selftests/kvm/include/ucall_common.h
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/lib/guest_sprintf.c
tools/testing/selftests/kvm/lib/x86_64/apic.c
tools/testing/selftests/kvm/memslot_perf_test.c
tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.sh
tools/testing/selftests/kvm/x86_64/state_test.c
tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/netns-name.sh [new file with mode: 0755]
tools/testing/selftests/net/openvswitch/openvswitch.sh
tools/testing/selftests/net/openvswitch/ovs-dpctl.py
tools/testing/selftests/netfilter/nft_audit.sh
tools/testing/selftests/riscv/mm/Makefile
tools/testing/selftests/riscv/mm/mmap_bottomup.c [moved from tools/testing/selftests/riscv/mm/testcases/mmap_bottomup.c with 97% similarity]
tools/testing/selftests/riscv/mm/mmap_default.c [moved from tools/testing/selftests/riscv/mm/testcases/mmap_default.c with 97% similarity]
tools/testing/selftests/riscv/mm/mmap_test.h [moved from tools/testing/selftests/riscv/mm/testcases/mmap_test.h with 100% similarity]
tools/testing/selftests/riscv/mm/run_mmap.sh [moved from tools/testing/selftests/riscv/mm/testcases/run_mmap.sh with 100% similarity]

index 978d3d5..fba87a5 100644 (file)
@@ -1,7 +1,7 @@
 What:          /sys/class/firmware/.../data
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   The data sysfs file is used for firmware-fallback and for
                firmware uploads. Cat a firmware image to this sysfs file
                after you echo 1 to the loading sysfs file. When the firmware
@@ -13,7 +13,7 @@ Description:  The data sysfs file is used for firmware-fallback and for
 What:          /sys/class/firmware/.../cancel
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   Write-only. For firmware uploads, write a "1" to this file to
                request that the transfer of firmware data to the lower-level
                device be canceled. This request will be rejected (EBUSY) if
@@ -23,7 +23,7 @@ Description:  Write-only. For firmware uploads, write a "1" to this file to
 What:          /sys/class/firmware/.../error
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   Read-only. Returns a string describing a failed firmware
                upload. This string will be in the form of <STATUS>:<ERROR>,
                where <STATUS> will be one of the status strings described
@@ -37,7 +37,7 @@ Description:  Read-only. Returns a string describing a failed firmware
 What:          /sys/class/firmware/.../loading
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   The loading sysfs file is used for both firmware-fallback and
                for firmware uploads. Echo 1 onto the loading file to indicate
                you are writing a firmware file to the data sysfs node. Echo
@@ -49,7 +49,7 @@ Description:  The loading sysfs file is used for both firmware-fallback and
 What:          /sys/class/firmware/.../remaining_size
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   Read-only. For firmware upload, this file contains the size
                of the firmware data that remains to be transferred to the
                lower-level device driver. The size value is initialized to
@@ -62,7 +62,7 @@ Description:  Read-only. For firmware upload, this file contains the size
 What:          /sys/class/firmware/.../status
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   Read-only. Returns a string describing the current status of
                a firmware upload. The string will be one of the following:
                idle, "receiving", "preparing", "transferring", "programming".
@@ -70,7 +70,7 @@ Description:  Read-only. Returns a string describing the current status of
 What:          /sys/class/firmware/.../timeout
 Date:          July 2022
 KernelVersion: 5.19
-Contact:       Russ Weight <russell.h.weight@intel.com>
+Contact:       Russ Weight <russ.weight@linux.dev>
 Description:   This file supports the timeout mechanism for firmware
                fallback.  This file has no affect on firmware uploads. For
                more information on timeouts please see the documentation
index 5d7b01a..0046af0 100644 (file)
@@ -244,7 +244,7 @@ unbound worker-pools and only one work item could be active at any given
 time thus achieving the same ordering property as ST wq.
 
 In the current implementation the above configuration only guarantees
-ST behavior within a given NUMA node. Instead ``alloc_ordered_queue()`` should
+ST behavior within a given NUMA node. Instead ``alloc_ordered_workqueue()`` should
 be used to achieve system-wide ST behavior.
 
 
@@ -390,7 +390,7 @@ The default affinity scope can be changed with the module parameter
 scope can be changed using ``apply_workqueue_attrs()``.
 
 If ``WQ_SYSFS`` is set, the workqueue will have the following affinity scope
-related interface files under its ``/sys/devices/virtual/WQ_NAME/``
+related interface files under its ``/sys/devices/virtual/workqueue/WQ_NAME/``
 directory.
 
 ``affinity_scope``
index 23ada8f..769ce23 100644 (file)
@@ -13,6 +13,8 @@ description: |
 
 maintainers:
   - Michael Tretter <m.tretter@pengutronix.de>
+  - Harini Katakam <harini.katakam@amd.com>
+  - Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
 
 allOf:
   - $ref: ../dma-controller.yaml#
@@ -65,6 +67,7 @@ required:
   - interrupts
   - clocks
   - clock-names
+  - xlnx,bus-width
 
 additionalProperties: false
 
index 7cc4ddc..2aa1f4b 100644 (file)
@@ -61,7 +61,7 @@ patternProperties:
     required:
       - reg
 
-    additionalProperties: true
+    additionalProperties: false
 
 allOf:
   - $ref: /schemas/spi/spi-peripheral-props.yaml#
index 8376d64..bed42d5 100644 (file)
@@ -45,5 +45,6 @@ examples:
       light-sensor@38 {
         compatible = "rohm,bu27010";
         reg = <0x38>;
+        vdd-supply = <&vdd>;
       };
     };
index 2bc3847..0f4a062 100644 (file)
@@ -106,6 +106,12 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32
     maximum: 4096
 
+  dma-noncoherent:
+    description:
+      Present if the GIC redistributors permit programming shareability
+      and cacheability attributes but are connected to a non-coherent
+      downstream interconnect.
+
   msi-controller:
     description:
       Only present if the Message Based Interrupt functionality is
@@ -193,6 +199,12 @@ patternProperties:
       compatible:
         const: arm,gic-v3-its
 
+      dma-noncoherent:
+        description:
+          Present if the GIC ITS permits programming shareability and
+          cacheability attributes but is connected to a non-coherent
+          downstream interconnect.
+
       msi-controller: true
 
       "#msi-cells":
index 95033cb..b417341 100644 (file)
@@ -37,6 +37,7 @@ properties:
           - renesas,intc-ex-r8a77990    # R-Car E3
           - renesas,intc-ex-r8a77995    # R-Car D3
           - renesas,intc-ex-r8a779a0    # R-Car V3U
+          - renesas,intc-ex-r8a779f0    # R-Car S4-8
           - renesas,intc-ex-r8a779g0    # R-Car V4H
       - const: renesas,irqc
 
index 33b90e9..2ef3081 100644 (file)
@@ -19,20 +19,19 @@ description: |
     - NMI edge select (NMI is not treated as NMI exception and supports fall edge and
       stand-up edge detection interrupts)
 
-allOf:
-  - $ref: /schemas/interrupt-controller.yaml#
-
 properties:
   compatible:
     items:
       - enum:
+          - renesas,r9a07g043u-irqc   # RZ/G2UL
           - renesas,r9a07g044-irqc    # RZ/G2{L,LC}
           - renesas,r9a07g054-irqc    # RZ/V2L
       - const: renesas,rzg2l-irqc
 
   '#interrupt-cells':
-    description: The first cell should contain external interrupt number (IRQ0-7) and the
-                 second cell is used to specify the flag.
+    description: The first cell should contain a macro RZG2L_{NMI,IRQX} included in the
+                 include/dt-bindings/interrupt-controller/irqc-rzg2l.h and the second
+                 cell is used to specify the flag.
     const: 2
 
   '#address-cells':
@@ -44,7 +43,96 @@ properties:
     maxItems: 1
 
   interrupts:
-    maxItems: 41
+    minItems: 41
+    items:
+      - description: NMI interrupt
+      - description: IRQ0 interrupt
+      - description: IRQ1 interrupt
+      - description: IRQ2 interrupt
+      - description: IRQ3 interrupt
+      - description: IRQ4 interrupt
+      - description: IRQ5 interrupt
+      - description: IRQ6 interrupt
+      - description: IRQ7 interrupt
+      - description: GPIO interrupt, TINT0
+      - description: GPIO interrupt, TINT1
+      - description: GPIO interrupt, TINT2
+      - description: GPIO interrupt, TINT3
+      - description: GPIO interrupt, TINT4
+      - description: GPIO interrupt, TINT5
+      - description: GPIO interrupt, TINT6
+      - description: GPIO interrupt, TINT7
+      - description: GPIO interrupt, TINT8
+      - description: GPIO interrupt, TINT9
+      - description: GPIO interrupt, TINT10
+      - description: GPIO interrupt, TINT11
+      - description: GPIO interrupt, TINT12
+      - description: GPIO interrupt, TINT13
+      - description: GPIO interrupt, TINT14
+      - description: GPIO interrupt, TINT15
+      - description: GPIO interrupt, TINT16
+      - description: GPIO interrupt, TINT17
+      - description: GPIO interrupt, TINT18
+      - description: GPIO interrupt, TINT19
+      - description: GPIO interrupt, TINT20
+      - description: GPIO interrupt, TINT21
+      - description: GPIO interrupt, TINT22
+      - description: GPIO interrupt, TINT23
+      - description: GPIO interrupt, TINT24
+      - description: GPIO interrupt, TINT25
+      - description: GPIO interrupt, TINT26
+      - description: GPIO interrupt, TINT27
+      - description: GPIO interrupt, TINT28
+      - description: GPIO interrupt, TINT29
+      - description: GPIO interrupt, TINT30
+      - description: GPIO interrupt, TINT31
+      - description: Bus error interrupt
+
+  interrupt-names:
+    minItems: 41
+    items:
+      - const: nmi
+      - const: irq0
+      - const: irq1
+      - const: irq2
+      - const: irq3
+      - const: irq4
+      - const: irq5
+      - const: irq6
+      - const: irq7
+      - const: tint0
+      - const: tint1
+      - const: tint2
+      - const: tint3
+      - const: tint4
+      - const: tint5
+      - const: tint6
+      - const: tint7
+      - const: tint8
+      - const: tint9
+      - const: tint10
+      - const: tint11
+      - const: tint12
+      - const: tint13
+      - const: tint14
+      - const: tint15
+      - const: tint16
+      - const: tint17
+      - const: tint18
+      - const: tint19
+      - const: tint20
+      - const: tint21
+      - const: tint22
+      - const: tint23
+      - const: tint24
+      - const: tint25
+      - const: tint26
+      - const: tint27
+      - const: tint28
+      - const: tint29
+      - const: tint30
+      - const: tint31
+      - const: bus-err
 
   clocks:
     maxItems: 2
@@ -72,6 +160,23 @@ required:
   - power-domains
   - resets
 
+allOf:
+  - $ref: /schemas/interrupt-controller.yaml#
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,r9a07g043u-irqc
+    then:
+      properties:
+        interrupts:
+          minItems: 42
+        interrupt-names:
+          minItems: 42
+      required:
+        - interrupt-names
+
 unevaluatedProperties: false
 
 examples:
@@ -80,55 +185,66 @@ examples:
     #include <dt-bindings/clock/r9a07g044-cpg.h>
 
     irqc: interrupt-controller@110a0000 {
-            compatible = "renesas,r9a07g044-irqc", "renesas,rzg2l-irqc";
-            reg = <0x110a0000 0x10000>;
-            #interrupt-cells = <2>;
-            #address-cells = <0>;
-            interrupt-controller;
-            interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
-                         <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
-            clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
-                     <&cpg CPG_MOD R9A07G044_IA55_PCLK>;
-            clock-names = "clk", "pclk";
-            power-domains = <&cpg>;
-            resets = <&cpg R9A07G044_IA55_RESETN>;
+        compatible = "renesas,r9a07g044-irqc", "renesas,rzg2l-irqc";
+        reg = <0x110a0000 0x10000>;
+        #interrupt-cells = <2>;
+        #address-cells = <0>;
+        interrupt-controller;
+        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 444 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 445 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 446 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 447 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 448 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 449 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 450 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 451 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 452 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 453 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 454 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 455 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 456 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 457 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 458 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 459 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 460 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 461 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 462 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 463 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 464 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 465 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 466 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 467 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 468 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 469 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 470 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 471 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 472 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 473 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 474 IRQ_TYPE_LEVEL_HIGH>,
+                     <GIC_SPI 475 IRQ_TYPE_LEVEL_HIGH>;
+        interrupt-names = "nmi",
+                          "irq0", "irq1", "irq2", "irq3",
+                          "irq4", "irq5", "irq6", "irq7",
+                          "tint0", "tint1", "tint2", "tint3",
+                          "tint4", "tint5", "tint6", "tint7",
+                          "tint8", "tint9", "tint10", "tint11",
+                          "tint12", "tint13", "tint14", "tint15",
+                          "tint16", "tint17", "tint18", "tint19",
+                          "tint20", "tint21", "tint22", "tint23",
+                          "tint24", "tint25", "tint26", "tint27",
+                          "tint28", "tint29", "tint30", "tint31";
+        clocks = <&cpg CPG_MOD R9A07G044_IA55_CLK>,
+                 <&cpg CPG_MOD R9A07G044_IA55_PCLK>;
+        clock-names = "clk", "pclk";
+        power-domains = <&cpg>;
+        resets = <&cpg R9A07G044_IA55_RESETN>;
     };
index 80141eb..10f34aa 100644 (file)
@@ -69,7 +69,7 @@ properties:
     maxItems: 4
 
   clocks:
-    minItems: 3
+    minItems: 2
     items:
       - description: Main peripheral bus clock, PCLK/HCLK - AHB Bus clock
       - description: SDC MMC clock, MCLK
index 7a6de93..4118aa5 100644 (file)
@@ -82,7 +82,7 @@ properties:
     description:
       Current at which the headset micbias sense clamp will engage, 0 to
       disable.
-    enum: [ 0, 14, 23, 41, 50, 60, 68, 86, 95 ]
+    enum: [ 0, 14, 24, 43, 52, 61, 71, 90, 99 ]
     default: 0
 
   cirrus,bias-ramp-ms:
index 4b99a18..b7e6058 100644 (file)
@@ -56,6 +56,9 @@ properties:
       - const: clkext3
     minItems: 2
 
+  "#sound-dai-cells":
+    const: 0
+
 required:
   - compatible
   - reg
index 4f51b2f..c3c989e 100644 (file)
@@ -26,6 +26,7 @@ properties:
       - const: rockchip,rk3568-spdif
       - items:
           - enum:
+              - rockchip,rk3128-spdif
               - rockchip,rk3188-spdif
               - rockchip,rk3288-spdif
               - rockchip,rk3308-spdif
index cdefbe7..5b93268 100644 (file)
@@ -339,6 +339,18 @@ The specified lower directories will be stacked beginning from the
 rightmost one and going left.  In the above example lower1 will be the
 top, lower2 the middle and lower3 the bottom layer.
 
+Note: directory names containing colons can be provided as lower layer by
+escaping the colons with a single backslash.  For example:
+
+  mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
+
+Since kernel version v6.5, directory names containing colons can also
+be provided as lower layer using the fsconfig syscall from new mount api:
+
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
+
+In the latter case, colons in lower layer directory names will be escaped
+as an octal characters (\072) when displayed in /proc/self/mountinfo.
 
 Metadata only copy up
 ---------------------
index d1ebcd9..065661a 100644 (file)
@@ -323,7 +323,7 @@ operations:
             - dev-name
             - sb-index
         reply: &sb-get-reply
-          value: 11
+          value: 13
           attributes: *sb-id-attrs
       dump:
         request:
@@ -350,7 +350,7 @@ operations:
             - sb-index
             - sb-pool-index
         reply: &sb-pool-get-reply
-          value: 15
+          value: 17
           attributes: *sb-pool-id-attrs
       dump:
         request:
@@ -378,7 +378,7 @@ operations:
             - sb-index
             - sb-pool-index
         reply: &sb-port-pool-get-reply
-          value: 19
+          value: 21
           attributes: *sb-port-pool-id-attrs
       dump:
         request:
@@ -407,7 +407,7 @@ operations:
             - sb-pool-type
             - sb-tc-index
         reply: &sb-tc-pool-bind-get-reply
-          value: 23
+          value: 25
           attributes: *sb-tc-pool-bind-id-attrs
       dump:
         request:
@@ -538,7 +538,7 @@ operations:
             - dev-name
             - trap-name
         reply: &trap-get-reply
-          value: 61
+          value: 63
           attributes: *trap-id-attrs
       dump:
         request:
@@ -564,7 +564,7 @@ operations:
             - dev-name
             - trap-group-name
         reply: &trap-group-get-reply
-          value: 65
+          value: 67
           attributes: *trap-group-id-attrs
       dump:
         request:
@@ -590,7 +590,7 @@ operations:
             - dev-name
             - trap-policer-id
         reply: &trap-policer-get-reply
-          value: 69
+          value: 71
           attributes: *trap-policer-id-attrs
       dump:
         request:
@@ -617,7 +617,7 @@ operations:
             - port-index
             - rate-node-name
         reply: &rate-get-reply
-          value: 74
+          value: 76
           attributes: *rate-id-attrs
       dump:
         request:
@@ -643,7 +643,7 @@ operations:
             - dev-name
             - linecard-index
         reply: &linecard-get-reply
-          value: 78
+          value: 80
           attributes: *linecard-id-attrs
       dump:
         request:
index ee1f5cd..decb39c 100644 (file)
@@ -162,9 +162,11 @@ How are representors identified?
 The representor netdevice should *not* directly refer to a PCIe device (e.g.
 through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
 representee or of the switchdev function.
-Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
-the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
-nodes.  (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+Instead, the driver should use the ``SET_NETDEV_DEVLINK_PORT`` macro to
+assign a devlink port instance to the netdevice before registering the
+netdevice; the kernel uses the devlink port to provide the ``phys_switch_id``
+and ``phys_port_name`` sysfs nodes.
+(Some legacy drivers implement ``ndo_get_port_parent_id()`` and
 ``ndo_get_phys_port_name()`` directly, but this is deprecated.)  See
 :ref:`Documentation/networking/devlink/devlink-port.rst <devlink_port>` for the
 details of this API.
index ac7c52f..31000f0 100644 (file)
@@ -25,15 +25,15 @@ Contact
 The Linux kernel hardware security team is separate from the regular Linux
 kernel security team.
 
-The team only handles the coordination of embargoed hardware security
-issues.  Reports of pure software security bugs in the Linux kernel are not
+The team only handles developing fixes for embargoed hardware security
+issues. Reports of pure software security bugs in the Linux kernel are not
 handled by this team and the reporter will be guided to contact the regular
 Linux kernel security team (:ref:`Documentation/admin-guide/
 <securitybugs>`) instead.
 
 The team can be contacted by email at <hardware-security@kernel.org>. This
-is a private list of security officers who will help you to coordinate an
-issue according to our documented process.
+is a private list of security officers who will help you to coordinate a
+fix according to our documented process.
 
 The list is encrypted and email to the list can be sent by either PGP or
 S/MIME encrypted and must be signed with the reporter's PGP key or S/MIME
@@ -132,11 +132,11 @@ other hardware could be affected.
 
 The hardware security team will provide an incident-specific encrypted
 mailing-list which will be used for initial discussion with the reporter,
-further disclosure and coordination.
+further disclosure, and coordination of fixes.
 
 The hardware security team will provide the disclosing party a list of
 developers (domain experts) who should be informed initially about the
-issue after confirming with the developers  that they will adhere to this
+issue after confirming with the developers that they will adhere to this
 Memorandum of Understanding and the documented process. These developers
 form the initial response team and will be responsible for handling the
 issue after initial contact. The hardware security team is supporting the
@@ -209,13 +209,18 @@ five work days this is taken as silent acknowledgement.
 After acknowledgement or resolution of an objection the expert is disclosed
 by the incident team and brought into the development process.
 
+List participants may not communicate about the issue outside of the
+private mailing list. List participants may not use any shared resources
+(e.g. employer build farms, CI systems, etc) when working on patches.
+
 
 Coordinated release
 """""""""""""""""""
 
 The involved parties will negotiate the date and time where the embargo
 ends. At that point the prepared mitigations are integrated into the
-relevant kernel trees and published.
+relevant kernel trees and published. There is no pre-notification process:
+fixes are published in public and available to everyone at the same time.
 
 While we understand that hardware security issues need coordinated embargo
 time, the embargo time should be constrained to the minimum time which is
index 7a89551..196f523 100644 (file)
@@ -91,9 +91,9 @@ The prototype of the entry/exit callback function are as follows:
 
 .. code-block:: c
 
- int entry_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ int entry_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
 
- void exit_callback(struct fprobe *fp, unsigned long entry_ip, struct pt_regs *regs, void *entry_data);
+ void exit_callback(struct fprobe *fp, unsigned long entry_ip, unsigned long ret_ip, struct pt_regs *regs, void *entry_data);
 
 Note that the @entry_ip is saved at function entry and passed to exit handler.
 If the entry callback function returns !0, the corresponding exit callback will be cancelled.
@@ -108,6 +108,10 @@ If the entry callback function returns !0, the corresponding exit callback will
         Note that this may not be the actual entry address of the function but
         the address where the ftrace is instrumented.
 
+@ret_ip
+        This is the return address that the traced function will return to,
+        somewhere in the caller. This can be used at both entry and exit.
+
 @regs
         This is the `pt_regs` data structure at the entry and exit. Note that
         the instruction pointer of @regs may be different from the @entry_ip
index 6c1b5ec..7fac6f7 100644 (file)
@@ -202,7 +202,7 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
 同的排序属性。
 
 在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
-``alloc_ordered_queue()`` 应该被用来实现全系统的ST行为。
+``alloc_ordered_workqueue()`` 应该被用来实现全系统的ST行为。
 
 
 执行场景示例
index 6c4cce4..7a7bd8b 100644 (file)
@@ -1584,6 +1584,17 @@ F:       arch/arm/include/asm/arch_timer.h
 F:     arch/arm64/include/asm/arch_timer.h
 F:     drivers/clocksource/arm_arch_timer.c
 
+ARM GENERIC INTERRUPT CONTROLLER DRIVERS
+M:     Marc Zyngier <maz@kernel.org>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/interrupt-controller/arm,gic*
+F:     arch/arm/include/asm/arch_gicv3.h
+F:     arch/arm64/include/asm/arch_gicv3.h
+F:     drivers/irqchip/irq-gic*.[ch]
+F:     include/linux/irqchip/arm-gic*.h
+F:     include/linux/irqchip/arm-vgic-info.h
+
 ARM HDLCD DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
 S:     Supported
@@ -2210,21 +2221,28 @@ F:      arch/arm/boot/dts/ti/omap/omap3-igep*
 ARM/INTEL IXP4XX ARM ARCHITECTURE
 M:     Linus Walleij <linusw@kernel.org>
 M:     Imre Kaloz <kaloz@openwrt.org>
-M:     Krzysztof Halasa <khalasa@piap.pl>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/arm/intel-ixp4xx.yaml
 F:     Documentation/devicetree/bindings/gpio/intel,ixp4xx-gpio.txt
 F:     Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
 F:     Documentation/devicetree/bindings/memory-controllers/intel,ixp4xx-expansion*
+F:     Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
 F:     Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
 F:     arch/arm/boot/dts/intel/ixp/
 F:     arch/arm/mach-ixp4xx/
 F:     drivers/bus/intel-ixp4xx-eb.c
+F:     drivers/char/hw_random/ixp4xx-rng.c
 F:     drivers/clocksource/timer-ixp4xx.c
 F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 F:     drivers/gpio/gpio-ixp4xx.c
 F:     drivers/irqchip/irq-ixp4xx.c
+F:     drivers/net/ethernet/xscale/ixp4xx_eth.c
+F:     drivers/net/wan/ixp4xx_hss.c
+F:     drivers/soc/ixp4xx/ixp4xx-npe.c
+F:     drivers/soc/ixp4xx/ixp4xx-qmgr.c
+F:     include/linux/soc/ixp4xx/npe.h
+F:     include/linux/soc/ixp4xx/qmgr.h
 
 ARM/INTEL KEEMBAY ARCHITECTURE
 M:     Paul J. Murphy <paul.j.murphy@intel.com>
@@ -2326,7 +2344,7 @@ F:        drivers/rtc/rtc-mt7622.c
 
 ARM/Mediatek SoC support
 M:     Matthias Brugger <matthias.bgg@gmail.com>
-R:     AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+M:     AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
 L:     linux-kernel@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-mediatek@lists.infradead.org (moderated for non-subscribers)
@@ -8092,7 +8110,7 @@ F:        include/linux/arm_ffa.h
 
 FIRMWARE LOADER (request_firmware)
 M:     Luis Chamberlain <mcgrof@kernel.org>
-M:     Russ Weight <russell.h.weight@intel.com>
+M:     Russ Weight <russ.weight@linux.dev>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 F:     Documentation/firmware_class/
@@ -10619,22 +10637,6 @@ L:     linux-crypto@vger.kernel.org
 S:     Maintained
 F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 
-INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT
-M:     Krzysztof Halasa <khalasa@piap.pl>
-S:     Maintained
-F:     drivers/net/ethernet/xscale/ixp4xx_eth.c
-F:     drivers/net/wan/ixp4xx_hss.c
-F:     drivers/soc/ixp4xx/ixp4xx-npe.c
-F:     drivers/soc/ixp4xx/ixp4xx-qmgr.c
-F:     include/linux/soc/ixp4xx/npe.h
-F:     include/linux/soc/ixp4xx/qmgr.h
-
-INTEL IXP4XX RANDOM NUMBER GENERATOR SUPPORT
-M:     Deepak Saxena <dsaxena@plexity.net>
-S:     Maintained
-F:     Documentation/devicetree/bindings/rng/intel,ixp46x-rng.yaml
-F:     drivers/char/hw_random/ixp4xx-rng.c
-
 INTEL KEEM BAY DRM DRIVER
 M:     Anitha Chrisanthus <anitha.chrisanthus@intel.com>
 M:     Edmund Dea <edmund.j.dea@intel.com>
@@ -11060,7 +11062,7 @@ F:      Documentation/devicetree/bindings/sound/irondevice,*
 F:     sound/soc/codecs/sma*
 
 IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M:     Marc Zyngier <maz@kernel.org>
+M:     Thomas Gleixner <tglx@linutronix.de>
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:     Documentation/core-api/irq/irq-domain.rst
@@ -11079,7 +11081,6 @@ F:      lib/group_cpus.c
 
 IRQCHIP DRIVERS
 M:     Thomas Gleixner <tglx@linutronix.de>
-M:     Marc Zyngier <maz@kernel.org>
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -20483,6 +20484,7 @@ F:      include/dt-bindings/clock/starfive?jh71*.h
 STARFIVE JH71X0 PINCTRL DRIVERS
 M:     Emil Renner Berthing <kernel@esmil.dk>
 M:     Jianlong Huang <jianlong.huang@starfivetech.com>
+M:     Hal Feng <hal.feng@starfivetech.com>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/pinctrl/starfive,jh71*.yaml
index 88ebf65..a3e52e1 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 6f85a05..dcf6e48 100644 (file)
                        #size-cells = <1>;
                        ranges;
 
-                       anomix_ns_gpr: syscon@44210000 {
+                       aonmix_ns_gpr: syscon@44210000 {
                                compatible = "fsl,imx93-aonmix-ns-syscfg", "syscon";
                                reg = <0x44210000 0x1000>;
                        };
                                assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
                                assigned-clock-rates = <40000000>;
                                fsl,clk-source = /bits/ 8 <0>;
+                               fsl,stop-mode = <&aonmix_ns_gpr 0x14 0>;
                                status = "disabled";
                        };
 
                                assigned-clock-parents = <&clk IMX93_CLK_SYS_PLL_PFD1_DIV2>;
                                assigned-clock-rates = <40000000>;
                                fsl,clk-source = /bits/ 8 <0>;
+                               fsl,stop-mode = <&wakeupmix_gpr 0x0c 2>;
                                status = "disabled";
                        };
 
index 36ef2db..3ee9266 100644 (file)
                status = "disabled";
        };
 
-       sata_phy: t-phy@1a243000 {
+       sata_phy: t-phy {
                compatible = "mediatek,mt7622-tphy",
                             "mediatek,generic-tphy-v1";
                #address-cells = <2>;
index 68539ea..24eda00 100644 (file)
                        };
                };
 
-               pcie_phy: t-phy@11c00000 {
+               pcie_phy: t-phy {
                        compatible = "mediatek,mt7986-tphy",
                                     "mediatek,generic-tphy-v2";
                        #address-cells = <2>;
index b2485dd..5d63508 100644 (file)
@@ -48,7 +48,7 @@
 
        memory@40000000 {
                device_type = "memory";
-               reg = <0 0x40000000 0 0x80000000>;
+               reg = <0 0x40000000 0x2 0x00000000>;
        };
 
        reserved-memory {
                #size-cells = <2>;
                ranges;
 
-               /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
-               bl31_secmon_reserved: secmon@54600000 {
-                       no-map;
-                       reg = <0 0x54600000 0x0 0x200000>;
-               };
-
-               /* 12 MiB reserved for OP-TEE (BL32)
+               /*
+                * 12 MiB reserved for OP-TEE (BL32)
                 * +-----------------------+ 0x43e0_0000
                 * |      SHMEM 2MiB       |
                 * +-----------------------+ 0x43c0_0000
                        no-map;
                        reg = <0 0x43200000 0 0x00c00000>;
                };
+
+               scp_mem: memory@50000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0 0x50000000 0 0x2900000>;
+                       no-map;
+               };
+
+               vpu_mem: memory@53000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0 0x53000000 0 0x1400000>; /* 20 MB */
+               };
+
+               /* 2 MiB reserved for ARM Trusted Firmware (BL31) */
+               bl31_secmon_mem: memory@54600000 {
+                       no-map;
+                       reg = <0 0x54600000 0x0 0x200000>;
+               };
+
+               snd_dma_mem: memory@60000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0 0x60000000 0 0x1100000>;
+                       no-map;
+               };
+
+               apu_mem: memory@62000000 {
+                       compatible = "shared-dma-pool";
+                       reg = <0 0x62000000 0 0x1400000>; /* 20 MB */
+               };
        };
 };
 
index a9e52b5..54c674c 100644 (file)
                interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH 0>;
                cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>,
                       <&cpu4>, <&cpu5>, <&cpu6>, <&cpu7>;
+               status = "fail";
        };
 
        dmic_codec: dmic-codec {
index a7c3020..06c5300 100644 (file)
 
                pdc: interrupt-controller@b220000 {
                        compatible = "qcom,sm8150-pdc", "qcom,pdc";
-                       reg = <0 0x0b220000 0 0x400>;
+                       reg = <0 0x0b220000 0 0x30000>;
                        qcom,pdc-ranges = <0 480 94>, <94 609 31>,
                                          <125 63 1>;
                        #interrupt-cells = <2>;
index 5882b24..1095c66 100644 (file)
  */
 #define __HFGRTR_EL2_RES0      (GENMASK(63, 56) | GENMASK(53, 51))
 #define __HFGRTR_EL2_MASK      GENMASK(49, 0)
-#define __HFGRTR_EL2_nMASK     (GENMASK(55, 54) | BIT(50))
+#define __HFGRTR_EL2_nMASK     (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
 
 #define __HFGWTR_EL2_RES0      (GENMASK(63, 56) | GENMASK(53, 51) |    \
                                 BIT(46) | BIT(42) | BIT(40) | BIT(28) | \
                                 GENMASK(26, 25) | BIT(21) | BIT(18) |  \
                                 GENMASK(15, 14) | GENMASK(10, 9) | BIT(2))
 #define __HFGWTR_EL2_MASK      GENMASK(49, 0)
-#define __HFGWTR_EL2_nMASK     (GENMASK(55, 54) | BIT(50))
+#define __HFGWTR_EL2_nMASK     (GENMASK(58, 57) | GENMASK(55, 54) | BIT(50))
 
 #define __HFGITR_EL2_RES0      GENMASK(63, 57)
 #define __HFGITR_EL2_MASK      GENMASK(54, 0)
index 6dcdae4..a1e2422 100644 (file)
@@ -55,11 +55,6 @@ static struct irq_ops arch_timer_irq_ops = {
        .get_input_level = kvm_arch_timer_get_input_level,
 };
 
-static bool has_cntpoff(void)
-{
-       return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
-}
-
 static int nr_timers(struct kvm_vcpu *vcpu)
 {
        if (!vcpu_has_nv(vcpu))
@@ -180,7 +175,7 @@ u64 kvm_phys_timer_read(void)
        return timecounter->cc->read(timecounter->cc);
 }
 
-static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
 {
        if (vcpu_has_nv(vcpu)) {
                if (is_hyp_ctxt(vcpu)) {
@@ -548,8 +543,7 @@ static void timer_save_state(struct arch_timer_context *ctx)
                timer_set_ctl(ctx, read_sysreg_el0(SYS_CNTP_CTL));
                cval = read_sysreg_el0(SYS_CNTP_CVAL);
 
-               if (!has_cntpoff())
-                       cval -= timer_get_offset(ctx);
+               cval -= timer_get_offset(ctx);
 
                timer_set_cval(ctx, cval);
 
@@ -636,8 +630,7 @@ static void timer_restore_state(struct arch_timer_context *ctx)
                cval = timer_get_cval(ctx);
                offset = timer_get_offset(ctx);
                set_cntpoff(offset);
-               if (!has_cntpoff())
-                       cval += offset;
+               cval += offset;
                write_sysreg_el0(cval, SYS_CNTP_CVAL);
                isb();
                write_sysreg_el0(timer_get_ctl(ctx), SYS_CNTP_CTL);
index 9ced1bf..ee902ff 100644 (file)
@@ -977,6 +977,8 @@ enum fg_filter_id {
 
 static const struct encoding_to_trap_config encoding_to_fgt[] __initconst = {
        /* HFGRTR_EL2, HFGWTR_EL2 */
+       SR_FGT(SYS_PIR_EL1,             HFGxTR, nPIR_EL1, 0),
+       SR_FGT(SYS_PIRE0_EL1,           HFGxTR, nPIRE0_EL1, 0),
        SR_FGT(SYS_TPIDR2_EL0,          HFGxTR, nTPIDR2_EL0, 0),
        SR_FGT(SYS_SMPRI_EL1,           HFGxTR, nSMPRI_EL1, 0),
        SR_FGT(SYS_ACCDATA_EL1,         HFGxTR, nACCDATA_EL1, 0),
index 6537f58..448b170 100644 (file)
@@ -39,6 +39,26 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
        ___activate_traps(vcpu);
 
+       if (has_cntpoff()) {
+               struct timer_map map;
+
+               get_timer_map(vcpu, &map);
+
+               /*
+                * We're entering the guest. Reload the correct
+                * values from memory now that TGE is clear.
+                */
+               if (map.direct_ptimer == vcpu_ptimer(vcpu))
+                       val = __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0);
+               if (map.direct_ptimer == vcpu_hptimer(vcpu))
+                       val = __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2);
+
+               if (map.direct_ptimer) {
+                       write_sysreg_el0(val, SYS_CNTP_CVAL);
+                       isb();
+               }
+       }
+
        val = read_sysreg(cpacr_el1);
        val |= CPACR_ELx_TTA;
        val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
@@ -77,6 +97,30 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
 
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 
+       if (has_cntpoff()) {
+               struct timer_map map;
+               u64 val, offset;
+
+               get_timer_map(vcpu, &map);
+
+               /*
+                * We're exiting the guest. Save the latest CVAL value
+                * to memory and apply the offset now that TGE is set.
+                */
+               val = read_sysreg_el0(SYS_CNTP_CVAL);
+               if (map.direct_ptimer == vcpu_ptimer(vcpu))
+                       __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = val;
+               if (map.direct_ptimer == vcpu_hptimer(vcpu))
+                       __vcpu_sys_reg(vcpu, CNTHP_CVAL_EL2) = val;
+
+               offset = read_sysreg_s(SYS_CNTPOFF_EL2);
+
+               if (map.direct_ptimer && offset) {
+                       write_sysreg_el0(val + offset, SYS_CNTP_CVAL);
+                       isb();
+               }
+       }
+
        /*
         * ARM errata 1165522 and 1530923 require the actual execution of the
         * above before we can switch to the EL2/EL0 translation regime used by
index 0eea225..a243934 100644 (file)
@@ -39,7 +39,7 @@ void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr)
 {
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-       if (!kvm_arm_support_pmu_v3() || !pmu || !kvm_pmu_switch_needed(attr))
+       if (!kvm_arm_support_pmu_v3() || !kvm_pmu_switch_needed(attr))
                return;
 
        if (!attr->exclude_host)
@@ -55,7 +55,7 @@ void kvm_clr_pmu_events(u32 clr)
 {
        struct kvm_pmu_events *pmu = kvm_get_pmu_events();
 
-       if (!kvm_arm_support_pmu_v3() || !pmu)
+       if (!kvm_arm_support_pmu_v3())
                return;
 
        pmu->events_host &= ~clr;
index e92ec81..0afd613 100644 (file)
@@ -2122,8 +2122,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
 
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
-       { SYS_DESC(SYS_PIRE0_EL1), access_vm_reg, reset_unknown, PIRE0_EL1 },
-       { SYS_DESC(SYS_PIR_EL1), access_vm_reg, reset_unknown, PIR_EL1 },
+       { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
+       { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
index db125df..642d716 100644 (file)
@@ -15,9 +15,4 @@ DECLARE_PER_CPU(struct ia64_cpu, cpu_devices);
 
 DECLARE_PER_CPU(int, cpu_state);
 
-#ifdef CONFIG_HOTPLUG_CPU
-extern int arch_register_cpu(int num);
-extern void arch_unregister_cpu(int);
-#endif
-
 #endif /* _ASM_IA64_CPU_H_ */
index 94a848b..741863a 100644 (file)
@@ -59,7 +59,7 @@ void __ref arch_unregister_cpu(int num)
 }
 EXPORT_SYMBOL(arch_unregister_cpu);
 #else
-static int __init arch_register_cpu(int num)
+int __init arch_register_cpu(int num)
 {
        return register_cpu(&sysfs_cpus[num].cpu, num);
 }
index 0dcb36b..c486c23 100644 (file)
@@ -52,10 +52,9 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
  * @offset:    bus address of the memory
  * @size:      size of the resource to map
  */
-extern pgprot_t pgprot_wc;
-
 #define ioremap_wc(offset, size)       \
-       ioremap_prot((offset), (size), pgprot_val(pgprot_wc))
+       ioremap_prot((offset), (size),  \
+               pgprot_val(wc_enabled ? PAGE_KERNEL_WUC : PAGE_KERNEL_SUC))
 
 #define ioremap_cache(offset, size)    \
        ioremap_prot((offset), (size), pgprot_val(PAGE_KERNEL))
index 81b0c4c..e2eca1a 100644 (file)
        .cfi_endproc;                                   \
        SYM_END(name, SYM_T_FUNC)
 
+#define SYM_CODE_START(name)                           \
+       SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)      \
+       .cfi_startproc;
+
+#define SYM_CODE_END(name)                             \
+       .cfi_endproc;                                   \
+       SYM_END(name, SYM_T_NONE)
+
 #endif
index 35348d4..21319c1 100644 (file)
@@ -105,13 +105,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
        return __pgprot(prot);
 }
 
+extern bool wc_enabled;
+
 #define pgprot_writecombine pgprot_writecombine
 
 static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
 {
        unsigned long prot = pgprot_val(_prot);
 
-       prot = (prot & ~_CACHE_MASK) | _CACHE_WUC;
+       prot = (prot & ~_CACHE_MASK) | (wc_enabled ? _CACHE_WUC : _CACHE_SUC);
 
        return __pgprot(prot);
 }
index 65518bb..1ec8e4c 100644 (file)
@@ -18,7 +18,7 @@
        .text
        .cfi_sections   .debug_frame
        .align  5
-SYM_FUNC_START(handle_syscall)
+SYM_CODE_START(handle_syscall)
        csrrd           t0, PERCPU_BASE_KS
        la.pcrel        t1, kernelsp
        add.d           t1, t1, t0
@@ -71,7 +71,7 @@ SYM_FUNC_START(handle_syscall)
        bl              do_syscall
 
        RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_syscall)
+SYM_CODE_END(handle_syscall)
 _ASM_NOKPROBE(handle_syscall)
 
 SYM_CODE_START(ret_from_fork)
index 78f0663..2bb3aa2 100644 (file)
@@ -31,7 +31,7 @@ SYM_FUNC_START(__arch_cpu_idle)
 1:     jr      ra
 SYM_FUNC_END(__arch_cpu_idle)
 
-SYM_FUNC_START(handle_vint)
+SYM_CODE_START(handle_vint)
        BACKUP_T0T1
        SAVE_ALL
        la_abs  t1, __arch_cpu_idle
@@ -46,11 +46,11 @@ SYM_FUNC_START(handle_vint)
        la_abs  t0, do_vint
        jirl    ra, t0, 0
        RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_vint)
+SYM_CODE_END(handle_vint)
 
-SYM_FUNC_START(except_vec_cex)
+SYM_CODE_START(except_vec_cex)
        b       cache_parity_error
-SYM_FUNC_END(except_vec_cex)
+SYM_CODE_END(except_vec_cex)
 
        .macro  build_prep_badv
        csrrd   t0, LOONGARCH_CSR_BADV
@@ -66,7 +66,7 @@ SYM_FUNC_END(except_vec_cex)
 
        .macro  BUILD_HANDLER exception handler prep
        .align  5
-       SYM_FUNC_START(handle_\exception)
+       SYM_CODE_START(handle_\exception)
        666:
        BACKUP_T0T1
        SAVE_ALL
@@ -76,7 +76,7 @@ SYM_FUNC_END(except_vec_cex)
        jirl    ra, t0, 0
        668:
        RESTORE_ALL_AND_RET
-       SYM_FUNC_END(handle_\exception)
+       SYM_CODE_END(handle_\exception)
        SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
        .endm
 
@@ -93,7 +93,7 @@ SYM_FUNC_END(except_vec_cex)
        BUILD_HANDLER watch watch none
        BUILD_HANDLER reserved reserved none    /* others */
 
-SYM_FUNC_START(handle_sys)
+SYM_CODE_START(handle_sys)
        la_abs  t0, handle_syscall
        jr      t0
-SYM_FUNC_END(handle_sys)
+SYM_CODE_END(handle_sys)
index 7783f0a..aed6591 100644 (file)
@@ -161,19 +161,19 @@ static void __init smbios_parse(void)
 }
 
 #ifdef CONFIG_ARCH_WRITECOMBINE
-pgprot_t pgprot_wc = PAGE_KERNEL_WUC;
+bool wc_enabled = true;
 #else
-pgprot_t pgprot_wc = PAGE_KERNEL_SUC;
+bool wc_enabled = false;
 #endif
 
-EXPORT_SYMBOL(pgprot_wc);
+EXPORT_SYMBOL(wc_enabled);
 
 static int __init setup_writecombine(char *p)
 {
        if (!strcmp(p, "on"))
-               pgprot_wc = PAGE_KERNEL_WUC;
+               wc_enabled = true;
        else if (!strcmp(p, "off"))
-               pgprot_wc = PAGE_KERNEL_SUC;
+               wc_enabled = false;
        else
                pr_warn("Unknown writecombine setting \"%s\".\n", p);
 
index f3fe8c0..4dd5342 100644 (file)
@@ -43,11 +43,11 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
        void *vfrom, *vto;
 
-       vto = kmap_atomic(to);
-       vfrom = kmap_atomic(from);
+       vfrom = kmap_local_page(from);
+       vto = kmap_local_page(to);
        copy_page(vto, vfrom);
-       kunmap_atomic(vfrom);
-       kunmap_atomic(vto);
+       kunmap_local(vfrom);
+       kunmap_local(vto);
        /* Make sure this page is cleared on other CPU's too before using it */
        smp_wmb();
 }
@@ -240,6 +240,7 @@ pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
 pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
 #ifndef __PAGETABLE_PUD_FOLDED
 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pud_table);
 #endif
 #ifndef __PAGETABLE_PMD_FOLDED
 pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
index ca17dd3..d5d682f 100644 (file)
@@ -17,7 +17,7 @@
 #define PTRS_PER_PTE_BITS      (PAGE_SHIFT - 3)
 
        .macro tlb_do_page_fault, write
-       SYM_FUNC_START(tlb_do_page_fault_\write)
+       SYM_CODE_START(tlb_do_page_fault_\write)
        SAVE_ALL
        csrrd           a2, LOONGARCH_CSR_BADV
        move            a0, sp
        li.w            a1, \write
        bl              do_page_fault
        RESTORE_ALL_AND_RET
-       SYM_FUNC_END(tlb_do_page_fault_\write)
+       SYM_CODE_END(tlb_do_page_fault_\write)
        .endm
 
        tlb_do_page_fault 0
        tlb_do_page_fault 1
 
-SYM_FUNC_START(handle_tlb_protect)
+SYM_CODE_START(handle_tlb_protect)
        BACKUP_T0T1
        SAVE_ALL
        move            a0, sp
@@ -41,9 +41,9 @@ SYM_FUNC_START(handle_tlb_protect)
        la_abs          t0, do_page_fault
        jirl            ra, t0, 0
        RESTORE_ALL_AND_RET
-SYM_FUNC_END(handle_tlb_protect)
+SYM_CODE_END(handle_tlb_protect)
 
-SYM_FUNC_START(handle_tlb_load)
+SYM_CODE_START(handle_tlb_load)
        csrwr           t0, EXCEPTION_KS0
        csrwr           t1, EXCEPTION_KS1
        csrwr           ra, EXCEPTION_KS2
@@ -187,16 +187,16 @@ nopage_tlb_load:
        csrrd           ra, EXCEPTION_KS2
        la_abs          t0, tlb_do_page_fault_0
        jr              t0
-SYM_FUNC_END(handle_tlb_load)
+SYM_CODE_END(handle_tlb_load)
 
-SYM_FUNC_START(handle_tlb_load_ptw)
+SYM_CODE_START(handle_tlb_load_ptw)
        csrwr           t0, LOONGARCH_CSR_KS0
        csrwr           t1, LOONGARCH_CSR_KS1
        la_abs          t0, tlb_do_page_fault_0
        jr              t0
-SYM_FUNC_END(handle_tlb_load_ptw)
+SYM_CODE_END(handle_tlb_load_ptw)
 
-SYM_FUNC_START(handle_tlb_store)
+SYM_CODE_START(handle_tlb_store)
        csrwr           t0, EXCEPTION_KS0
        csrwr           t1, EXCEPTION_KS1
        csrwr           ra, EXCEPTION_KS2
@@ -343,16 +343,16 @@ nopage_tlb_store:
        csrrd           ra, EXCEPTION_KS2
        la_abs          t0, tlb_do_page_fault_1
        jr              t0
-SYM_FUNC_END(handle_tlb_store)
+SYM_CODE_END(handle_tlb_store)
 
-SYM_FUNC_START(handle_tlb_store_ptw)
+SYM_CODE_START(handle_tlb_store_ptw)
        csrwr           t0, LOONGARCH_CSR_KS0
        csrwr           t1, LOONGARCH_CSR_KS1
        la_abs          t0, tlb_do_page_fault_1
        jr              t0
-SYM_FUNC_END(handle_tlb_store_ptw)
+SYM_CODE_END(handle_tlb_store_ptw)
 
-SYM_FUNC_START(handle_tlb_modify)
+SYM_CODE_START(handle_tlb_modify)
        csrwr           t0, EXCEPTION_KS0
        csrwr           t1, EXCEPTION_KS1
        csrwr           ra, EXCEPTION_KS2
@@ -497,16 +497,16 @@ nopage_tlb_modify:
        csrrd           ra, EXCEPTION_KS2
        la_abs          t0, tlb_do_page_fault_1
        jr              t0
-SYM_FUNC_END(handle_tlb_modify)
+SYM_CODE_END(handle_tlb_modify)
 
-SYM_FUNC_START(handle_tlb_modify_ptw)
+SYM_CODE_START(handle_tlb_modify_ptw)
        csrwr           t0, LOONGARCH_CSR_KS0
        csrwr           t1, LOONGARCH_CSR_KS1
        la_abs          t0, tlb_do_page_fault_1
        jr              t0
-SYM_FUNC_END(handle_tlb_modify_ptw)
+SYM_CODE_END(handle_tlb_modify_ptw)
 
-SYM_FUNC_START(handle_tlb_refill)
+SYM_CODE_START(handle_tlb_refill)
        csrwr           t0, LOONGARCH_CSR_TLBRSAVE
        csrrd           t0, LOONGARCH_CSR_PGD
        lddir           t0, t0, 3
@@ -521,4 +521,4 @@ SYM_FUNC_START(handle_tlb_refill)
        tlbfill
        csrrd           t0, LOONGARCH_CSR_TLBRSAVE
        ertn
-SYM_FUNC_END(handle_tlb_refill)
+SYM_CODE_END(handle_tlb_refill)
index 7b2ac13..467ee6b 100644 (file)
@@ -592,7 +592,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int srcu_idx, err;
        kvm_pfn_t pfn;
-       pte_t *ptep, entry, old_pte;
+       pte_t *ptep, entry;
        bool writeable;
        unsigned long prot_bits;
        unsigned long mmu_seq;
@@ -664,7 +664,6 @@ retry:
        entry = pfn_pte(pfn, __pgprot(prot_bits));
 
        /* Write the PTE */
-       old_pte = *ptep;
        set_pte(ptep, entry);
 
        err = 0;
index 21f681e..e6fe1d5 100644 (file)
@@ -94,6 +94,13 @@ static inline pte_t pte_wrprotect(pte_t pte)
 
 #define pte_wrprotect pte_wrprotect
 
+static inline int pte_read(pte_t pte)
+{
+       return (pte_val(pte) & _PAGE_RO) != _PAGE_NA;
+}
+
+#define pte_read pte_read
+
 static inline int pte_write(pte_t pte)
 {
        return !(pte_val(pte) & _PAGE_RO);
index 5cd9acf..eb6891e 100644 (file)
@@ -197,7 +197,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
 {
        unsigned long old;
 
-       if (pte_young(*ptep))
+       if (!pte_young(*ptep))
                return 0;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        return (old & _PAGE_ACCESSED) != 0;
index 56ea482..c721478 100644 (file)
@@ -25,7 +25,9 @@ static inline int pte_write(pte_t pte)
        return pte_val(pte) & _PAGE_RW;
 }
 #endif
+#ifndef pte_read
 static inline int pte_read(pte_t pte)          { return 1; }
+#endif
 static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_special(pte_t pte)       { return pte_val(pte) & _PAGE_SPECIAL; }
 static inline int pte_none(pte_t pte)          { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
index 9692acb..7eda33a 100644 (file)
@@ -137,8 +137,9 @@ ret_from_syscall:
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
-       bne-    2f
+       bne-    .L44x_icache_flush
 #endif /* CONFIG_PPC_47x */
+.L44x_icache_flush_return:
        kuep_unlock
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
@@ -172,10 +173,11 @@ syscall_exit_finish:
        b       1b
 
 #ifdef CONFIG_44x
-2:     li      r7,0
+.L44x_icache_flush:
+       li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
-       b       1b
+       b       .L44x_icache_flush_return
 #endif  /* CONFIG_44x */
 
        .globl  ret_from_fork
index 97e9ea0..0f1641a 100644 (file)
@@ -395,7 +395,7 @@ interrupt_base:
 #ifdef CONFIG_PPC_FPU
        FP_UNAVAILABLE_EXCEPTION
 #else
-       EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, unknown_exception)
+       EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, emulation_assist_interrupt)
 #endif
 
        /* System Call Interrupt */
index bae45b3..2b0cac6 100644 (file)
@@ -184,9 +184,6 @@ _GLOBAL_TOC(plpar_hcall)
 plpar_hcall_trace:
        HCALL_INST_PRECALL(R5)
 
-       std     r4,STK_PARAM(R4)(r1)
-       mr      r0,r4
-
        mr      r4,r5
        mr      r5,r6
        mr      r6,r7
@@ -196,7 +193,7 @@ plpar_hcall_trace:
 
        HVSC
 
-       ld      r12,STK_PARAM(R4)(r1)
+       ld      r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
        std     r4,0(r12)
        std     r5,8(r12)
        std     r6,16(r12)
@@ -296,9 +293,6 @@ _GLOBAL_TOC(plpar_hcall9)
 plpar_hcall9_trace:
        HCALL_INST_PRECALL(R5)
 
-       std     r4,STK_PARAM(R4)(r1)
-       mr      r0,r4
-
        mr      r4,r5
        mr      r5,r6
        mr      r6,r7
index 1329e06..b43a6bb 100644 (file)
@@ -6,7 +6,6 @@
 # for more details.
 #
 
-OBJCOPYFLAGS    := -O binary
 LDFLAGS_vmlinux := -z norelro
 ifeq ($(CONFIG_RELOCATABLE),y)
        LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
index 2d644e1..6278c38 100644 (file)
@@ -1 +1,5 @@
+ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
+CFLAGS_errata.o := -mcmodel=medany
+endif
+
 obj-y += errata.o
index 740a979..2b2f5df 100644 (file)
@@ -31,6 +31,27 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
        return addr;
 }
 
+/*
+ * Let's follow x86/arm64 and ignore the compat syscalls.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+       return is_compat_task();
+}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym,
+                                              const char *name)
+{
+       /*
+        * Since all syscall functions have the __riscv_ prefix, we must skip it.
+        * However, as described above, we decided to ignore compat
+        * syscalls, so we don't care about the __riscv_compat_ prefix here.
+        */
+       return !strcmp(sym + 8, name);
+}
+
 struct dyn_arch_ftrace {
 };
 #endif
index e7882cc..78ea44f 100644 (file)
@@ -40,6 +40,15 @@ void arch_remove_kprobe(struct kprobe *p);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
 bool kprobe_breakpoint_handler(struct pt_regs *regs);
 bool kprobe_single_step_handler(struct pt_regs *regs);
-
+#else
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+       return false;
+}
+
+static inline bool kprobe_single_step_handler(struct pt_regs *regs)
+{
+       return false;
+}
 #endif /* CONFIG_KPROBES */
 #endif /* _ASM_RISCV_KPROBES_H */
index f2183e0..3fc7ded 100644 (file)
@@ -34,7 +34,18 @@ struct arch_uprobe {
        bool simulate;
 };
 
+#ifdef CONFIG_UPROBES
 bool uprobe_breakpoint_handler(struct pt_regs *regs);
 bool uprobe_single_step_handler(struct pt_regs *regs);
-
+#else
+static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+       return false;
+}
+
+static inline bool uprobe_single_step_handler(struct pt_regs *regs)
+{
+       return false;
+}
+#endif /* CONFIG_UPROBES */
 #endif /* _ASM_RISCV_UPROBES_H */
index a8efa05..9cc0a76 100644 (file)
@@ -60,7 +60,7 @@ static void init_irq_stacks(void)
 }
 #endif /* CONFIG_VMAP_STACK */
 
-#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
 void do_softirq_own_stack(void)
 {
 #ifdef CONFIG_IRQ_STACKS
@@ -92,7 +92,7 @@ void do_softirq_own_stack(void)
 #endif
                __do_softirq();
 }
-#endif /* CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK */
+#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
 
 #else
 static void init_irq_stacks(void) {}
index e600aab..aac853a 100644 (file)
@@ -173,19 +173,6 @@ static void __init init_resources(void)
        if (ret < 0)
                goto error;
 
-#ifdef CONFIG_KEXEC_CORE
-       if (crashk_res.start != crashk_res.end) {
-               ret = add_resource(&iomem_resource, &crashk_res);
-               if (ret < 0)
-                       goto error;
-       }
-       if (crashk_low_res.start != crashk_low_res.end) {
-               ret = add_resource(&iomem_resource, &crashk_low_res);
-               if (ret < 0)
-                       goto error;
-       }
-#endif
-
 #ifdef CONFIG_CRASH_DUMP
        if (elfcorehdr_size > 0) {
                elfcorehdr_res.start = elfcorehdr_addr;
index 180d951..21a4d0e 100644 (file)
@@ -311,13 +311,6 @@ static inline void __user *get_sigframe(struct ksignal *ksig,
        /* Align the stack frame. */
        sp &= ~0xfUL;
 
-       /*
-        * Fail if the size of the altstack is not large enough for the
-        * sigframe construction.
-        */
-       if (current->sas_ss_size && sp < current->sas_ss_sp)
-               return (void __user __force *)-1UL;
-
        return (void __user *)sp;
 }
 
index 19807c4..fae8f61 100644 (file)
@@ -13,6 +13,8 @@
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
 #include <linux/kprobes.h>
+#include <linux/uprobes.h>
+#include <asm/uprobes.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/irq.h>
@@ -247,22 +249,28 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
        return GET_INSN_LENGTH(insn);
 }
 
+static bool probe_single_step_handler(struct pt_regs *regs)
+{
+       bool user = user_mode(regs);
+
+       return user ? uprobe_single_step_handler(regs) : kprobe_single_step_handler(regs);
+}
+
+static bool probe_breakpoint_handler(struct pt_regs *regs)
+{
+       bool user = user_mode(regs);
+
+       return user ? uprobe_breakpoint_handler(regs) : kprobe_breakpoint_handler(regs);
+}
+
 void handle_break(struct pt_regs *regs)
 {
-#ifdef CONFIG_KPROBES
-       if (kprobe_single_step_handler(regs))
+       if (probe_single_step_handler(regs))
                return;
 
-       if (kprobe_breakpoint_handler(regs))
-               return;
-#endif
-#ifdef CONFIG_UPROBES
-       if (uprobe_single_step_handler(regs))
+       if (probe_breakpoint_handler(regs))
                return;
 
-       if (uprobe_breakpoint_handler(regs))
-               return;
-#endif
        current->thread.bad_cause = regs->cause;
 
        if (user_mode(regs))
index ecd3ae6..8581693 100644 (file)
@@ -245,7 +245,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
        emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
        /* Set return value. */
        if (!is_tail_call)
-               emit_mv(RV_REG_A0, RV_REG_A5, ctx);
+               emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
        emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
                  is_tail_call ? (RV_FENTRY_NINSNS + 1) * 4 : 0, /* skip reserved nops and TCC init */
                  ctx);
@@ -759,8 +759,10 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
        if (ret)
                return ret;
 
-       if (save_ret)
-               emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+       if (save_ret) {
+               emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+               emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
+       }
 
        /* update branch with beqz */
        if (ctx->insns) {
@@ -853,7 +855,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
        if (save_ret) {
-               stack_size += 8;
+               stack_size += 16; /* Save both A5 (BPF R0) and A0 */
                retval_off = stack_size;
        }
 
@@ -957,6 +959,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
                if (ret)
                        goto out;
                emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+               emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx);
                im->ip_after_call = ctx->insns + ctx->ninsns;
                /* 2 nops reserved for auipc+jalr pair */
                emit(rv_nop(), ctx);
@@ -988,8 +991,10 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
        if (flags & BPF_TRAMP_F_RESTORE_REGS)
                restore_args(nregs, args_off, ctx);
 
-       if (save_ret)
+       if (save_ret) {
                emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+               emit_ld(regmap[BPF_REG_0], -(retval_off - 8), RV_REG_FP, ctx);
+       }
 
        emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
 
@@ -1515,7 +1520,8 @@ out_be:
                if (ret)
                        return ret;
 
-               emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+               if (insn->src_reg != BPF_PSEUDO_CALL)
+                       emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
                break;
        }
        /* tail call */
index c1b47d6..efaebba 100644 (file)
@@ -303,11 +303,6 @@ static inline u8 gisa_get_ipm_or_restore_iam(struct kvm_s390_gisa_interrupt *gi)
        return 0;
 }
 
-static inline int gisa_in_alert_list(struct kvm_s390_gisa *gisa)
-{
-       return READ_ONCE(gisa->next_alert) != (u32)virt_to_phys(gisa);
-}
-
 static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
 {
        set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
@@ -3216,11 +3211,12 @@ void kvm_s390_gisa_destroy(struct kvm *kvm)
 
        if (!gi->origin)
                return;
-       if (gi->alert.mask)
-               KVM_EVENT(3, "vm 0x%pK has unexpected iam 0x%02x",
-                         kvm, gi->alert.mask);
-       while (gisa_in_alert_list(gi->origin))
-               cpu_relax();
+       WARN(gi->alert.mask != 0x00,
+            "unexpected non zero alert.mask 0x%02x",
+            gi->alert.mask);
+       gi->alert.mask = 0x00;
+       if (gisa_set_iam(gi->origin, gi->alert.mask))
+               process_gib_alert_list();
        hrtimer_cancel(&gi->timer);
        gi->origin = NULL;
        VM_EVENT(kvm, 3, "gisa 0x%pK destroyed", gisa);
index 2861e33..e507692 100644 (file)
@@ -2066,6 +2066,7 @@ struct bpf_tramp_jit {
                                 * func_addr's original caller
                                 */
        int stack_size;         /* Trampoline stack size */
+       int backchain_off;      /* Offset of backchain */
        int stack_args_off;     /* Offset of stack arguments for calling
                                 * func_addr, has to be at the top
                                 */
@@ -2086,9 +2087,10 @@ struct bpf_tramp_jit {
                                 * for __bpf_prog_enter() return value and
                                 * func_addr respectively
                                 */
-       int r14_off;            /* Offset of saved %r14 */
        int run_ctx_off;        /* Offset of struct bpf_tramp_run_ctx */
        int tccnt_off;          /* Offset of saved tailcall counter */
+       int r14_off;            /* Offset of saved %r14, has to be at the
+                                * bottom */
        int do_fexit;           /* do_fexit: label */
 };
 
@@ -2247,8 +2249,12 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
         * Calculate the stack layout.
         */
 
-       /* Reserve STACK_FRAME_OVERHEAD bytes for the callees. */
+       /*
+        * Allocate STACK_FRAME_OVERHEAD bytes for the callees. As the s390x
+        * ABI requires, put our backchain at the end of the allocated memory.
+        */
        tjit->stack_size = STACK_FRAME_OVERHEAD;
+       tjit->backchain_off = tjit->stack_size - sizeof(u64);
        tjit->stack_args_off = alloc_stack(tjit, nr_stack_args * sizeof(u64));
        tjit->reg_args_off = alloc_stack(tjit, nr_reg_args * sizeof(u64));
        tjit->ip_off = alloc_stack(tjit, sizeof(u64));
@@ -2256,16 +2262,25 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
        tjit->bpf_args_off = alloc_stack(tjit, nr_bpf_args * sizeof(u64));
        tjit->retval_off = alloc_stack(tjit, sizeof(u64));
        tjit->r7_r8_off = alloc_stack(tjit, 2 * sizeof(u64));
-       tjit->r14_off = alloc_stack(tjit, sizeof(u64));
        tjit->run_ctx_off = alloc_stack(tjit,
                                        sizeof(struct bpf_tramp_run_ctx));
        tjit->tccnt_off = alloc_stack(tjit, sizeof(u64));
-       /* The caller has already reserved STACK_FRAME_OVERHEAD bytes. */
-       tjit->stack_size -= STACK_FRAME_OVERHEAD;
+       tjit->r14_off = alloc_stack(tjit, sizeof(u64) * 2);
+       /*
+        * In accordance with the s390x ABI, the caller has allocated
+        * STACK_FRAME_OVERHEAD bytes for us. 8 of them contain the caller's
+        * backchain, and the rest we can use.
+        */
+       tjit->stack_size -= STACK_FRAME_OVERHEAD - sizeof(u64);
        tjit->orig_stack_args_off = tjit->stack_size + STACK_FRAME_OVERHEAD;
 
+       /* lgr %r1,%r15 */
+       EMIT4(0xb9040000, REG_1, REG_15);
        /* aghi %r15,-stack_size */
        EMIT4_IMM(0xa70b0000, REG_15, -tjit->stack_size);
+       /* stg %r1,backchain_off(%r15) */
+       EMIT6_DISP_LH(0xe3000000, 0x0024, REG_1, REG_0, REG_15,
+                     tjit->backchain_off);
        /* mvc tccnt_off(4,%r15),stack_size+STK_OFF_TCCNT(%r15) */
        _EMIT6(0xd203f000 | tjit->tccnt_off,
               0xf000 | (tjit->stack_size + STK_OFF_TCCNT));
index dc8c876..80d76ae 100644 (file)
@@ -103,6 +103,16 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
        return ES_OK;
 }
 
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+       return ES_OK;
+}
+
+static bool fault_in_kernel_space(unsigned long address)
+{
+       return false;
+}
+
 #undef __init
 #define __init
 
index 76b1f8b..dab4ed1 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <asm/insn.h>
+#include <linux/mm.h>
 
 #include "perf_event.h"
 
@@ -132,9 +133,9 @@ static int get_branch_type(unsigned long from, unsigned long to, int abort,
                 * The LBR logs any address in the IP, even if the IP just
                 * faulted. This means userspace can control the from address.
                 * Ensure we don't blindly read any address by validating it is
-                * a known text address.
+                * a known text address and not a vsyscall address.
                 */
-               if (kernel_text_address(from)) {
+               if (kernel_text_address(from) && !in_gate_area_no_mm(from)) {
                        addr = (void *)from;
                        /*
                         * Assume we can get the maximum possible size
index 783ed33..21556ad 100644 (file)
@@ -7,6 +7,8 @@
  * Author : K. Y. Srinivasan <kys@microsoft.com>
  */
 
+#define pr_fmt(fmt)  "Hyper-V: " fmt
+
 #include <linux/efi.h>
 #include <linux/types.h>
 #include <linux/bitfield.h>
@@ -191,7 +193,7 @@ void set_hv_tscchange_cb(void (*cb)(void))
        struct hv_tsc_emulation_control emu_ctrl = {.enabled = 1};
 
        if (!hv_reenlightenment_available()) {
-               pr_warn("Hyper-V: reenlightenment support is unavailable\n");
+               pr_warn("reenlightenment support is unavailable\n");
                return;
        }
 
@@ -394,6 +396,7 @@ static void __init hv_get_partition_id(void)
        local_irq_restore(flags);
 }
 
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
 static u8 __init get_vtl(void)
 {
        u64 control = HV_HYPERCALL_REP_COMP_1 | HVCALL_GET_VP_REGISTERS;
@@ -416,13 +419,16 @@ static u8 __init get_vtl(void)
        if (hv_result_success(ret)) {
                ret = output->as64.low & HV_X64_VTL_MASK;
        } else {
-               pr_err("Failed to get VTL(%lld) and set VTL to zero by default.\n", ret);
-               ret = 0;
+               pr_err("Failed to get VTL(error: %lld) exiting...\n", ret);
+               BUG();
        }
 
        local_irq_restore(flags);
        return ret;
 }
+#else
+static inline u8 get_vtl(void) { return 0; }
+#endif
 
 /*
  * This function is to be invoked early in the boot sequence after the
@@ -564,7 +570,7 @@ skip_hypercall_pg_init:
        if (cpu_feature_enabled(X86_FEATURE_IBT) &&
            *(u32 *)hv_hypercall_pg != gen_endbr()) {
                setup_clear_cpu_cap(X86_FEATURE_IBT);
-               pr_warn("Hyper-V: Disabling IBT because of Hyper-V bug\n");
+               pr_warn("Disabling IBT because of Hyper-V bug\n");
        }
 #endif
 
@@ -604,8 +610,10 @@ skip_hypercall_pg_init:
        hv_query_ext_cap(0);
 
        /* Find the VTL */
-       if (!ms_hyperv.paravisor_present && hv_isolation_type_snp())
-               ms_hyperv.vtl = get_vtl();
+       ms_hyperv.vtl = get_vtl();
+
+       if (ms_hyperv.vtl > 0) /* non-default VTL */
+               hv_vtl_early_init();
 
        return;
 
index 36a5622..999f5ac 100644 (file)
@@ -215,7 +215,7 @@ static int hv_vtl_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
        return hv_vtl_bringup_vcpu(vp_id, start_eip);
 }
 
-static int __init hv_vtl_early_init(void)
+int __init hv_vtl_early_init(void)
 {
        /*
         * `boot_cpu_has` returns the runtime feature support,
@@ -230,4 +230,3 @@ static int __init hv_vtl_early_init(void)
 
        return 0;
 }
-early_initcall(hv_vtl_early_init);
index 3a233eb..25050d9 100644 (file)
@@ -28,8 +28,6 @@ struct x86_cpu {
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
-extern int arch_register_cpu(int num);
-extern void arch_unregister_cpu(int);
 extern void soft_restart_cpu(void);
 #endif
 
index 31089b8..a2be3ae 100644 (file)
@@ -157,7 +157,8 @@ static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) {
 static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
 #endif
 
-extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
+extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
+                                          unsigned int size, u64 xfeatures, u32 pkru);
 extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
 
 static inline void fpstate_set_confidential(struct fpu_guest *gfpu)
index 17715cb..70d1394 100644 (file)
@@ -528,7 +528,6 @@ struct kvm_pmu {
        u64 raw_event_mask;
        struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
        struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
-       struct irq_work irq_work;
 
        /*
         * Overlay the bitmap with a 64-bit atomic so that all bits can be
index 033b53f..896445e 100644 (file)
@@ -340,8 +340,10 @@ static inline u64 hv_get_non_nested_register(unsigned int reg) { return 0; }
 
 #ifdef CONFIG_HYPERV_VTL_MODE
 void __init hv_vtl_init_platform(void);
+int __init hv_vtl_early_init(void);
 #else
 static inline void __init hv_vtl_init_platform(void) {}
+static inline int __init hv_vtl_early_init(void) { return 0; }
 #endif
 
 #include <asm-generic/mshyperv.h>
index 1d11135..b37abb5 100644 (file)
 /* AMD Last Branch Record MSRs */
 #define MSR_AMD64_LBR_SELECT                   0xc000010e
 
-/* Fam 17h MSRs */
-#define MSR_F17H_IRPERF                        0xc00000e9
+/* Zen4 */
+#define MSR_ZEN4_BP_CFG                        0xc001102e
+#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
 
+/* Zen 2 */
 #define MSR_ZEN2_SPECTRAL_CHICKEN      0xc00110e3
 #define MSR_ZEN2_SPECTRAL_CHICKEN_BIT  BIT_ULL(1)
 
+/* Fam 17h MSRs */
+#define MSR_F17H_IRPERF                        0xc00000e9
+
 /* Fam 16h MSRs */
 #define MSR_F16H_L2I_PERF_CTL          0xc0010230
 #define MSR_F16H_L2I_PERF_CTR          0xc0010231
index ad98dd1..c31c633 100644 (file)
@@ -129,7 +129,6 @@ void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
-bool smp_park_other_cpus_in_init(void);
 void smp_store_cpu_info(int id);
 
 asmlinkage __visible void smp_reboot_interrupt(void);
index 19bf955..3ac0ffc 100644 (file)
@@ -268,6 +268,7 @@ enum avic_ipi_failure_cause {
        AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
        AVIC_IPI_FAILURE_INVALID_TARGET,
        AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+       AVIC_IPI_FAILURE_INVALID_IPI_VECTOR,
 };
 
 #define AVIC_PHYSICAL_MAX_INDEX_MASK   GENMASK_ULL(8, 0)
index 517ee01..73be393 100644 (file)
@@ -403,6 +403,17 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
        u8 insn_buff[MAX_PATCH_LEN];
 
        DPRINTK(ALT, "alt table %px, -> %px", start, end);
+
+       /*
+        * In the case CONFIG_X86_5LEVEL=y, KASAN_SHADOW_START is defined using
+        * cpu_feature_enabled(X86_FEATURE_LA57) and is therefore patched here.
+        * During the process, KASAN becomes confused seeing partial LA57
+        * conversion and triggers a false-positive out-of-bound report.
+        *
+        * Disable KASAN until the patching is complete.
+        */
+       kasan_disable_current();
+
        /*
         * The scan order should be from start to end. A later scanned
         * alternative code can overwrite previously scanned alternative code.
@@ -452,6 +463,8 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
 
                text_poke_early(instr, insn_buff, insn_buff_sz);
        }
+
+       kasan_enable_current();
 }
 
 static inline bool is_jcc32(struct insn *insn)
index 03ef962..ece2b5b 100644 (file)
@@ -80,6 +80,10 @@ static const int amd_div0[] =
        AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf),
                           AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf));
 
+static const int amd_erratum_1485[] =
+       AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x19, 0x10, 0x0, 0x1f, 0xf),
+                          AMD_MODEL_RANGE(0x19, 0x60, 0x0, 0xaf, 0xf));
+
 static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
 {
        int osvw_id = *erratum++;
@@ -1149,6 +1153,10 @@ static void init_amd(struct cpuinfo_x86 *c)
                pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n");
                setup_force_cpu_bug(X86_BUG_DIV0);
        }
+
+       if (!cpu_has(c, X86_FEATURE_HYPERVISOR) &&
+            cpu_has_amd_erratum(c, amd_erratum_1485))
+               msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
 }
 
 #ifdef CONFIG_X86_32
index ded1fc7..f136ac0 100644 (file)
@@ -30,15 +30,15 @@ struct rmid_entry {
        struct list_head                list;
 };
 
-/**
- * @rmid_free_lru    A least recently used list of free RMIDs
+/*
+ * @rmid_free_lru - A least recently used list of free RMIDs
  *     These RMIDs are guaranteed to have an occupancy less than the
  *     threshold occupancy
  */
 static LIST_HEAD(rmid_free_lru);
 
-/**
- * @rmid_limbo_count     count of currently unused but (potentially)
+/*
+ * @rmid_limbo_count - count of currently unused but (potentially)
  *     dirty RMIDs.
  *     This counts RMIDs that no one is currently using but that
  *     may have a occupancy value > resctrl_rmid_realloc_threshold. User can
@@ -46,7 +46,7 @@ static LIST_HEAD(rmid_free_lru);
  */
 static unsigned int rmid_limbo_count;
 
-/**
+/*
  * @rmid_entry - The entry in the limbo and free lists.
  */
 static struct rmid_entry       *rmid_ptrs;
index a86d370..a21a4d0 100644 (file)
@@ -369,14 +369,15 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
 EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
 
 void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
-                                   unsigned int size, u32 pkru)
+                                   unsigned int size, u64 xfeatures, u32 pkru)
 {
        struct fpstate *kstate = gfpu->fpstate;
        union fpregs_state *ustate = buf;
        struct membuf mb = { .p = buf, .left = size };
 
        if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
-               __copy_xstate_to_uabi_buf(mb, kstate, pkru, XSTATE_COPY_XSAVE);
+               __copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
+                                         XSTATE_COPY_XSAVE);
        } else {
                memcpy(&ustate->fxsave, &kstate->regs.fxsave,
                       sizeof(ustate->fxsave));
index cadf687..ef69061 100644 (file)
@@ -1049,6 +1049,7 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
  * __copy_xstate_to_uabi_buf - Copy kernel saved xstate to a UABI buffer
  * @to:                membuf descriptor
  * @fpstate:   The fpstate buffer from which to copy
+ * @xfeatures: The mask of xfeatures to save (XSAVE mode only)
  * @pkru_val:  The PKRU value to store in the PKRU component
  * @copy_mode: The requested copy mode
  *
@@ -1059,7 +1060,8 @@ static void copy_feature(bool from_xstate, struct membuf *to, void *xstate,
  * It supports partial copy but @to.pos always starts from zero.
  */
 void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
-                              u32 pkru_val, enum xstate_copy_mode copy_mode)
+                              u64 xfeatures, u32 pkru_val,
+                              enum xstate_copy_mode copy_mode)
 {
        const unsigned int off_mxcsr = offsetof(struct fxregs_state, mxcsr);
        struct xregs_state *xinit = &init_fpstate.regs.xsave;
@@ -1083,7 +1085,7 @@ void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
                break;
 
        case XSTATE_COPY_XSAVE:
-               header.xfeatures &= fpstate->user_xfeatures;
+               header.xfeatures &= fpstate->user_xfeatures & xfeatures;
                break;
        }
 
@@ -1185,6 +1187,7 @@ void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
                             enum xstate_copy_mode copy_mode)
 {
        __copy_xstate_to_uabi_buf(to, tsk->thread.fpu.fpstate,
+                                 tsk->thread.fpu.fpstate->user_xfeatures,
                                  tsk->thread.pkru, copy_mode);
 }
 
@@ -1536,10 +1539,7 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
                fpregs_restore_userregs();
 
        newfps->xfeatures = curfps->xfeatures | xfeatures;
-
-       if (!guest_fpu)
-               newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
-
+       newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
        newfps->xfd = curfps->xfd & ~xfeatures;
 
        /* Do the final updates within the locked region */
index a4ecb04..3518fb2 100644 (file)
@@ -43,7 +43,8 @@ enum xstate_copy_mode {
 
 struct membuf;
 extern void __copy_xstate_to_uabi_buf(struct membuf to, struct fpstate *fpstate,
-                                     u32 pkru_val, enum xstate_copy_mode copy_mode);
+                                     u64 xfeatures, u32 pkru_val,
+                                     enum xstate_copy_mode copy_mode);
 extern void copy_xstate_to_uabi_buf(struct membuf to, struct task_struct *tsk,
                                    enum xstate_copy_mode mode);
 extern int copy_uabi_from_kernel_to_xstate(struct fpstate *fpstate, const void *kbuf, u32 *pkru);
index dcf325b..ccb0915 100644 (file)
@@ -632,6 +632,23 @@ fail:
        sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
 }
 
+static enum es_result vc_insn_string_check(struct es_em_ctxt *ctxt,
+                                          unsigned long address,
+                                          bool write)
+{
+       if (user_mode(ctxt->regs) && fault_in_kernel_space(address)) {
+               ctxt->fi.vector     = X86_TRAP_PF;
+               ctxt->fi.error_code = X86_PF_USER;
+               ctxt->fi.cr2        = address;
+               if (write)
+                       ctxt->fi.error_code |= X86_PF_WRITE;
+
+               return ES_EXCEPTION;
+       }
+
+       return ES_OK;
+}
+
 static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
                                          void *src, char *buf,
                                          unsigned int data_size,
@@ -639,7 +656,12 @@ static enum es_result vc_insn_string_read(struct es_em_ctxt *ctxt,
                                          bool backwards)
 {
        int i, b = backwards ? -1 : 1;
-       enum es_result ret = ES_OK;
+       unsigned long address = (unsigned long)src;
+       enum es_result ret;
+
+       ret = vc_insn_string_check(ctxt, address, false);
+       if (ret != ES_OK)
+               return ret;
 
        for (i = 0; i < count; i++) {
                void *s = src + (i * data_size * b);
@@ -660,7 +682,12 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
                                           bool backwards)
 {
        int i, s = backwards ? -1 : 1;
-       enum es_result ret = ES_OK;
+       unsigned long address = (unsigned long)dst;
+       enum es_result ret;
+
+       ret = vc_insn_string_check(ctxt, address, true);
+       if (ret != ES_OK)
+               return ret;
 
        for (i = 0; i < count; i++) {
                void *d = dst + (i * data_size * s);
@@ -696,6 +723,9 @@ static enum es_result vc_insn_string_write(struct es_em_ctxt *ctxt,
 static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
 {
        struct insn *insn = &ctxt->insn;
+       size_t size;
+       u64 port;
+
        *exitinfo = 0;
 
        switch (insn->opcode.bytes[0]) {
@@ -704,7 +734,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        case 0x6d:
                *exitinfo |= IOIO_TYPE_INS;
                *exitinfo |= IOIO_SEG_ES;
-               *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+               port       = ctxt->regs->dx & 0xffff;
                break;
 
        /* OUTS opcodes */
@@ -712,41 +742,43 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        case 0x6f:
                *exitinfo |= IOIO_TYPE_OUTS;
                *exitinfo |= IOIO_SEG_DS;
-               *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+               port       = ctxt->regs->dx & 0xffff;
                break;
 
        /* IN immediate opcodes */
        case 0xe4:
        case 0xe5:
                *exitinfo |= IOIO_TYPE_IN;
-               *exitinfo |= (u8)insn->immediate.value << 16;
+               port       = (u8)insn->immediate.value & 0xffff;
                break;
 
        /* OUT immediate opcodes */
        case 0xe6:
        case 0xe7:
                *exitinfo |= IOIO_TYPE_OUT;
-               *exitinfo |= (u8)insn->immediate.value << 16;
+               port       = (u8)insn->immediate.value & 0xffff;
                break;
 
        /* IN register opcodes */
        case 0xec:
        case 0xed:
                *exitinfo |= IOIO_TYPE_IN;
-               *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+               port       = ctxt->regs->dx & 0xffff;
                break;
 
        /* OUT register opcodes */
        case 0xee:
        case 0xef:
                *exitinfo |= IOIO_TYPE_OUT;
-               *exitinfo |= (ctxt->regs->dx & 0xffff) << 16;
+               port       = ctxt->regs->dx & 0xffff;
                break;
 
        default:
                return ES_DECODE_FAILED;
        }
 
+       *exitinfo |= port << 16;
+
        switch (insn->opcode.bytes[0]) {
        case 0x6c:
        case 0x6e:
@@ -756,12 +788,15 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        case 0xee:
                /* Single byte opcodes */
                *exitinfo |= IOIO_DATA_8;
+               size       = 1;
                break;
        default:
                /* Length determined by instruction parsing */
                *exitinfo |= (insn->opnd_bytes == 2) ? IOIO_DATA_16
                                                     : IOIO_DATA_32;
+               size       = (insn->opnd_bytes == 2) ? 2 : 4;
        }
+
        switch (insn->addr_bytes) {
        case 2:
                *exitinfo |= IOIO_ADDR_16;
@@ -777,7 +812,7 @@ static enum es_result vc_ioio_exitinfo(struct es_em_ctxt *ctxt, u64 *exitinfo)
        if (insn_has_rep_prefix(insn))
                *exitinfo |= IOIO_REP;
 
-       return ES_OK;
+       return vc_ioio_check(ctxt, (u16)port, size);
 }
 
 static enum es_result vc_handle_ioio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
index d8c1e3b..6395bfd 100644 (file)
@@ -524,6 +524,33 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
        return ES_OK;
 }
 
+static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
+{
+       BUG_ON(size > 4);
+
+       if (user_mode(ctxt->regs)) {
+               struct thread_struct *t = &current->thread;
+               struct io_bitmap *iobm = t->io_bitmap;
+               size_t idx;
+
+               if (!iobm)
+                       goto fault;
+
+               for (idx = port; idx < port + size; ++idx) {
+                       if (test_bit(idx, iobm->bitmap))
+                               goto fault;
+               }
+       }
+
+       return ES_OK;
+
+fault:
+       ctxt->fi.vector = X86_TRAP_GP;
+       ctxt->fi.error_code = 0;
+
+       return ES_EXCEPTION;
+}
+
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"
 
@@ -1508,6 +1535,9 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
                        return ES_DECODE_FAILED;
        }
 
+       if (user_mode(ctxt->regs))
+               return ES_UNSUPPORTED;
+
        switch (mmio) {
        case INSN_MMIO_WRITE:
                memcpy(ghcb->shared_buffer, reg_data, bytes);
index 6eb06d0..96a771f 100644 (file)
@@ -131,7 +131,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 }
 
 /*
- * Disable virtualization, APIC etc. and park the CPU in a HLT loop
+ * this function calls the 'stop' function on all other CPUs in the system.
  */
 DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
 {
@@ -172,17 +172,13 @@ static void native_stop_other_cpus(int wait)
         * 2) Wait for all other CPUs to report that they reached the
         *    HLT loop in stop_this_cpu()
         *
-        * 3) If the system uses INIT/STARTUP for CPU bringup, then
-        *    send all present CPUs an INIT vector, which brings them
-        *    completely out of the way.
+        * 3) If #2 timed out send an NMI to the CPUs which did not
+        *    yet report
         *
-        * 4) If #3 is not possible and #2 timed out send an NMI to the
-        *    CPUs which did not yet report
-        *
-        * 5) Wait for all other CPUs to report that they reached the
+        * 4) Wait for all other CPUs to report that they reached the
         *    HLT loop in stop_this_cpu()
         *
-        * #4 can obviously race against a CPU reaching the HLT loop late.
+        * #3 can obviously race against a CPU reaching the HLT loop late.
         * That CPU will have reported already and the "have all CPUs
         * reached HLT" condition will be true despite the fact that the
         * other CPU is still handling the NMI. Again, there is no
@@ -198,7 +194,7 @@ static void native_stop_other_cpus(int wait)
                /*
                 * Don't wait longer than a second for IPI completion. The
                 * wait request is not checked here because that would
-                * prevent an NMI/INIT shutdown in case that not all
+                * prevent an NMI shutdown attempt in case not all
                 * CPUs reach shutdown state.
                 */
                timeout = USEC_PER_SEC;
@@ -206,27 +202,7 @@ static void native_stop_other_cpus(int wait)
                        udelay(1);
        }
 
-       /*
-        * Park all other CPUs in INIT including "offline" CPUs, if
-        * possible. That's a safe place where they can't resume execution
-        * of HLT and then execute the HLT loop from overwritten text or
-        * page tables.
-        *
-        * The only downside is a broadcast MCE, but up to the point where
-        * the kexec() kernel brought all APs online again an MCE will just
-        * make HLT resume and handle the MCE. The machine crashes and burns
-        * due to overwritten text, page tables and data. So there is a
-        * choice between fire and frying pan. The result is pretty much
-        * the same. Chose frying pan until x86 provides a sane mechanism
-        * to park a CPU.
-        */
-       if (smp_park_other_cpus_in_init())
-               goto done;
-
-       /*
-        * If park with INIT was not possible and the REBOOT_VECTOR didn't
-        * take all secondary CPUs offline, try with the NMI.
-        */
+       /* if the REBOOT_VECTOR didn't work, try with the NMI */
        if (!cpumask_empty(&cpus_stop_mask)) {
                /*
                 * If NMI IPI is enabled, try to register the stop handler
@@ -249,7 +225,6 @@ static void native_stop_other_cpus(int wait)
                        udelay(1);
        }
 
-done:
        local_irq_save(flags);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
index 48e0406..2a187c0 100644 (file)
@@ -1240,33 +1240,6 @@ void arch_thaw_secondary_cpus_end(void)
        cache_aps_init();
 }
 
-bool smp_park_other_cpus_in_init(void)
-{
-       unsigned int cpu, this_cpu = smp_processor_id();
-       unsigned int apicid;
-
-       if (apic->wakeup_secondary_cpu_64 || apic->wakeup_secondary_cpu)
-               return false;
-
-       /*
-        * If this is a crash stop which does not execute on the boot CPU,
-        * then this cannot use the INIT mechanism because INIT to the boot
-        * CPU will reset the machine.
-        */
-       if (this_cpu)
-               return false;
-
-       for_each_cpu_and(cpu, &cpus_booted_once_mask, cpu_present_mask) {
-               if (cpu == this_cpu)
-                       continue;
-               apicid = apic->cpu_present_to_apicid(cpu);
-               if (apicid == BAD_APICID)
-                       continue;
-               send_init_sequence(apicid);
-       }
-       return true;
-}
-
 /*
  * Early setup to make printk work.
  */
index ca004e2..0bab031 100644 (file)
@@ -54,7 +54,7 @@ void arch_unregister_cpu(int num)
 EXPORT_SYMBOL(arch_unregister_cpu);
 #else /* CONFIG_HOTPLUG_CPU */
 
-static int __init arch_register_cpu(int num)
+int __init arch_register_cpu(int num)
 {
        return register_cpu(&per_cpu(cpu_devices, num).cpu, num);
 }
index 0544e30..773132c 100644 (file)
@@ -360,14 +360,6 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
        vcpu->arch.guest_supported_xcr0 =
                cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
-       /*
-        * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
-        * XSAVE/XCRO are not exposed to the guest, and even if XSAVE isn't
-        * supported by the host.
-        */
-       vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
-                                                      XFEATURE_MASK_FPSSE;
-
        kvm_update_pv_runtime(vcpu);
 
        vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
index dcd60b3..3e977db 100644 (file)
@@ -2759,13 +2759,17 @@ int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
 {
        u32 reg = kvm_lapic_get_reg(apic, lvt_type);
        int vector, mode, trig_mode;
+       int r;
 
        if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
                vector = reg & APIC_VECTOR_MASK;
                mode = reg & APIC_MODE_MASK;
                trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
-               return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
-                                       NULL);
+
+               r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
+               if (r && lvt_type == APIC_LVTPC)
+                       kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
+               return r;
        }
        return 0;
 }
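
The hunk above makes kvm_apic_local_deliver() mask LVTPC once a PMI has actually been delivered through it, matching how a physical local APIC sets the mask bit on that LVT entry after delivering a counter-overflow interrupt, so back-to-back PMIs are throttled until the guest re-arms the entry. Below is a minimal userspace sketch of that "deliver, then self-mask" pattern; the register layout, bit positions and helper names are simplified stand-ins, not the KVM implementation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; not the KVM/APIC definitions. */
#define LVT_MASKED      (1u << 16)
#define LVT_VECTOR_MASK 0xffu

static uint32_t lvtpc = 0xee;   /* hypothetical LVTPC: vector 0xee, unmasked */

/* Pretend delivery path: returns true if the interrupt was accepted. */
static bool deliver(uint32_t vector)
{
        printf("delivering PMI on vector 0x%x\n", vector);
        return true;
}

static void local_deliver_pmi(void)
{
        if (lvtpc & LVT_MASKED) {
                printf("LVTPC masked, PMI dropped\n");
                return;
        }
        if (deliver(lvtpc & LVT_VECTOR_MASK))
                lvtpc |= LVT_MASKED;    /* self-mask after a delivered PMI */
}

int main(void)
{
        local_deliver_pmi();    /* delivered, then the entry is masked */
        local_deliver_pmi();    /* dropped until software clears the mask */
        return 0;
}
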
index edb89b5..9ae07db 100644 (file)
@@ -93,14 +93,6 @@ void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops)
 #undef __KVM_X86_PMU_OP
 }
 
-static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
-{
-       struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
-       struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
-       kvm_pmu_deliver_pmi(vcpu);
-}
-
 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
@@ -124,20 +116,7 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
        }
 
-       if (!pmc->intr || skip_pmi)
-               return;
-
-       /*
-        * Inject PMI. If vcpu was in a guest mode during NMI PMI
-        * can be ejected on a guest mode re-entry. Otherwise we can't
-        * be sure that vcpu wasn't executing hlt instruction at the
-        * time of vmexit and is not going to re-enter guest mode until
-        * woken up. So we should wake it, but this is impossible from
-        * NMI context. Do it from irq work instead.
-        */
-       if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
-               irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
-       else
+       if (pmc->intr && !skip_pmi)
                kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
 }
 
@@ -675,9 +654,6 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-
-       irq_work_sync(&pmu->irq_work);
        static_call(kvm_x86_pmu_reset)(vcpu);
 }
 
@@ -687,7 +663,6 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu)
 
        memset(pmu, 0, sizeof(*pmu));
        static_call(kvm_x86_pmu_init)(vcpu);
-       init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
        pmu->event_count = 0;
        pmu->need_cleanup = false;
        kvm_pmu_refresh(vcpu);
index 7d9ba30..1d64113 100644 (file)
@@ -74,6 +74,12 @@ static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
        return counter & pmc_bitmask(pmc);
 }
 
+static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
+{
+       pmc->counter += val - pmc_read_counter(pmc);
+       pmc->counter &= pmc_bitmask(pmc);
+}
+
 static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
 {
        if (pmc->perf_event) {
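
The new pmc_write_counter() helper above folds the guest-written value into the running count and then truncates to the emulated counter width, replacing the open-coded "counter += data - pmc_read_counter(pmc)" in the AMD and Intel set_msr paths (converted in later hunks). A standalone sketch of that arithmetic follows; the struct layout and the 48-bit width are assumptions made for the example, not KVM's definitions.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for a KVM PMC; field names are assumptions. */
struct pmc {
        uint64_t counter;       /* running count */
        unsigned int width;     /* emulated counter width in bits */
};

static uint64_t pmc_bitmask(const struct pmc *pmc)
{
        return pmc->width >= 64 ? ~0ull : (1ull << pmc->width) - 1;
}

static uint64_t pmc_read_counter(const struct pmc *pmc)
{
        return pmc->counter & pmc_bitmask(pmc);
}

static void pmc_write_counter(struct pmc *pmc, uint64_t val)
{
        pmc->counter += val - pmc_read_counter(pmc);
        pmc->counter &= pmc_bitmask(pmc);       /* the truncation the hunk adds */
}

int main(void)
{
        struct pmc pmc = { .counter = 0xffffffffffffull, .width = 48 };

        pmc_write_counter(&pmc, 0x123456789abcull);
        printf("counter = 0x%" PRIx64 "\n", pmc_read_counter(&pmc)); /* 0x123456789abc */
        return 0;
}
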
index 2092db8..4b74ea9 100644 (file)
@@ -529,8 +529,11 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
                break;
+       case AVIC_IPI_FAILURE_INVALID_IPI_VECTOR:
+               /* Invalid IPI with vector < 16 */
+               break;
        default:
-               pr_err("Unknown IPI interception\n");
+               vcpu_unimpl(vcpu, "Unknown avic incomplete IPI interception\n");
        }
 
        return 1;
index dd496c9..3fea8c4 100644 (file)
@@ -1253,6 +1253,9 @@ void svm_leave_nested(struct kvm_vcpu *vcpu)
 
                nested_svm_uninit_mmu_context(vcpu);
                vmcb_mark_all_dirty(svm->vmcb);
+
+               if (kvm_apicv_activated(vcpu->kvm))
+                       kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
        }
 
        kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
index cef5a3d..373ff6a 100644 (file)
@@ -160,7 +160,7 @@ static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
-               pmc->counter += data - pmc_read_counter(pmc);
+               pmc_write_counter(pmc, data);
                pmc_update_sample_period(pmc);
                return 0;
        }
index 9507df9..beea99c 100644 (file)
@@ -691,7 +691,7 @@ static int svm_hardware_enable(void)
         */
        if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
                struct sev_es_save_area *hostsa;
-               u32 msr_hi;
+               u32 __maybe_unused msr_hi;
 
                hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
 
@@ -913,8 +913,7 @@ void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
        if (intercept == svm->x2avic_msrs_intercepted)
                return;
 
-       if (!x2avic_enabled ||
-           !apic_x2apic_mode(svm->vcpu.arch.apic))
+       if (!x2avic_enabled)
                return;
 
        for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
index f2efa0b..820d3e1 100644 (file)
@@ -436,11 +436,11 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        if (!msr_info->host_initiated &&
                            !(msr & MSR_PMC_FULL_WIDTH_BIT))
                                data = (s64)(s32)data;
-                       pmc->counter += data - pmc_read_counter(pmc);
+                       pmc_write_counter(pmc, data);
                        pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_fixed_pmc(pmu, msr))) {
-                       pmc->counter += data - pmc_read_counter(pmc);
+                       pmc_write_counter(pmc, data);
                        pmc_update_sample_period(pmc);
                        break;
                } else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
index 9f18b06..41cce50 100644 (file)
@@ -5382,26 +5382,37 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
-                                        struct kvm_xsave *guest_xsave)
-{
-       if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
-               return;
-
-       fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
-                                      guest_xsave->region,
-                                      sizeof(guest_xsave->region),
-                                      vcpu->arch.pkru);
-}
 
 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
                                          u8 *state, unsigned int size)
 {
+       /*
+        * Only copy state for features that are enabled for the guest.  The
+        * state itself isn't problematic, but setting bits in the header for
+        * features that are supported in *this* host but not exposed to the
+        * guest can result in KVM_SET_XSAVE failing when live migrating to a
+        * compatible host without the features that are NOT exposed to the
+        * guest.
+        *
+        * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+        * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+        * supported by the host.
+        */
+       u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
+                            XFEATURE_MASK_FPSSE;
+
        if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
                return;
 
-       fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
-                                      state, size, vcpu->arch.pkru);
+       fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
+                                      supported_xcr0, vcpu->arch.pkru);
+}
+
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
+                                        struct kvm_xsave *guest_xsave)
+{
+       return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+                                            sizeof(guest_xsave->region));
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
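
Together with the earlier cpuid.c hunk that drops the user_xfeatures fixup, the rework above filters the exported XSAVE header at KVM_GET_XSAVE/KVM_GET_XSAVE2 time: only features the guest can actually use, plus the always-available FP+SSE state, are advertised, so a later KVM_SET_XSAVE on a host lacking the hidden features does not fail (as the new comment explains). A small hedged sketch of that mask composition is below; the FP/SSE bits match the real definition, while the AVX-512 placement is only illustrative.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Bits 0/1 match the real FP/SSE bits; the AVX-512 placement is illustrative. */
#define XFEATURE_MASK_FPSSE     0x3ull
#define XFEATURE_MASK_AVX512    (0x7ull << 5)

int main(void)
{
        uint64_t host_xfeatures = XFEATURE_MASK_FPSSE | XFEATURE_MASK_AVX512;
        uint64_t guest_xcr0     = XFEATURE_MASK_FPSSE; /* AVX-512 hidden from the guest */

        /* FP+SSE are always exportable; everything else must be guest-visible. */
        uint64_t supported_xcr0  = guest_xcr0 | XFEATURE_MASK_FPSSE;
        uint64_t exported_header = host_xfeatures & supported_xcr0;

        printf("exported xfeatures header = 0x%" PRIx64 "\n", exported_header); /* 0x3 */
        return 0;
}
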
@@ -12843,6 +12854,9 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
                return true;
 #endif
 
+       if (kvm_test_request(KVM_REQ_PMI, vcpu))
+               return true;
+
        if (kvm_arch_interrupt_allowed(vcpu) &&
            (kvm_cpu_has_interrupt(vcpu) ||
            kvm_guest_apic_has_interrupt(vcpu)))
index acff3d5..73e4274 100644 (file)
@@ -772,24 +772,35 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
 
        filemap_invalidate_lock(inode->i_mapping);
 
-       /* Invalidate the page cache, including dirty pages. */
-       error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
-       if (error)
-               goto fail;
-
+       /*
+        * Invalidate the page cache, including dirty pages, for valid
+        * de-allocate mode calls to fallocate().
+        */
        switch (mode) {
        case FALLOC_FL_ZERO_RANGE:
        case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
+               error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+               if (error)
+                       goto fail;
+
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOUNMAP);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
+               error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+               if (error)
+                       goto fail;
+
                error = blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL,
                                             BLKDEV_ZERO_NOFALLBACK);
                break;
        case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE | FALLOC_FL_NO_HIDE_STALE:
+               error = truncate_bdev_range(bdev, file_to_blk_mode(file), start, end);
+               if (error)
+                       goto fail;
+
                error = blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
                                             len >> SECTOR_SHIFT, GFP_KERNEL);
                break;
index 6d7f25d..04f38a3 100644 (file)
@@ -2888,12 +2888,11 @@ static int opal_lock_unlock(struct opal_dev *dev,
        if (lk_unlk->session.who > OPAL_USER9)
                return -EINVAL;
 
-       ret = opal_get_key(dev, &lk_unlk->session.opal_key);
-       if (ret)
-               return ret;
        mutex_lock(&dev->dev_lock);
        opal_lock_check_for_saved_key(dev, lk_unlk);
-       ret = __opal_lock_unlock(dev, lk_unlk);
+       ret = opal_get_key(dev, &lk_unlk->session.opal_key);
+       if (!ret)
+               ret = __opal_lock_unlock(dev, lk_unlk);
        mutex_unlock(&dev->dev_lock);
 
        return ret;
index 467a602..7e93596 100644 (file)
@@ -367,14 +367,19 @@ int ivpu_boot(struct ivpu_device *vdev)
        return 0;
 }
 
-int ivpu_shutdown(struct ivpu_device *vdev)
+void ivpu_prepare_for_reset(struct ivpu_device *vdev)
 {
-       int ret;
-
        ivpu_hw_irq_disable(vdev);
        disable_irq(vdev->irq);
        ivpu_ipc_disable(vdev);
        ivpu_mmu_disable(vdev);
+}
+
+int ivpu_shutdown(struct ivpu_device *vdev)
+{
+       int ret;
+
+       ivpu_prepare_for_reset(vdev);
 
        ret = ivpu_hw_power_down(vdev);
        if (ret)
index 03b3d65..2adc349 100644 (file)
@@ -151,6 +151,7 @@ void ivpu_file_priv_put(struct ivpu_file_priv **link);
 
 int ivpu_boot(struct ivpu_device *vdev);
 int ivpu_shutdown(struct ivpu_device *vdev);
+void ivpu_prepare_for_reset(struct ivpu_device *vdev);
 
 static inline u8 ivpu_revision(struct ivpu_device *vdev)
 {
index 0191cf8..a277bba 100644 (file)
@@ -220,8 +220,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
        if (ret)
                return ret;
 
-       fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size,
-                                        DRM_IVPU_BO_CACHED | DRM_IVPU_BO_NOSNOOP);
+       fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
        if (!fw->mem) {
                ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
                return -ENOMEM;
@@ -331,7 +330,7 @@ int ivpu_fw_load(struct ivpu_device *vdev)
                memset(start, 0, size);
        }
 
-       clflush_cache_range(fw->mem->kvaddr, fw->mem->base.size);
+       wmb(); /* Flush WC buffers after writing fw->mem */
 
        return 0;
 }
@@ -433,7 +432,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
        if (!ivpu_fw_is_cold_boot(vdev)) {
                boot_params->save_restore_ret_address = 0;
                vdev->pm->is_warmboot = true;
-               clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+               wmb(); /* Flush WC buffers after writing save_restore_ret_address */
                return;
        }
 
@@ -495,7 +494,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
        boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
        boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);
 
-       clflush_cache_range(vdev->fw->mem->kvaddr, SZ_4K);
+       wmb(); /* Flush WC buffers after writing bootparams */
 
        ivpu_fw_boot_params_print(vdev, boot_params);
 }
index f413058..6b0ceda 100644 (file)
@@ -8,8 +8,6 @@
 #include <drm/drm_gem.h>
 #include <drm/drm_mm.h>
 
-#define DRM_IVPU_BO_NOSNOOP       0x10000000
-
 struct dma_buf;
 struct ivpu_bo_ops;
 struct ivpu_file_priv;
@@ -85,9 +83,6 @@ static inline u32 ivpu_bo_cache_mode(struct ivpu_bo *bo)
 
 static inline bool ivpu_bo_is_snooped(struct ivpu_bo *bo)
 {
-       if (bo->flags & DRM_IVPU_BO_NOSNOOP)
-               return false;
-
        return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED;
 }
 
index ab34123..1079e06 100644 (file)
@@ -13,6 +13,7 @@ struct ivpu_hw_ops {
        int (*power_up)(struct ivpu_device *vdev);
        int (*boot_fw)(struct ivpu_device *vdev);
        int (*power_down)(struct ivpu_device *vdev);
+       int (*reset)(struct ivpu_device *vdev);
        bool (*is_idle)(struct ivpu_device *vdev);
        void (*wdt_disable)(struct ivpu_device *vdev);
        void (*diagnose_failure)(struct ivpu_device *vdev);
@@ -91,6 +92,13 @@ static inline int ivpu_hw_power_down(struct ivpu_device *vdev)
        return vdev->hw->ops->power_down(vdev);
 };
 
+static inline int ivpu_hw_reset(struct ivpu_device *vdev)
+{
+       ivpu_dbg(vdev, PM, "HW reset\n");
+
+       return vdev->hw->ops->reset(vdev);
+};
+
 static inline void ivpu_hw_wdt_disable(struct ivpu_device *vdev)
 {
        vdev->hw->ops->wdt_disable(vdev);
index 9eae1c2..9760194 100644 (file)
@@ -1029,6 +1029,7 @@ const struct ivpu_hw_ops ivpu_hw_37xx_ops = {
        .power_up = ivpu_hw_37xx_power_up,
        .is_idle = ivpu_hw_37xx_is_idle,
        .power_down = ivpu_hw_37xx_power_down,
+       .reset = ivpu_hw_37xx_reset,
        .boot_fw = ivpu_hw_37xx_boot_fw,
        .wdt_disable = ivpu_hw_37xx_wdt_disable,
        .diagnose_failure = ivpu_hw_37xx_diagnose_failure,
index 8bdb59a..85171a4 100644 (file)
@@ -1179,6 +1179,7 @@ const struct ivpu_hw_ops ivpu_hw_40xx_ops = {
        .power_up = ivpu_hw_40xx_power_up,
        .is_idle = ivpu_hw_40xx_is_idle,
        .power_down = ivpu_hw_40xx_power_down,
+       .reset = ivpu_hw_40xx_reset,
        .boot_fw = ivpu_hw_40xx_boot_fw,
        .wdt_disable = ivpu_hw_40xx_wdt_disable,
        .diagnose_failure = ivpu_hw_40xx_diagnose_failure,
index 1d2e554..ce94f40 100644 (file)
@@ -11,6 +11,7 @@
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
 
+#define IVPU_MMU_VPU_ADDRESS_MASK        GENMASK(47, 12)
 #define IVPU_MMU_PGD_INDEX_MASK          GENMASK(47, 39)
 #define IVPU_MMU_PUD_INDEX_MASK          GENMASK(38, 30)
 #define IVPU_MMU_PMD_INDEX_MASK          GENMASK(29, 21)
@@ -328,12 +329,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
        if (!IS_ALIGNED(vpu_addr, IVPU_MMU_PAGE_SIZE))
                return -EINVAL;
-       /*
-        * VPU is only 32 bit, but DMA engine is 38 bit
-        * Ranges < 2 GB are reserved for VPU internal registers
-        * Limit range to 8 GB
-        */
-       if (vpu_addr < SZ_2G || vpu_addr > SZ_8G)
+
+       if (vpu_addr & ~IVPU_MMU_VPU_ADDRESS_MASK)
                return -EINVAL;
 
        prot = IVPU_MMU_ENTRY_MAPPED;
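
The MMU hunk above replaces the hard-coded 2 GiB - 8 GiB window with a single GENMASK(47, 12) validity mask, so any 4 KiB-aligned VPU address below 2^48 is accepted. A standalone sketch of the same check, with GENMASK_ULL() re-derived locally so the example compiles on its own:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Local re-derivation of GENMASK_ULL() so the example stands alone. */
#define GENMASK_ULL(h, l)  ((~0ull << (l)) & (~0ull >> (63 - (h))))

#define VPU_ADDRESS_MASK   GENMASK_ULL(47, 12)

static bool vpu_addr_valid(uint64_t vpu_addr)
{
        /* Any bit outside [47:12] (too high, or not 4 KiB aligned) rejects it. */
        return (vpu_addr & ~VPU_ADDRESS_MASK) == 0;
}

int main(void)
{
        printf("%d\n", vpu_addr_valid(0x0000000080000000ull)); /* 1: aligned, below 2^48 */
        printf("%d\n", vpu_addr_valid(0x0000000080001000ull)); /* 1: still page aligned */
        printf("%d\n", vpu_addr_valid(0x0000000080000004ull)); /* 0: not 4 KiB aligned */
        printf("%d\n", vpu_addr_valid(0x0001000000000000ull)); /* 0: bit 48 set */
        return 0;
}
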
index e6f27da..ffff249 100644 (file)
@@ -261,7 +261,8 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
        ivpu_dbg(vdev, PM, "Pre-reset..\n");
        atomic_inc(&vdev->pm->reset_counter);
        atomic_set(&vdev->pm->in_reset, 1);
-       ivpu_shutdown(vdev);
+       ivpu_prepare_for_reset(vdev);
+       ivpu_hw_reset(vdev);
        ivpu_pm_prepare_cold_boot(vdev);
        ivpu_jobs_abort_all(vdev);
        ivpu_dbg(vdev, PM, "Pre-reset done.\n");
index c711db8..0f5218e 100644 (file)
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) "ACPI: " fmt
 
 #include <linux/acpi.h>
+#include <linux/cpu.h>
 #include <linux/device.h>
 #include <linux/dmi.h>
 #include <linux/kernel.h>
index f41dda2..a4aa53b 100644 (file)
@@ -1410,10 +1410,10 @@ static int __init acpi_init(void)
        acpi_init_ffh();
 
        pci_mmcfg_late_init();
-       acpi_arm_init();
        acpi_viot_early_init();
        acpi_hest_init();
        acpi_ghes_init();
+       acpi_arm_init();
        acpi_scan_init();
        acpi_ec_init();
        acpi_debugfs_init();
index 660834a..c95d0ed 100644 (file)
@@ -1915,6 +1915,17 @@ static const struct dmi_system_id ec_dmi_table[] __initconst = {
        },
        {
                /*
+                * HP Pavilion Gaming Laptop 15-dk1xxx
+                * https://github.com/systemd/systemd/issues/28942
+                */
+               .callback = ec_honor_dsdt_gpe,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Gaming Laptop 15-dk1xxx"),
+               },
+       },
+       {
+               /*
                 * Samsung hardware
                 * https://bugzilla.kernel.org/show_bug.cgi?id=44161
                 */
index c2c786e..1687483 100644 (file)
@@ -57,6 +57,7 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
                      int polarity)
 {
        struct irq_fwspec fwspec;
+       unsigned int irq;
 
        fwspec.fwnode = acpi_get_gsi_domain_id(gsi);
        if (WARN_ON(!fwspec.fwnode)) {
@@ -68,7 +69,11 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger,
        fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
        fwspec.param_count = 2;
 
-       return irq_create_fwspec_mapping(&fwspec);
+       irq = irq_create_fwspec_mapping(&fwspec);
+       if (!irq)
+               return -EINVAL;
+
+       return irq;
 }
 EXPORT_SYMBOL_GPL(acpi_register_gsi);
 
index 32cfa3f..297a885 100644 (file)
@@ -440,6 +440,13 @@ static const struct dmi_system_id asus_laptop[] = {
                },
        },
        {
+               .ident = "Asus ExpertBook B1402CBA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B1402CBA"),
+               },
+       },
+       {
                .ident = "Asus ExpertBook B1502CBA",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
@@ -500,16 +507,23 @@ static const struct dmi_system_id maingear_laptop[] = {
 
 static const struct dmi_system_id pcspecialist_laptop[] = {
        {
-               .ident = "PCSpecialist Elimina Pro 16 M",
-               /*
-                * Some models have product-name "Elimina Pro 16 M",
-                * others "GM6BGEQ". Match on board-name to match both.
-                */
+               /* TongFang GM6BGEQ / PCSpecialist Elimina Pro 16 M, RTX 3050 */
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"),
                        DMI_MATCH(DMI_BOARD_NAME, "GM6BGEQ"),
                },
        },
+       {
+               /* TongFang GM6BG5Q, RTX 4050 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "GM6BG5Q"),
+               },
+       },
+       {
+               /* TongFang GM6BG0Q / PCSpecialist Elimina Pro 16 M, RTX 4060 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "GM6BG0Q"),
+               },
+       },
        { }
 };
 
index 367afac..92128aa 100644 (file)
@@ -4812,6 +4812,8 @@ static void binder_release_work(struct binder_proc *proc,
                                "undelivered TRANSACTION_ERROR: %u\n",
                                e->cmd);
                } break;
+               case BINDER_WORK_TRANSACTION_PENDING:
+               case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT:
                case BINDER_WORK_TRANSACTION_COMPLETE: {
                        binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
                                "undelivered TRANSACTION_COMPLETE\n");
index bad7aa9..d2b81cf 100644 (file)
@@ -9,11 +9,6 @@
  *
  * The TD-2000 and certain older devices use a different protocol.
  * Try the fit2 protocol module with them.
- *
- * NB:  The FIT adapters do not appear to support the control
- * registers.  So, we map ALT_STATUS to STATUS and NO-OP writes
- * to the device control register - this means that IDE reset
- * will not work on these devices.
  */
 
 #include <linux/module.h>
@@ -37,8 +32,7 @@
 
 static void fit3_write_regr(struct pi_adapter *pi, int cont, int regr, int val)
 {
-       if (cont == 1)
-               return;
+       regr += cont << 3;
 
        switch (pi->mode) {
        case 0:
@@ -59,11 +53,7 @@ static int fit3_read_regr(struct pi_adapter *pi, int cont, int regr)
 {
        int  a, b;
 
-       if (cont) {
-               if (regr != 6)
-                       return 0xff;
-               regr = 7;
-       }
+       regr += cont << 3;
 
        switch (pi->mode) {
        case 0:
index 1af64d4..a7adfdc 100644 (file)
@@ -51,6 +51,13 @@ static void pata_parport_dev_select(struct ata_port *ap, unsigned int device)
        ata_sff_pause(ap);
 }
 
+static void pata_parport_set_devctl(struct ata_port *ap, u8 ctl)
+{
+       struct pi_adapter *pi = ap->host->private_data;
+
+       pi->proto->write_regr(pi, 1, 6, ctl);
+}
+
 static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
 {
        struct pi_adapter *pi = ap->host->private_data;
@@ -64,7 +71,7 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
        pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0xaa);
        pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0x55);
 
-       pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 055);
+       pi->proto->write_regr(pi, 0, ATA_REG_NSECT, 0x55);
        pi->proto->write_regr(pi, 0, ATA_REG_LBAL, 0xaa);
 
        nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
@@ -73,6 +80,72 @@ static bool pata_parport_devchk(struct ata_port *ap, unsigned int device)
        return (nsect == 0x55) && (lbal == 0xaa);
 }
 
+static int pata_parport_wait_after_reset(struct ata_link *link,
+                                        unsigned int devmask,
+                                        unsigned long deadline)
+{
+       struct ata_port *ap = link->ap;
+       struct pi_adapter *pi = ap->host->private_data;
+       unsigned int dev0 = devmask & (1 << 0);
+       unsigned int dev1 = devmask & (1 << 1);
+       int rc, ret = 0;
+
+       ata_msleep(ap, ATA_WAIT_AFTER_RESET);
+
+       /* always check readiness of the master device */
+       rc = ata_sff_wait_ready(link, deadline);
+       if (rc) {
+               /*
+                * some adapters return bogus values if master device is not
+                * present, so don't abort now if a slave device is present
+                */
+               if (!dev1)
+                       return rc;
+               ret = -ENODEV;
+       }
+
+       /*
+        * if device 1 was found in ata_devchk, wait for register
+        * access briefly, then wait for BSY to clear.
+        */
+       if (dev1) {
+               int i;
+
+               pata_parport_dev_select(ap, 1);
+
+               /*
+                * Wait for register access.  Some ATAPI devices fail
+                * to set nsect/lbal after reset, so don't waste too
+                * much time on it.  We're gonna wait for !BSY anyway.
+                */
+               for (i = 0; i < 2; i++) {
+                       u8 nsect, lbal;
+
+                       nsect = pi->proto->read_regr(pi, 0, ATA_REG_NSECT);
+                       lbal = pi->proto->read_regr(pi, 0, ATA_REG_LBAL);
+                       if (nsect == 1 && lbal == 1)
+                               break;
+                       /* give drive a breather */
+                       ata_msleep(ap, 50);
+               }
+
+               rc = ata_sff_wait_ready(link, deadline);
+               if (rc) {
+                       if (rc != -ENODEV)
+                               return rc;
+                       ret = rc;
+               }
+       }
+
+       pata_parport_dev_select(ap, 0);
+       if (dev1)
+               pata_parport_dev_select(ap, 1);
+       if (dev0)
+               pata_parport_dev_select(ap, 0);
+
+       return ret;
+}
+
 static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
                                      unsigned long deadline)
 {
@@ -87,7 +160,7 @@ static int pata_parport_bus_softreset(struct ata_port *ap, unsigned int devmask,
        ap->last_ctl = ap->ctl;
 
        /* wait the port to become ready */
-       return ata_sff_wait_after_reset(&ap->link, devmask, deadline);
+       return pata_parport_wait_after_reset(&ap->link, devmask, deadline);
 }
 
 static int pata_parport_softreset(struct ata_link *link, unsigned int *classes,
@@ -252,6 +325,7 @@ static struct ata_port_operations pata_parport_port_ops = {
        .hardreset              = NULL,
 
        .sff_dev_select         = pata_parport_dev_select,
+       .sff_set_devctl         = pata_parport_set_devctl,
        .sff_check_status       = pata_parport_check_status,
        .sff_check_altstatus    = pata_parport_check_altstatus,
        .sff_tf_load            = pata_parport_tf_load,
index 884cb51..234a84e 100644 (file)
@@ -1478,7 +1478,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
 
        /* If the user didn't specify a name match any */
        if (data)
-               return !strcmp((*r)->name, data);
+               return (*r)->name && !strcmp((*r)->name, data);
        else
                return 1;
 }
index 84c2c2e..277d039 100644 (file)
@@ -962,13 +962,10 @@ static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
        skb_put_data(skb, buf, strlen(buf));
 }
 
-static int btrtl_register_devcoredump_support(struct hci_dev *hdev)
+static void btrtl_register_devcoredump_support(struct hci_dev *hdev)
 {
-       int err;
+       hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
 
-       err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL);
-
-       return err;
 }
 
 void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name)
@@ -1255,8 +1252,7 @@ int btrtl_download_firmware(struct hci_dev *hdev,
        }
 
 done:
-       if (!err)
-               err = btrtl_register_devcoredump_support(hdev);
+       btrtl_register_devcoredump_support(hdev);
 
        return err;
 }
index 40e2b9f..f3892e9 100644 (file)
@@ -74,7 +74,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
        struct vhci_data *data = hci_get_drvdata(hdev);
 
        memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
+
+       mutex_lock(&data->open_mutex);
        skb_queue_tail(&data->readq, skb);
+       mutex_unlock(&data->open_mutex);
 
        wake_up_interruptible(&data->read_wait);
        return 0;
index 80acdf6..afc94d0 100644 (file)
@@ -247,8 +247,8 @@ static int counter_get_ext(const struct counter_comp *const ext,
                if (*id == component_id)
                        return 0;
 
-               if (ext->type == COUNTER_COMP_ARRAY) {
-                       element = ext->priv;
+               if (ext[*ext_idx].type == COUNTER_COMP_ARRAY) {
+                       element = ext[*ext_idx].priv;
 
                        if (component_id - *id < element->length)
                                return 0;
index 975e431..b3e615c 100644 (file)
@@ -97,7 +97,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
                priv->qdec_mode = 0;
                /* Set highest rate based on whether soc has gclk or not */
                bmr &= ~(ATMEL_TC_QDEN | ATMEL_TC_POSEN);
-               if (priv->tc_cfg->has_gclk)
+               if (!priv->tc_cfg->has_gclk)
                        cmr |= ATMEL_TC_TIMER_CLOCK2;
                else
                        cmr |= ATMEL_TC_TIMER_CLOCK1;
index c625bb2..628af51 100644 (file)
@@ -76,16 +76,11 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
                dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
                        if (!dma_fence_is_signaled(tmp)) {
                                ++count;
-                       } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
-                                           &tmp->flags)) {
-                               if (ktime_after(tmp->timestamp, timestamp))
-                                       timestamp = tmp->timestamp;
                        } else {
-                               /*
-                                * Use the current time if the fence is
-                                * currently signaling.
-                                */
-                               timestamp = ktime_get();
+                               ktime_t t = dma_fence_timestamp(tmp);
+
+                               if (ktime_after(t, timestamp))
+                                       timestamp = t;
                        }
                }
        }
index af57799..2e9a316 100644 (file)
@@ -268,13 +268,10 @@ static int sync_fill_fence_info(struct dma_fence *fence,
                sizeof(info->driver_name));
 
        info->status = dma_fence_get_status(fence);
-       while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
-              !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
-               cpu_relax();
        info->timestamp_ns =
-               test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ?
-               ktime_to_ns(fence->timestamp) :
-               ktime_set(0, 0);
+               dma_fence_is_signaled(fence) ?
+                       ktime_to_ns(dma_fence_timestamp(fence)) :
+                       ktime_set(0, 0);
 
        return info->status;
 }
index a0f5741..6a3abe5 100644 (file)
@@ -92,8 +92,14 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
 
        edma_writel_chreg(fsl_chan, val, ch_sbr);
 
-       if (flags & FSL_EDMA_DRV_HAS_CHMUX)
-               edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+       if (flags & FSL_EDMA_DRV_HAS_CHMUX) {
+               /*
+                * ch_mux: With the exception of 0, attempts to write a value
+                * already in use will be forced to 0.
+                */
+               if (!edma_readl_chreg(fsl_chan, ch_mux))
+                       edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
+       }
 
        val = edma_readl_chreg(fsl_chan, ch_csr);
        val |= EDMA_V3_CH_CSR_ERQ;
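
The guard added above only programs ch_mux when the register still reads 0, because of the hardware rule quoted in the new comment: writing a non-zero source id that is already in use is forced to 0, which would silently break the routing of an already-configured channel on a second enable (presumably including the value the channel itself already holds). Below is a toy userspace model of that quirk and the guard; the two-register "hardware" is purely illustrative.

#include <stdint.h>
#include <stdio.h>

static uint32_t ch_mux[2];      /* toy mux registers for two channels */

/* Hardware rule from the comment: writing a non-zero source id that is
 * already in use is forced to 0 instead of taking effect. */
static void hw_write_mux(int chan, uint32_t srcid)
{
        if (srcid) {
                for (int i = 0; i < 2; i++) {
                        if (ch_mux[i] == srcid) {
                                ch_mux[chan] = 0;
                                return;
                        }
                }
        }
        ch_mux[chan] = srcid;
}

static void enable_request(int chan, uint32_t srcid)
{
        if (!ch_mux[chan])              /* the guard the hunk adds */
                hw_write_mux(chan, srcid);
}

int main(void)
{
        enable_request(0, 5);           /* first enable programs the mux */
        enable_request(0, 5);           /* re-enable skips the write, mux stays 5 */
        printf("ch0 mux = %u\n", ch_mux[0]);
        return 0;
}
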
@@ -448,12 +454,25 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
 
        edma_write_tcdreg(fsl_chan, tcd->dlast_sga, dlast_sga);
 
+       csr = le16_to_cpu(tcd->csr);
+
        if (fsl_chan->is_sw) {
-               csr = le16_to_cpu(tcd->csr);
                csr |= EDMA_TCD_CSR_START;
                tcd->csr = cpu_to_le16(csr);
        }
 
+       /*
+        * Must clear the CHn_CSR[DONE] bit before enabling TCDn_CSR[ESG] on eDMAv3;
+        * eDMAv4 has no such requirement.
+        * Changing MLINK needs CHn_CSR[DONE] cleared on both eDMAv3 and eDMAv4.
+        */
+       if (((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_SG) &&
+               (csr & EDMA_TCD_CSR_E_SG)) ||
+           ((fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_CLEAR_DONE_E_LINK) &&
+               (csr & EDMA_TCD_CSR_E_LINK)))
+               edma_writel_chreg(fsl_chan, edma_readl_chreg(fsl_chan, ch_csr), ch_csr);
+
+
        edma_write_tcdreg(fsl_chan, tcd->csr, csr);
 }
 
index 3cc0cc8..40d50cc 100644 (file)
@@ -183,11 +183,23 @@ struct fsl_edma_desc {
 #define FSL_EDMA_DRV_BUS_8BYTE         BIT(10)
 #define FSL_EDMA_DRV_DEV_TO_DEV                BIT(11)
 #define FSL_EDMA_DRV_ALIGN_64BYTE      BIT(12)
+/* Need to clear CHn_CSR DONE before enabling the TCD's ESG */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_SG   BIT(13)
+/* Need to clear CHn_CSR DONE before enabling the TCD's MAJORELINK */
+#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
 
 #define FSL_EDMA_DRV_EDMA3     (FSL_EDMA_DRV_SPLIT_REG |       \
                                 FSL_EDMA_DRV_BUS_8BYTE |       \
                                 FSL_EDMA_DRV_DEV_TO_DEV |      \
-                                FSL_EDMA_DRV_ALIGN_64BYTE)
+                                FSL_EDMA_DRV_ALIGN_64BYTE |    \
+                                FSL_EDMA_DRV_CLEAR_DONE_E_SG | \
+                                FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
+
+#define FSL_EDMA_DRV_EDMA4     (FSL_EDMA_DRV_SPLIT_REG |       \
+                                FSL_EDMA_DRV_BUS_8BYTE |       \
+                                FSL_EDMA_DRV_DEV_TO_DEV |      \
+                                FSL_EDMA_DRV_ALIGN_64BYTE |    \
+                                FSL_EDMA_DRV_CLEAR_DONE_E_LINK)
 
 struct fsl_edma_drvdata {
        u32                     dmamuxs; /* only used before v3 */
index 63d48d0..8c4ed70 100644 (file)
@@ -154,18 +154,20 @@ static struct dma_chan *fsl_edma3_xlate(struct of_phandle_args *dma_spec,
                fsl_chan = to_fsl_edma_chan(chan);
                i = fsl_chan - fsl_edma->chans;
 
-               chan = dma_get_slave_channel(chan);
-               chan->device->privatecnt++;
                fsl_chan->priority = dma_spec->args[1];
                fsl_chan->is_rxchan = dma_spec->args[2] & ARGS_RX;
                fsl_chan->is_remote = dma_spec->args[2] & ARGS_REMOTE;
                fsl_chan->is_multi_fifo = dma_spec->args[2] & ARGS_MULTI_FIFO;
 
                if (!b_chmux && i == dma_spec->args[0]) {
+                       chan = dma_get_slave_channel(chan);
+                       chan->device->privatecnt++;
                        mutex_unlock(&fsl_edma->fsl_edma_mutex);
                        return chan;
                } else if (b_chmux && !fsl_chan->srcid) {
                        /* if controller support channel mux, choose a free channel */
+                       chan = dma_get_slave_channel(chan);
+                       chan->device->privatecnt++;
                        fsl_chan->srcid = dma_spec->args[0];
                        mutex_unlock(&fsl_edma->fsl_edma_mutex);
                        return chan;
@@ -355,7 +357,7 @@ static struct fsl_edma_drvdata imx93_data3 = {
 };
 
 static struct fsl_edma_drvdata imx93_data4 = {
-       .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
+       .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
        .chreg_space_sz = 0x8000,
        .chreg_off = 0x10000,
        .setup_irq = fsl_edma3_irq_init,
index 22d6f4e..8f754f9 100644 (file)
@@ -477,6 +477,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
        union idxd_command_reg cmd;
        DECLARE_COMPLETION_ONSTACK(done);
        u32 stat;
+       unsigned long flags;
 
        if (idxd_device_is_halted(idxd)) {
                dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
@@ -490,7 +491,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
        cmd.operand = operand;
        cmd.int_req = 1;
 
-       spin_lock(&idxd->cmd_lock);
+       spin_lock_irqsave(&idxd->cmd_lock, flags);
        wait_event_lock_irq(idxd->cmd_waitq,
                            !test_bit(IDXD_FLAG_CMD_RUNNING, &idxd->flags),
                            idxd->cmd_lock);
@@ -507,7 +508,7 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
         * After command submitted, release lock and go to sleep until
         * the command completes via interrupt.
         */
-       spin_unlock(&idxd->cmd_lock);
+       spin_unlock_irqrestore(&idxd->cmd_lock, flags);
        wait_for_completion(&done);
        stat = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET);
        spin_lock(&idxd->cmd_lock);
index c51dc01..06d12ac 100644 (file)
@@ -450,9 +450,8 @@ static int mtk_uart_apdma_device_pause(struct dma_chan *chan)
        mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
        mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
 
-       synchronize_irq(c->irq);
-
        spin_unlock_irqrestore(&c->vc.lock, flags);
+       synchronize_irq(c->irq);
 
        return 0;
 }
index 89e8250..002833f 100644 (file)
@@ -3668,6 +3668,7 @@ static int __init d40_probe(struct platform_device *pdev)
                regulator_disable(base->lcpa_regulator);
                regulator_put(base->lcpa_regulator);
        }
+       pm_runtime_disable(base->dev);
 
  report_failure:
        d40_err(dev, "probe failed\n");
index 5c36811..0b30151 100644 (file)
@@ -1113,8 +1113,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
                chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;
 
        /* Activate Double Buffer Mode if DMA triggers STM32 MDMA and more than 1 sg */
-       if (chan->trig_mdma && sg_len > 1)
+       if (chan->trig_mdma && sg_len > 1) {
                chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
+               chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
+       }
 
        for_each_sg(sgl, sg, sg_len, i) {
                ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
@@ -1387,11 +1389,12 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 
        residue = stm32_dma_get_remaining_bytes(chan);
 
-       if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
+       if ((chan->desc->cyclic || chan->trig_mdma) && !stm32_dma_is_current_sg(chan)) {
                n_sg++;
                if (n_sg == chan->desc->num_sgs)
                        n_sg = 0;
-               residue = sg_req->len;
+               if (!chan->trig_mdma)
+                       residue = sg_req->len;
        }
 
        /*
@@ -1401,7 +1404,7 @@ static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
         * residue = remaining bytes from NDTR + remaining
         * periods/sg to be transferred
         */
-       if (!chan->desc->cyclic || n_sg != 0)
+       if ((!chan->desc->cyclic && !chan->trig_mdma) || n_sg != 0)
                for (i = n_sg; i < desc->num_sgs; i++)
                        residue += desc->sg_req[i].len;
 
index 0de2340..bae08b3 100644 (file)
@@ -777,8 +777,6 @@ static int stm32_mdma_setup_xfer(struct stm32_mdma_chan *chan,
        /* Enable interrupts */
        ccr &= ~STM32_MDMA_CCR_IRQ_MASK;
        ccr |= STM32_MDMA_CCR_TEIE | STM32_MDMA_CCR_CTCIE;
-       if (sg_len > 1)
-               ccr |= STM32_MDMA_CCR_BTIE;
        desc->ccr = ccr;
 
        return 0;
@@ -1236,6 +1234,10 @@ static int stm32_mdma_resume(struct dma_chan *c)
        unsigned long flags;
        u32 status, reg;
 
+       /* Nothing to resume if the transfer was terminated or the channel is still enabled */
+       if (!chan->desc || (stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id)) & STM32_MDMA_CCR_EN))
+               return -EPERM;
+
        hwdesc = chan->desc->node[chan->curr_hwdesc].hwdesc;
 
        spin_lock_irqsave(&chan->vchan.lock, flags);
@@ -1316,21 +1318,35 @@ static int stm32_mdma_slave_config(struct dma_chan *c,
 
 static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
                                      struct stm32_mdma_desc *desc,
-                                     u32 curr_hwdesc)
+                                     u32 curr_hwdesc,
+                                     struct dma_tx_state *state)
 {
        struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
        struct stm32_mdma_hwdesc *hwdesc;
-       u32 cbndtr, residue, modulo, burst_size;
+       u32 cisr, clar, cbndtr, residue, modulo, burst_size;
        int i;
 
+       cisr = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
+
        residue = 0;
-       for (i = curr_hwdesc + 1; i < desc->count; i++) {
+       /* Get the next hw descriptor to process from current transfer */
+       clar = stm32_mdma_read(dmadev, STM32_MDMA_CLAR(chan->id));
+       for (i = desc->count - 1; i >= 0; i--) {
                hwdesc = desc->node[i].hwdesc;
+
+               if (hwdesc->clar == clar)
+                       break; /* current transfer found, stop accumulating */
+
+               /* Accumulate residue of unprocessed hw descriptors */
                residue += STM32_MDMA_CBNDTR_BNDT(hwdesc->cbndtr);
        }
        cbndtr = stm32_mdma_read(dmadev, STM32_MDMA_CBNDTR(chan->id));
        residue += cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
 
+       state->in_flight_bytes = 0;
+       if (chan->chan_config.m2m_hw && (cisr & STM32_MDMA_CISR_CRQA))
+               state->in_flight_bytes = cbndtr & STM32_MDMA_CBNDTR_BNDT_MASK;
+
        if (!chan->mem_burst)
                return residue;
 
@@ -1360,11 +1376,10 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
 
        vdesc = vchan_find_desc(&chan->vchan, cookie);
        if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
-               residue = stm32_mdma_desc_residue(chan, chan->desc,
-                                                 chan->curr_hwdesc);
+               residue = stm32_mdma_desc_residue(chan, chan->desc, chan->curr_hwdesc, state);
        else if (vdesc)
-               residue = stm32_mdma_desc_residue(chan,
-                                                 to_stm32_mdma_desc(vdesc), 0);
+               residue = stm32_mdma_desc_residue(chan, to_stm32_mdma_desc(vdesc), 0, state);
+
        dma_set_residue(state, residue);
 
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
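
The reworked stm32_mdma_desc_residue() above no longer trusts the cached curr_hwdesc: it reads CLAR to see which node the hardware will fetch next, sums the byte counts of every node that has not been reached yet, and adds the bytes still pending in CBNDTR (also reporting in-flight bytes separately for m2m_hw channels). A hedged standalone sketch of that walk, with simplified field names and no hardware register access:

#include <stdint.h>
#include <stdio.h>

/* Simplified linked-list node: just the fields the walk needs. */
struct hwdesc {
        uint32_t clar;          /* link address programmed into this node */
        uint32_t bndt;          /* byte count programmed into this node */
};

static uint32_t desc_residue(const struct hwdesc *desc, int count,
                             uint32_t hw_clar, uint32_t hw_bndt_left)
{
        uint32_t residue = 0;

        for (int i = count - 1; i >= 0; i--) {
                if (desc[i].clar == hw_clar)
                        break;                  /* current transfer found */
                residue += desc[i].bndt;        /* node not reached yet */
        }
        return residue + hw_bndt_left;          /* plus what CBNDTR still holds */
}

int main(void)
{
        /* Three chained 4 KiB nodes; each node's clar points to the next one. */
        struct hwdesc desc[3] = {
                { .clar = 0x1100, .bndt = 4096 },
                { .clar = 0x1200, .bndt = 4096 },
                { .clar = 0x1000, .bndt = 4096 },       /* tail links back to head */
        };

        /* Hardware is on node 1 (it will fetch 0x1200 next), 1 KiB remaining. */
        printf("residue = %u\n", desc_residue(desc, 3, 0x1200, 1024)); /* 5120 */
        return 0;
}
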
index 0dc9c65..aac52d9 100644 (file)
@@ -47,7 +47,6 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
 {
        switch (ctx_prio) {
-       case AMDGPU_CTX_PRIORITY_UNSET:
        case AMDGPU_CTX_PRIORITY_VERY_LOW:
        case AMDGPU_CTX_PRIORITY_LOW:
        case AMDGPU_CTX_PRIORITY_NORMAL:
@@ -55,6 +54,7 @@ bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
        case AMDGPU_CTX_PRIORITY_VERY_HIGH:
                return true;
        default:
+       case AMDGPU_CTX_PRIORITY_UNSET:
                return false;
        }
 }
@@ -64,7 +64,8 @@ amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
 {
        switch (ctx_prio) {
        case AMDGPU_CTX_PRIORITY_UNSET:
-               return DRM_SCHED_PRIORITY_UNSET;
+               pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL");
+               return DRM_SCHED_PRIORITY_NORMAL;
 
        case AMDGPU_CTX_PRIORITY_VERY_LOW:
                return DRM_SCHED_PRIORITY_MIN;
index 1221059..ba3a87c 100644 (file)
@@ -403,7 +403,10 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
                                continue;
                }
 
-               r = amdgpu_vm_clear_freed(adev, vm, NULL);
+               /* Reserve fences for two SDMA page table updates */
+               r = dma_resv_reserve_fences(resv, 2);
+               if (!r)
+                       r = amdgpu_vm_clear_freed(adev, vm, NULL);
                if (!r)
                        r = amdgpu_vm_handle_moved(adev, vm);
 
index da4be0b..8eee5d7 100644 (file)
@@ -142,6 +142,10 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev)
        int r;
        int size;
 
+       /* SI HW does not have doorbells, skip allocation */
+       if (adev->doorbell.num_kernel_doorbells == 0)
+               return 0;
+
        /* Reserve first num_kernel_doorbells (page-aligned) for kernel ops */
        size = ALIGN(adev->doorbell.num_kernel_doorbells * sizeof(u32), PAGE_SIZE);
 
index f3ee83c..d28e21b 100644 (file)
@@ -252,7 +252,7 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        struct amdgpu_res_cursor cursor;
 
-       if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
+       if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
                return false;
 
        amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
index f5daadc..82f2599 100644 (file)
@@ -1090,7 +1090,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                        struct drm_gem_object *gobj = dma_buf->priv;
                        struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-                       if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
+                       if (abo->tbo.resource &&
+                           abo->tbo.resource->mem_type == TTM_PL_VRAM)
                                bo = gem_to_amdgpu_bo(gobj);
                }
                mem = bo->tbo.resource;
index 3a9077b..d08e60d 100644 (file)
@@ -1262,6 +1262,9 @@ static void disable_vbios_mode_if_required(
                if (stream == NULL)
                        continue;
 
+               if (stream->apply_seamless_boot_optimization)
+                       continue;
+
                // only looking for first odm pipe
                if (pipe->prev_odm_pipe)
                        continue;
index f448b90..84148a7 100644 (file)
@@ -692,7 +692,7 @@ static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
        return container_of(bridge, struct ti_sn65dsi86, bridge);
 }
 
-static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
+static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 *pdata)
 {
        int val;
        struct mipi_dsi_host *host;
@@ -707,7 +707,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
        if (!host)
                return -EPROBE_DEFER;
 
-       dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+       dsi = devm_mipi_dsi_device_register_full(&adev->dev, host, &info);
        if (IS_ERR(dsi))
                return PTR_ERR(dsi);
 
@@ -725,7 +725,7 @@ static int ti_sn_attach_host(struct ti_sn65dsi86 *pdata)
 
        pdata->dsi = dsi;
 
-       return devm_mipi_dsi_attach(dev, dsi);
+       return devm_mipi_dsi_attach(&adev->dev, dsi);
 }
 
 static int ti_sn_bridge_attach(struct drm_bridge *bridge,
@@ -1298,9 +1298,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
        struct device_node *np = pdata->dev->of_node;
        int ret;
 
-       pdata->next_bridge = devm_drm_of_get_bridge(pdata->dev, np, 1, 0);
+       pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
        if (IS_ERR(pdata->next_bridge))
-               return dev_err_probe(pdata->dev, PTR_ERR(pdata->next_bridge),
+               return dev_err_probe(&adev->dev, PTR_ERR(pdata->next_bridge),
                                     "failed to create panel bridge\n");
 
        ti_sn_bridge_parse_lanes(pdata, np);
@@ -1319,9 +1319,9 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
 
        drm_bridge_add(&pdata->bridge);
 
-       ret = ti_sn_attach_host(pdata);
+       ret = ti_sn_attach_host(adev, pdata);
        if (ret) {
-               dev_err_probe(pdata->dev, ret, "failed to attach dsi host\n");
+               dev_err_probe(&adev->dev, ret, "failed to attach dsi host\n");
                goto err_remove_bridge;
        }
 
index 292e38e..60794fc 100644 (file)
@@ -290,7 +290,8 @@ static int
 update_connector_routing(struct drm_atomic_state *state,
                         struct drm_connector *connector,
                         struct drm_connector_state *old_connector_state,
-                        struct drm_connector_state *new_connector_state)
+                        struct drm_connector_state *new_connector_state,
+                        bool added_by_user)
 {
        const struct drm_connector_helper_funcs *funcs;
        struct drm_encoder *new_encoder;
@@ -339,9 +340,13 @@ update_connector_routing(struct drm_atomic_state *state,
         * there's a chance the connector may have been destroyed during the
         * process, but it's better to ignore that than cause
         * drm_atomic_helper_resume() to fail.
+        *
+        * Last, we want to ignore connector registration when the connector
+        * was not pulled into the atomic state by user-space (i.e., was pulled
+        * in by the driver, e.g. when updating a DP-MST stream).
         */
        if (!state->duplicated && drm_connector_is_unregistered(connector) &&
-           crtc_state->active) {
+           added_by_user && crtc_state->active) {
                drm_dbg_atomic(connector->dev,
                               "[CONNECTOR:%d:%s] is not registered\n",
                               connector->base.id, connector->name);
@@ -620,7 +625,10 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
        struct drm_connector *connector;
        struct drm_connector_state *old_connector_state, *new_connector_state;
        int i, ret;
-       unsigned int connectors_mask = 0;
+       unsigned int connectors_mask = 0, user_connectors_mask = 0;
+
+       for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i)
+               user_connectors_mask |= BIT(i);
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                bool has_connectors =
@@ -685,7 +693,8 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
                 */
                ret = update_connector_routing(state, connector,
                                               old_connector_state,
-                                              new_connector_state);
+                                              new_connector_state,
+                                              BIT(i) & user_connectors_mask);
                if (ret)
                        return ret;
                if (old_connector_state->crtc) {
index 340da82..4b71040 100644 (file)
@@ -123,6 +123,9 @@ static const struct edid_quirk {
        /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
        EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
 
+       /* BenQ GW2765 */
+       EDID_QUIRK('B', 'N', 'Q', 0x78d6, EDID_QUIRK_FORCE_8BPC),
+
        /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
        EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
 
index 6129b89..44a948b 100644 (file)
@@ -540,7 +540,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
        struct page **pages;
        struct folio *folio;
        struct folio_batch fbatch;
-       int i, j, npages;
+       long i, j, npages;
 
        if (WARN_ON(!obj->filp))
                return ERR_PTR(-EINVAL);
@@ -564,11 +564,13 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 
        i = 0;
        while (i < npages) {
+               long nr;
                folio = shmem_read_folio_gfp(mapping, i,
                                mapping_gfp_mask(mapping));
                if (IS_ERR(folio))
                        goto fail;
-               for (j = 0; j < folio_nr_pages(folio); j++, i++)
+               nr = min(npages - i, folio_nr_pages(folio));
+               for (j = 0; j < nr; j++, i++)
                        pages[i] = folio_file_page(folio, i);
 
                /* Make sure shmem keeps __GFP_DMA32 allocated pages in the
index 1b00ef2..80e4ec6 100644 (file)
@@ -2553,8 +2553,7 @@ static void intel_cx0_phy_lane_reset(struct drm_i915_private *i915,
                drm_warn(&i915->drm, "PHY %c failed to bring out of SOC reset after %dus.\n",
                         phy_name(phy), XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US);
 
-       intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port),
-                    XELPDP_LANE_PIPE_RESET(0) | XELPDP_LANE_PIPE_RESET(1),
+       intel_de_rmw(i915, XELPDP_PORT_BUF_CTL2(port), lane_pipe_reset,
                     lane_pipe_reset);
 
        if (__intel_de_wait_for_register(i915, XELPDP_PORT_BUF_CTL2(port),
index aa4d842..3106545 100644 (file)
@@ -235,6 +235,7 @@ static vm_fault_t i915_error_to_vmf_fault(int err)
        case 0:
        case -EAGAIN:
        case -ENOSPC: /* transient failure to evict? */
+       case -ENOBUFS: /* temporarily out of fences? */
        case -ERESTARTSYS:
        case -EINTR:
        case -EBUSY:
index 9f364df..0e0a41b 100644 (file)
@@ -239,6 +239,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
        npages = obj->size >> PAGE_SHIFT;
        mtk_gem->pages = kcalloc(npages, sizeof(*mtk_gem->pages), GFP_KERNEL);
        if (!mtk_gem->pages) {
+               sg_free_table(sgt);
                kfree(sgt);
                return -ENOMEM;
        }
@@ -248,12 +249,15 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
        mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP,
                               pgprot_writecombine(PAGE_KERNEL));
        if (!mtk_gem->kvaddr) {
+               sg_free_table(sgt);
                kfree(sgt);
                kfree(mtk_gem->pages);
                return -ENOMEM;
        }
-out:
+       sg_free_table(sgt);
        kfree(sgt);
+
+out:
        iosys_map_set_vaddr(map, mtk_gem->kvaddr);
 
        return 0;
index c2aaade..0be195f 100644 (file)
@@ -119,6 +119,7 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
        struct dpu_sw_pipe_cfg *pipe_cfg)
 {
        int src_width, src_height, dst_height, fps;
+       u64 plane_pixel_rate, plane_bit_rate;
        u64 plane_prefill_bw;
        u64 plane_bw;
        u32 hw_latency_lines;
@@ -136,13 +137,12 @@ static u64 _dpu_plane_calc_bw(const struct dpu_mdss_cfg *catalog,
        scale_factor = src_height > dst_height ?
                mult_frac(src_height, 1, dst_height) : 1;
 
-       plane_bw =
-               src_width * mode->vtotal * fps * fmt->bpp *
-               scale_factor;
+       plane_pixel_rate = src_width * mode->vtotal * fps;
+       plane_bit_rate = plane_pixel_rate * fmt->bpp;
 
-       plane_prefill_bw =
-               src_width * hw_latency_lines * fps * fmt->bpp *
-               scale_factor * mode->vtotal;
+       plane_bw = plane_bit_rate * scale_factor;
+
+       plane_prefill_bw = plane_bw * hw_latency_lines;
 
        if ((vbp+vpw) > hw_latency_lines)
                do_div(plane_prefill_bw, (vbp+vpw));
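
The bandwidth rework above introduces explicit u64 intermediates (plane_pixel_rate, plane_bit_rate) and derives the prefill bandwidth from plane_bw, keeping the two figures consistent. A worked example of the resulting formula with made-up plane parameters; the numbers and units are illustrative only.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up plane parameters. */
        uint64_t src_width = 3840, vtotal = 2250, fps = 60, bpp = 4;
        uint64_t scale_factor = 1;              /* no vertical downscale */
        uint64_t hw_latency_lines = 24, vbp = 36, vpw = 8;

        uint64_t plane_pixel_rate = src_width * vtotal * fps;
        uint64_t plane_bit_rate   = plane_pixel_rate * bpp;
        uint64_t plane_bw         = plane_bit_rate * scale_factor;
        uint64_t plane_prefill_bw = plane_bw * hw_latency_lines;

        /* Spread the prefill cost over the vertical blanking, as in the hunk. */
        if (vbp + vpw > hw_latency_lines)
                plane_prefill_bw /= vbp + vpw;

        printf("plane_bw         = %" PRIu64 "\n", plane_bw);         /* 2073600000 */
        printf("plane_prefill_bw = %" PRIu64 "\n", plane_prefill_bw); /* 1131054545 */
        return 0;
}
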
@@ -733,9 +733,11 @@ static int dpu_plane_check_inline_rotation(struct dpu_plane *pdpu,
 static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
                struct dpu_sw_pipe *pipe,
                struct dpu_sw_pipe_cfg *pipe_cfg,
-               const struct dpu_format *fmt)
+               const struct dpu_format *fmt,
+               const struct drm_display_mode *mode)
 {
        uint32_t min_src_size;
+       struct dpu_kms *kms = _dpu_plane_get_kms(&pdpu->base);
 
        min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
 
@@ -774,6 +776,12 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
                return -EINVAL;
        }
 
+       /* max clk check */
+       if (_dpu_plane_calc_clk(mode, pipe_cfg) > kms->perf.max_core_clk_rate) {
+               DPU_DEBUG_PLANE(pdpu, "plane exceeds max mdp core clk limits\n");
+               return -E2BIG;
+       }
+
        return 0;
 }
 
@@ -899,12 +907,13 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
                r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
        }
 
-       ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt);
+       ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
        if (ret)
                return ret;
 
        if (r_pipe->sspp) {
-               ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt);
+               ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg, fmt,
+                                                 &crtc_state->adjusted_mode);
                if (ret)
                        return ret;
        }
index a7a5c7e..77a8d93 100644 (file)
@@ -1774,13 +1774,6 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
                return rc;
 
        while (--link_train_max_retries) {
-               rc = dp_ctrl_reinitialize_mainlink(ctrl);
-               if (rc) {
-                       DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
-                                       rc);
-                       break;
-               }
-
                training_step = DP_TRAINING_NONE;
                rc = dp_ctrl_setup_main_link(ctrl, &training_step);
                if (rc == 0) {
@@ -1832,6 +1825,12 @@ int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
                        /* stop link training before start re training  */
                        dp_ctrl_clear_training_pattern(ctrl);
                }
+
+               rc = dp_ctrl_reinitialize_mainlink(ctrl);
+               if (rc) {
+                       DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n", rc);
+                       break;
+               }
        }
 
        if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
index 4242712..6375daa 100644 (file)
@@ -1090,7 +1090,7 @@ int dp_link_process_request(struct dp_link *dp_link)
        } else if (dp_link_read_psr_error_status(link)) {
                DRM_ERROR("PSR IRQ_HPD received\n");
        } else if (dp_link_psr_capability_changed(link)) {
-               drm_dbg_dp(link->drm_dev, "PSR Capability changed");
+               drm_dbg_dp(link->drm_dev, "PSR Capability changed\n");
        } else {
                ret = dp_link_process_link_status_update(link);
                if (!ret) {
@@ -1107,7 +1107,7 @@ int dp_link_process_request(struct dp_link *dp_link)
                }
        }
 
-       drm_dbg_dp(link->drm_dev, "sink request=%#x",
+       drm_dbg_dp(link->drm_dev, "sink request=%#x\n",
                                dp_link->sink_request);
        return ret;
 }
index 5d9ec27..3d6fb70 100644 (file)
@@ -1082,9 +1082,21 @@ static void dsi_wait4video_done(struct msm_dsi_host *msm_host)
 
 static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
 {
+       u32 data;
+
        if (!(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
                return;
 
+       data = dsi_read(msm_host, REG_DSI_STATUS0);
+
+       /* if video mode engine is not busy, it's because
+        * either the timing engine was not turned on or the
+        * DSI controller has finished transmitting the video
+        * data already, so no need to wait in those cases
+        */
+       if (!(data & DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY))
+               return;
+
        if (msm_host->power_on && msm_host->enabled) {
                dsi_wait4video_done(msm_host);
                /* delay 4 ms to skip BLLP */
@@ -1894,10 +1906,9 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
        }
 
        msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-       if (msm_host->irq < 0) {
-               ret = msm_host->irq;
-               dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
-               return ret;
+       if (!msm_host->irq) {
+               dev_err(&pdev->dev, "failed to get irq\n");
+               return -EINVAL;
        }
 
        /* do not autoenable, will be enabled later */
index 2e87dd6..348c66b 100644 (file)
@@ -511,7 +511,7 @@ static int mdss_remove(struct platform_device *pdev)
 static const struct msm_mdss_data msm8998_data = {
        .ubwc_enc_version = UBWC_1_0,
        .ubwc_dec_version = UBWC_1_0,
-       .highest_bank_bit = 1,
+       .highest_bank_bit = 2,
 };
 
 static const struct msm_mdss_data qcm2290_data = {
index 46b057f..3249e5c 100644 (file)
@@ -62,6 +62,18 @@ nvkm_uconn_uevent_gpio(struct nvkm_object *object, u64 token, u32 bits)
        return object->client->event(token, &args, sizeof(args.v0));
 }
 
+static bool
+nvkm_connector_is_dp_dms(u8 type)
+{
+       switch (type) {
+       case DCB_CONNECTOR_DMS59_DP0:
+       case DCB_CONNECTOR_DMS59_DP1:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static int
 nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
 {
@@ -101,7 +113,7 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
        if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_GPIO_LO;
        if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ) {
                /* TODO: support DP IRQ on ANX9805 and remove this hack. */
-               if (!outp->info.location)
+               if (!outp->info.location && !nvkm_connector_is_dp_dms(conn->info.type))
                        return -EINVAL;
        }
 
index 5ac9262..c9087f4 100644 (file)
@@ -1342,9 +1342,7 @@ static const struct panel_init_cmd starry_himax83102_j02_init_cmd[] = {
        _INIT_DCS_CMD(0xB1, 0x01, 0xBF, 0x11),
        _INIT_DCS_CMD(0xCB, 0x86),
        _INIT_DCS_CMD(0xD2, 0x3C, 0xFA),
-       _INIT_DCS_CMD(0xE9, 0xC5),
-       _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
-       _INIT_DCS_CMD(0xE9, 0x3F),
+       _INIT_DCS_CMD(0xD3, 0x00, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x0C, 0x01),
        _INIT_DCS_CMD(0xE7, 0x02, 0x00, 0x28, 0x01, 0x7E, 0x0F, 0x7E, 0x10, 0xA0, 0x00, 0x00, 0x20, 0x40, 0x50, 0x40),
        _INIT_DCS_CMD(0xBD, 0x02),
        _INIT_DCS_CMD(0xD8, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0, 0xFF, 0xFF, 0xBF, 0xFE, 0xAA, 0xA0),
index feb665d..95c8472 100644 (file)
@@ -976,32 +976,6 @@ static const struct panel_desc auo_b116xak01 = {
        },
 };
 
-static const struct drm_display_mode auo_b116xw03_mode = {
-       .clock = 70589,
-       .hdisplay = 1366,
-       .hsync_start = 1366 + 40,
-       .hsync_end = 1366 + 40 + 40,
-       .htotal = 1366 + 40 + 40 + 32,
-       .vdisplay = 768,
-       .vsync_start = 768 + 10,
-       .vsync_end = 768 + 10 + 12,
-       .vtotal = 768 + 10 + 12 + 6,
-       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
-       .modes = &auo_b116xw03_mode,
-       .num_modes = 1,
-       .bpc = 6,
-       .size = {
-               .width = 256,
-               .height = 144,
-       },
-       .delay = {
-               .enable = 400,
-       },
-};
-
 static const struct drm_display_mode auo_b133han05_mode = {
        .clock = 142600,
        .hdisplay = 1920,
@@ -1726,9 +1700,6 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "auo,b116xa01",
                .data = &auo_b116xak01,
        }, {
-               .compatible = "auo,b116xw03",
-               .data = &auo_b116xw03,
-       }, {
                .compatible = "auo,b133han05",
                .data = &auo_b133han05,
        }, {
index 95959dc..dd7928d 100644 (file)
@@ -919,6 +919,38 @@ static const struct panel_desc auo_b101xtn01 = {
        },
 };
 
+static const struct drm_display_mode auo_b116xw03_mode = {
+       .clock = 70589,
+       .hdisplay = 1366,
+       .hsync_start = 1366 + 40,
+       .hsync_end = 1366 + 40 + 40,
+       .htotal = 1366 + 40 + 40 + 32,
+       .vdisplay = 768,
+       .vsync_start = 768 + 10,
+       .vsync_end = 768 + 10 + 12,
+       .vtotal = 768 + 10 + 12 + 6,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+       .modes = &auo_b116xw03_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 256,
+               .height = 144,
+       },
+       .delay = {
+               .prepare = 1,
+               .enable = 200,
+               .disable = 200,
+               .unprepare = 500,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
 static const struct display_timing auo_g070vvn01_timings = {
        .pixelclock = { 33300000, 34209000, 45000000 },
        .hactive = { 800, 800, 800 },
@@ -4103,6 +4135,9 @@ static const struct of_device_id platform_of_match[] = {
                .compatible = "auo,b101xtn01",
                .data = &auo_b101xtn01,
        }, {
+               .compatible = "auo,b116xw03",
+               .data = &auo_b116xw03,
+       }, {
                .compatible = "auo,g070vvn01",
                .data = &auo_g070vvn01,
        }, {
index 506371c..5a3a622 100644 (file)
@@ -929,7 +929,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 
                if (next) {
                        next->s_fence->scheduled.timestamp =
-                               job->s_fence->finished.timestamp;
+                               dma_fence_timestamp(&job->s_fence->finished);
                        /* start TO timer for next job */
                        drm_sched_start_timeout(sched);
                }
index ff86ba1..8ea120e 100644 (file)
@@ -745,7 +745,7 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
 
                ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
                if (ret) {
-                       drm_err(dev, "could not acquire memory range %pr: %d\n", &res, ret);
+                       drm_err(dev, "could not acquire memory range %pr: %d\n", res, ret);
                        return ERR_PTR(ret);
                }
 
index 7726a72..d48b391 100644 (file)
@@ -232,10 +232,6 @@ void ttm_device_fini(struct ttm_device *bdev)
        struct ttm_resource_manager *man;
        unsigned i;
 
-       man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
-       ttm_resource_manager_set_used(man, false);
-       ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
        mutex_lock(&ttm_global_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&ttm_global_mutex);
@@ -243,6 +239,10 @@ void ttm_device_fini(struct ttm_device *bdev)
        drain_workqueue(bdev->wq);
        destroy_workqueue(bdev->wq);
 
+       man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+       ttm_resource_manager_set_used(man, false);
+       ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
        spin_lock(&bdev->lru_lock);
        for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
                if (list_empty(&man->lru[0]))
index c438535..2bfac3a 100644 (file)
@@ -34,6 +34,8 @@
 
 static void vmw_bo_release(struct vmw_bo *vbo)
 {
+       WARN_ON(vbo->tbo.base.funcs &&
+               kref_read(&vbo->tbo.base.refcount) != 0);
        vmw_bo_unmap(vbo);
        drm_gem_object_release(&vbo->tbo.base);
 }
@@ -497,7 +499,7 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
                if (!(flags & drm_vmw_synccpu_allow_cs)) {
                        atomic_dec(&vmw_bo->cpu_writers);
                }
-               vmw_user_bo_unref(vmw_bo);
+               vmw_user_bo_unref(&vmw_bo);
        }
 
        return ret;
@@ -539,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                        return ret;
 
                ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
-               vmw_user_bo_unref(vbo);
+               vmw_user_bo_unref(&vbo);
                if (unlikely(ret != 0)) {
                        if (ret == -ERESTARTSYS || ret == -EBUSY)
                                return -EBUSY;
@@ -612,7 +614,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
        }
 
        *out = to_vmw_bo(gobj);
-       ttm_bo_get(&(*out)->tbo);
 
        return 0;
 }
index 1d433fc..0d496dc 100644 (file)
@@ -195,12 +195,19 @@ static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
        return buf;
 }
 
-static inline void vmw_user_bo_unref(struct vmw_bo *vbo)
+static inline struct vmw_bo *vmw_user_bo_ref(struct vmw_bo *vbo)
 {
-       if (vbo) {
-               ttm_bo_put(&vbo->tbo);
-               drm_gem_object_put(&vbo->tbo.base);
-       }
+       drm_gem_object_get(&vbo->tbo.base);
+       return vbo;
+}
+
+static inline void vmw_user_bo_unref(struct vmw_bo **buf)
+{
+       struct vmw_bo *tmp_buf = *buf;
+
+       *buf = NULL;
+       if (tmp_buf)
+               drm_gem_object_put(&tmp_buf->tbo.base);
 }
 
 static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
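
The reworked vmw_user_bo_unref() above takes a struct vmw_bo ** so it can clear the caller's pointer before dropping the GEM reference; a stale pointer then fails as an obvious NULL dereference rather than a silent use-after-free. A tiny stand-alone sketch of that idiom (hypothetical obj/obj_put names, not vmwgfx code):

    #include <stdlib.h>

    struct obj { int refcount; };

    static void obj_put(struct obj *o)
    {
            if (o && --o->refcount == 0)
                    free(o);
    }

    /* Drop the reference and poison the caller's pointer in one step. */
    static void obj_unref(struct obj **po)
    {
            struct obj *tmp = *po;

            *po = NULL;
            obj_put(tmp);
    }

    int main(void)
    {
            struct obj *o = calloc(1, sizeof(*o));

            if (!o)
                    return 1;
            o->refcount = 1;
            obj_unref(&o);
            /* o is NULL here; a second unref or use fails loudly. */
            return 0;
    }
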
index c0b24d1..a7c0769 100644 (file)
@@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
         * for the new COTable. Initially pin the buffer object to make sure
         * we can use tryreserve without failure.
         */
-       ret = vmw_bo_create(dev_priv, &bo_params, &buf);
+       ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
        if (ret) {
                DRM_ERROR("Failed initializing new cotable MOB.\n");
                goto out_done;
@@ -502,7 +502,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 
        vmw_resource_mob_attach(res);
        /* Let go of the old mob. */
-       vmw_bo_unreference(&old_buf);
+       vmw_user_bo_unref(&old_buf);
        res->id = vcotbl->type;
 
        ret = dma_resv_reserve_fences(bo->base.resv, 1);
@@ -521,7 +521,7 @@ out_map_new:
 out_wait:
        ttm_bo_unpin(bo);
        ttm_bo_unreserve(bo);
-       vmw_bo_unreference(&buf);
+       vmw_user_bo_unref(&buf);
 
 out_done:
        MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
index 58bfdf2..3cd5090 100644 (file)
@@ -853,6 +853,10 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
 /**
  * GEM related functionality - vmwgfx_gem.c
  */
+struct vmw_bo_params;
+int vmw_gem_object_create(struct vmw_private *vmw,
+                         struct vmw_bo_params *params,
+                         struct vmw_bo **p_vbo);
 extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                                             struct drm_file *filp,
                                             uint32_t size,
index 98e0723..36987ef 100644 (file)
@@ -1151,7 +1151,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                 SVGAMobId *id,
                                 struct vmw_bo **vmw_bo_p)
 {
-       struct vmw_bo *vmw_bo;
+       struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = *id;
        struct vmw_relocation *reloc;
        int ret;
@@ -1164,7 +1164,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        }
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-       vmw_user_bo_unref(vmw_bo);
+       tmp_bo = vmw_bo;
+       vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1206,7 +1207,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_bo **vmw_bo_p)
 {
-       struct vmw_bo *vmw_bo;
+       struct vmw_bo *vmw_bo, *tmp_bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        int ret;
@@ -1220,7 +1221,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
-       vmw_user_bo_unref(vmw_bo);
+       tmp_bo = vmw_bo;
+       vmw_user_bo_unref(&tmp_bo);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1619,7 +1621,7 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 {
        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
-         ((unsigned long) header + header->size + sizeof(header));
+         ((unsigned long) header + header->size + sizeof(*header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(*cmd));
        struct vmw_resource *ctx;
index c0da89e..8b1eb00 100644 (file)
@@ -111,6 +111,20 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
        .vm_ops = &vmw_vm_ops,
 };
 
+int vmw_gem_object_create(struct vmw_private *vmw,
+                         struct vmw_bo_params *params,
+                         struct vmw_bo **p_vbo)
+{
+       int ret = vmw_bo_create(vmw, params, p_vbo);
+
+       if (ret != 0)
+               goto out_no_bo;
+
+       (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
+out_no_bo:
+       return ret;
+}
+
 int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                                      struct drm_file *filp,
                                      uint32_t size,
@@ -126,12 +140,10 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
                .pin = false
        };
 
-       ret = vmw_bo_create(dev_priv, &params, p_vbo);
+       ret = vmw_gem_object_create(dev_priv, &params, p_vbo);
        if (ret != 0)
                goto out_no_bo;
 
-       (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
-
        ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
 out_no_bo:
        return ret;
index 1489ad7..818b7f1 100644 (file)
@@ -1471,8 +1471,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
        /* Reserve and switch the backing mob. */
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);
-       vmw_bo_unreference(&res->guest_memory_bo);
-       res->guest_memory_bo = vmw_bo_reference(bo_mob);
+       vmw_user_bo_unref(&res->guest_memory_bo);
+       res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
        res->guest_memory_offset = 0;
        vmw_resource_unreserve(res, false, false, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1666,7 +1666,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 err_out:
        /* vmw_user_lookup_handle takes one ref so does new_fb */
        if (bo)
-               vmw_user_bo_unref(bo);
+               vmw_user_bo_unref(&bo);
        if (surface)
                vmw_surface_unreference(&surface);
 
index fb85f24..c45b472 100644 (file)
@@ -451,7 +451,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 
        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
 
-       vmw_user_bo_unref(buf);
+       vmw_user_bo_unref(&buf);
 
 out_unlock:
        mutex_unlock(&overlay->mutex);
index 71eeabf..ca300c7 100644 (file)
@@ -141,7 +141,7 @@ static void vmw_resource_release(struct kref *kref)
                if (res->coherent)
                        vmw_bo_dirty_release(res->guest_memory_bo);
                ttm_bo_unreserve(bo);
-               vmw_bo_unreference(&res->guest_memory_bo);
+               vmw_user_bo_unref(&res->guest_memory_bo);
        }
 
        if (likely(res->hw_destroy != NULL)) {
@@ -338,7 +338,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
                return 0;
        }
 
-       ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
+       ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo);
        if (unlikely(ret != 0))
                goto out_no_bo;
 
@@ -457,11 +457,11 @@ void vmw_resource_unreserve(struct vmw_resource *res,
                        vmw_resource_mob_detach(res);
                        if (res->coherent)
                                vmw_bo_dirty_release(res->guest_memory_bo);
-                       vmw_bo_unreference(&res->guest_memory_bo);
+                       vmw_user_bo_unref(&res->guest_memory_bo);
                }
 
                if (new_guest_memory_bo) {
-                       res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
+                       res->guest_memory_bo = vmw_user_bo_ref(new_guest_memory_bo);
 
                        /*
                         * The validation code should already have added a
@@ -551,7 +551,7 @@ out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (guest_memory_dirty)
-               vmw_bo_unreference(&res->guest_memory_bo);
+               vmw_user_bo_unref(&res->guest_memory_bo);
 
        return ret;
 }
@@ -727,7 +727,7 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
                goto out_no_validate;
        else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
                WARN_ON_ONCE(vmw_resource_mob_attached(res));
-               vmw_bo_unreference(&res->guest_memory_bo);
+               vmw_user_bo_unref(&res->guest_memory_bo);
        }
 
        return 0;
index 1e81ff2..a01ca32 100644 (file)
@@ -180,7 +180,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
 
        res->guest_memory_size = size;
        if (byte_code) {
-               res->guest_memory_bo = vmw_bo_reference(byte_code);
+               res->guest_memory_bo = vmw_user_bo_ref(byte_code);
                res->guest_memory_offset = offset;
        }
        shader->size = size;
@@ -809,7 +809,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
                                    shader_type, num_input_sig,
                                    num_output_sig, tfile, shader_handle);
 out_bad_arg:
-       vmw_user_bo_unref(buffer);
+       vmw_user_bo_unref(&buffer);
        return ret;
 }
 
index 5db403e..3829be2 100644 (file)
@@ -686,9 +686,6 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
            container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;
 
-       if (res->guest_memory_bo)
-               drm_gem_object_put(&res->guest_memory_bo->tbo.base);
-
        *p_base = NULL;
        vmw_resource_unreference(&res);
 }
@@ -855,23 +852,21 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
         * expect a backup buffer to be present.
         */
        if (dev_priv->has_mob && req->shareable) {
-               uint32_t backup_handle;
-
-               ret = vmw_gem_object_create_with_handle(dev_priv,
-                                                       file_priv,
-                                                       res->guest_memory_size,
-                                                       &backup_handle,
-                                                       &res->guest_memory_bo);
+               struct vmw_bo_params params = {
+                       .domain = VMW_BO_DOMAIN_SYS,
+                       .busy_domain = VMW_BO_DOMAIN_SYS,
+                       .bo_type = ttm_bo_type_device,
+                       .size = res->guest_memory_size,
+                       .pin = false
+               };
+
+               ret = vmw_gem_object_create(dev_priv,
+                                           &params,
+                                           &res->guest_memory_bo);
                if (unlikely(ret != 0)) {
                        vmw_resource_unreference(&res);
                        goto out_unlock;
                }
-               vmw_bo_reference(res->guest_memory_bo);
-               /*
-                * We don't expose the handle to the userspace and surface
-                * already holds a gem reference
-                */
-               drm_gem_handle_delete(file_priv, backup_handle);
        }
 
        tmp = vmw_resource_reference(&srf->res);
@@ -1512,7 +1507,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                if (ret == 0) {
                        if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
                                VMW_DEBUG_USER("Surface backup buffer too small.\n");
-                               vmw_bo_unreference(&res->guest_memory_bo);
+                               vmw_user_bo_unref(&res->guest_memory_bo);
                                ret = -EINVAL;
                                goto out_unlock;
                        } else {
@@ -1526,8 +1521,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
                                                        res->guest_memory_size,
                                                        &backup_handle,
                                                        &res->guest_memory_bo);
-               if (ret == 0)
-                       vmw_bo_reference(res->guest_memory_bo);
        }
 
        if (unlikely(ret != 0)) {
index ff077df..a209d51 100644 (file)
@@ -4515,7 +4515,8 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                        goto hid_hw_init_fail;
        }
 
-       hidpp_connect_event(hidpp);
+       schedule_work(&hidpp->work);
+       flush_work(&hidpp->work);
 
        if (will_restart) {
                /* Reset the HID node state */
index 9601c06..2735cd5 100644 (file)
@@ -998,45 +998,29 @@ static int i2c_hid_core_resume(struct i2c_hid *ihid)
        return hid_driver_reset_resume(hid);
 }
 
-/**
- * __do_i2c_hid_core_initial_power_up() - First time power up of the i2c-hid device.
- * @ihid: The ihid object created during probe.
- *
- * This function is called at probe time.
- *
- * The initial power on is where we do some basic validation that the device
- * exists, where we fetch the HID descriptor, and where we create the actual
- * HID devices.
- *
- * Return: 0 or error code.
+/*
+ * Check that the device exists and parse the HID descriptor.
  */
-static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
+static int __i2c_hid_core_probe(struct i2c_hid *ihid)
 {
        struct i2c_client *client = ihid->client;
        struct hid_device *hid = ihid->hid;
        int ret;
 
-       ret = i2c_hid_core_power_up(ihid);
-       if (ret)
-               return ret;
-
        /* Make sure there is something at this address */
        ret = i2c_smbus_read_byte(client);
        if (ret < 0) {
                i2c_hid_dbg(ihid, "nothing at this address: %d\n", ret);
-               ret = -ENXIO;
-               goto err;
+               return -ENXIO;
        }
 
        ret = i2c_hid_fetch_hid_descriptor(ihid);
        if (ret < 0) {
                dev_err(&client->dev,
                        "Failed to fetch the HID Descriptor\n");
-               goto err;
+               return ret;
        }
 
-       enable_irq(client->irq);
-
        hid->version = le16_to_cpu(ihid->hdesc.bcdVersion);
        hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
        hid->product = le16_to_cpu(ihid->hdesc.wProductID);
@@ -1050,17 +1034,49 @@ static int __do_i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
 
        ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
 
+       return 0;
+}
+
+static int i2c_hid_core_register_hid(struct i2c_hid *ihid)
+{
+       struct i2c_client *client = ihid->client;
+       struct hid_device *hid = ihid->hid;
+       int ret;
+
+       enable_irq(client->irq);
+
        ret = hid_add_device(hid);
        if (ret) {
                if (ret != -ENODEV)
                        hid_err(client, "can't add hid device: %d\n", ret);
-               goto err;
+               disable_irq(client->irq);
+               return ret;
        }
 
        return 0;
+}
+
+static int i2c_hid_core_probe_panel_follower(struct i2c_hid *ihid)
+{
+       int ret;
+
+       ret = i2c_hid_core_power_up(ihid);
+       if (ret)
+               return ret;
 
-err:
+       ret = __i2c_hid_core_probe(ihid);
+       if (ret)
+               goto err_power_down;
+
+       ret = i2c_hid_core_register_hid(ihid);
+       if (ret)
+               goto err_power_down;
+
+       return 0;
+
+err_power_down:
        i2c_hid_core_power_down(ihid);
+
        return ret;
 }
 
@@ -1077,7 +1093,7 @@ static void ihid_core_panel_prepare_work(struct work_struct *work)
         * steps.
         */
        if (!hid->version)
-               ret = __do_i2c_hid_core_initial_power_up(ihid);
+               ret = i2c_hid_core_probe_panel_follower(ihid);
        else
                ret = i2c_hid_core_resume(ihid);
 
@@ -1136,7 +1152,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
        struct device *dev = &ihid->client->dev;
        int ret;
 
-       ihid->is_panel_follower = true;
        ihid->panel_follower.funcs = &i2c_hid_core_panel_follower_funcs;
 
        /*
@@ -1156,30 +1171,6 @@ static int i2c_hid_core_register_panel_follower(struct i2c_hid *ihid)
        return 0;
 }
 
-static int i2c_hid_core_initial_power_up(struct i2c_hid *ihid)
-{
-       /*
-        * If we're a panel follower, we'll register and do our initial power
-        * up when the panel turns on; otherwise we do it right away.
-        */
-       if (drm_is_panel_follower(&ihid->client->dev))
-               return i2c_hid_core_register_panel_follower(ihid);
-       else
-               return __do_i2c_hid_core_initial_power_up(ihid);
-}
-
-static void i2c_hid_core_final_power_down(struct i2c_hid *ihid)
-{
-       /*
-        * If we're a follower, the act of unfollowing will cause us to be
-        * powered down. Otherwise we need to manually do it.
-        */
-       if (ihid->is_panel_follower)
-               drm_panel_remove_follower(&ihid->panel_follower);
-       else
-               i2c_hid_core_suspend(ihid, true);
-}
-
 int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
                       u16 hid_descriptor_address, u32 quirks)
 {
@@ -1211,6 +1202,7 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        ihid->ops = ops;
        ihid->client = client;
        ihid->wHIDDescRegister = cpu_to_le16(hid_descriptor_address);
+       ihid->is_panel_follower = drm_is_panel_follower(&client->dev);
 
        init_waitqueue_head(&ihid->wait);
        mutex_init(&ihid->reset_lock);
@@ -1224,14 +1216,10 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
                return ret;
        device_enable_async_suspend(&client->dev);
 
-       ret = i2c_hid_init_irq(client);
-       if (ret < 0)
-               goto err_buffers_allocated;
-
        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
                ret = PTR_ERR(hid);
-               goto err_irq;
+               goto err_free_buffers;
        }
 
        ihid->hid = hid;
@@ -1242,19 +1230,42 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
        hid->bus = BUS_I2C;
        hid->initial_quirks = quirks;
 
-       ret = i2c_hid_core_initial_power_up(ihid);
+       /* Power on and probe unless device is a panel follower. */
+       if (!ihid->is_panel_follower) {
+               ret = i2c_hid_core_power_up(ihid);
+               if (ret < 0)
+                       goto err_destroy_device;
+
+               ret = __i2c_hid_core_probe(ihid);
+               if (ret < 0)
+                       goto err_power_down;
+       }
+
+       ret = i2c_hid_init_irq(client);
+       if (ret < 0)
+               goto err_power_down;
+
+       /*
+        * If we're a panel follower, we'll register when the panel turns on;
+        * otherwise we do it right away.
+        */
+       if (ihid->is_panel_follower)
+               ret = i2c_hid_core_register_panel_follower(ihid);
+       else
+               ret = i2c_hid_core_register_hid(ihid);
        if (ret)
-               goto err_mem_free;
+               goto err_free_irq;
 
        return 0;
 
-err_mem_free:
-       hid_destroy_device(hid);
-
-err_irq:
+err_free_irq:
        free_irq(client->irq, ihid);
-
-err_buffers_allocated:
+err_power_down:
+       if (!ihid->is_panel_follower)
+               i2c_hid_core_power_down(ihid);
+err_destroy_device:
+       hid_destroy_device(hid);
+err_free_buffers:
        i2c_hid_free_buffers(ihid);
 
        return ret;
@@ -1266,7 +1277,14 @@ void i2c_hid_core_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       i2c_hid_core_final_power_down(ihid);
+       /*
+        * If we're a follower, the act of unfollowing will cause us to be
+        * powered down. Otherwise we need to manually do it.
+        */
+       if (ihid->is_panel_follower)
+               drm_panel_remove_follower(&ihid->panel_follower);
+       else
+               i2c_hid_core_suspend(ihid, true);
 
        hid = ihid->hid;
        hid_destroy_device(hid);
index 66dc5f9..8311e10 100644 (file)
@@ -610,7 +610,8 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata,
 
        flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size,
                                                &flat_buf->daddr,
-                                               DMA_FROM_DEVICE, GFP_KERNEL);
+                                               DMA_FROM_DEVICE,
+                                               GFP_KERNEL | __GFP_NOWARN);
        if (!flat_buf->vaddr) {
                kfree(flat_buf);
                return -ENOMEM;
@@ -1174,16 +1175,6 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
        }
 
        /*
-        * In sysFS mode we can have multiple writers per sink.  Since this
-        * sink is already enabled no memory is needed and the HW need not be
-        * touched, even if the buffer size has changed.
-        */
-       if (drvdata->mode == CS_MODE_SYSFS) {
-               atomic_inc(&csdev->refcnt);
-               goto out;
-       }
-
-       /*
         * If we don't have a buffer or it doesn't match the requested size,
         * use the buffer allocated above. Otherwise reuse the existing buffer.
         */
@@ -1204,7 +1195,7 @@ out:
 
 static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
 {
-       int ret;
+       int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct etr_buf *sysfs_buf = tmc_etr_get_sysfs_buffer(csdev);
@@ -1213,12 +1204,24 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
                return PTR_ERR(sysfs_buf);
 
        spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       /*
+        * In sysFS mode we can have multiple writers per sink.  Since this
+        * sink is already enabled no memory is needed and the HW need not be
+        * touched, even if the buffer size has changed.
+        */
+       if (drvdata->mode == CS_MODE_SYSFS) {
+               atomic_inc(&csdev->refcnt);
+               goto out;
+       }
+
        ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
        if (!ret) {
                drvdata->mode = CS_MODE_SYSFS;
                atomic_inc(&csdev->refcnt);
        }
 
+out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
        if (!ret)
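
The point of the tmc-etr change above is that the CS_MODE_SYSFS test and the refcount increment now sit inside the same spinlock-protected section as the state they read, so a concurrent enable or disable cannot slip in between the check and the increment. The general shape of that fix, sketched with a pthread mutex (illustrative types and names, not CoreSight code):

    #include <pthread.h>

    struct sink {
            pthread_mutex_t lock;
            int enabled;
            int refcnt;
    };

    static void sink_enable(struct sink *s)
    {
            pthread_mutex_lock(&s->lock);
            if (s->enabled) {               /* already on: just take a reference */
                    s->refcnt++;
                    goto out;
            }
            /* ... program the hardware here ... */
            s->enabled = 1;
            s->refcnt++;
    out:
            pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
            struct sink s = { .lock = PTHREAD_MUTEX_INITIALIZER };

            sink_enable(&s);        /* first caller programs the HW */
            sink_enable(&s);        /* later callers only bump the refcount */
            return 0;
    }
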
index 69d1103..b64fd36 100644 (file)
@@ -177,6 +177,7 @@ struct ad7192_chip_info {
 struct ad7192_state {
        const struct ad7192_chip_info   *chip_info;
        struct regulator                *avdd;
+       struct regulator                *vref;
        struct clk                      *mclk;
        u16                             int_vref_mv;
        u32                             fclk;
@@ -1008,10 +1009,30 @@ static int ad7192_probe(struct spi_device *spi)
        if (ret)
                return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVdd supply\n");
 
-       ret = regulator_get_voltage(st->avdd);
-       if (ret < 0) {
-               dev_err(&spi->dev, "Device tree error, reference voltage undefined\n");
-               return ret;
+       st->vref = devm_regulator_get_optional(&spi->dev, "vref");
+       if (IS_ERR(st->vref)) {
+               if (PTR_ERR(st->vref) != -ENODEV)
+                       return PTR_ERR(st->vref);
+
+               ret = regulator_get_voltage(st->avdd);
+               if (ret < 0)
+                       return dev_err_probe(&spi->dev, ret,
+                                            "Device tree error, AVdd voltage undefined\n");
+       } else {
+               ret = regulator_enable(st->vref);
+               if (ret) {
+                       dev_err(&spi->dev, "Failed to enable specified Vref supply\n");
+                       return ret;
+               }
+
+               ret = devm_add_action_or_reset(&spi->dev, ad7192_reg_disable, st->vref);
+               if (ret)
+                       return ret;
+
+               ret = regulator_get_voltage(st->vref);
+               if (ret < 0)
+                       return dev_err_probe(&spi->dev, ret,
+                                            "Device tree error, Vref voltage undefined\n");
        }
        st->int_vref_mv = ret / 1000;
 
index f5a0fc9..fff6e5a 100644 (file)
@@ -38,8 +38,8 @@
 #define IMX8QXP_ADR_ADC_FCTRL          0x30
 #define IMX8QXP_ADR_ADC_SWTRIG         0x34
 #define IMX8QXP_ADR_ADC_TCTRL(tid)     (0xc0 + (tid) * 4)
-#define IMX8QXP_ADR_ADC_CMDH(cid)      (0x100 + (cid) * 8)
-#define IMX8QXP_ADR_ADC_CMDL(cid)      (0x104 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_CMDL(cid)      (0x100 + (cid) * 8)
+#define IMX8QXP_ADR_ADC_CMDH(cid)      (0x104 + (cid) * 8)
 #define IMX8QXP_ADR_ADC_RESFIFO                0x300
 #define IMX8QXP_ADR_ADC_TST            0xffc
 
index 877f912..397544f 100644 (file)
@@ -24,6 +24,8 @@ config AD74413R
        depends on GPIOLIB && SPI
        select REGMAP_SPI
        select CRC8
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
        help
          Say yes here to build support for Analog Devices AD74412R/AD74413R
          quad-channel software configurable input/output solution.
index b72d39f..6bfe5d6 100644 (file)
@@ -190,8 +190,11 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
        /*
         * Ignore samples if the buffer is not set: it is needed if the ODR is
         * set but the buffer is not enabled yet.
+        *
+        * Note: iio_device_claim_buffer_mode() returns -EBUSY if the buffer
+        * is not enabled.
         */
-       if (!iio_buffer_enabled(indio_dev))
+       if (iio_device_claim_buffer_mode(indio_dev) < 0)
                return 0;
 
        out = (s16 *)st->samples;
@@ -210,6 +213,7 @@ int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
        iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
                                           timestamp + delta);
 
+       iio_device_release_buffer_mode(indio_dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
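
The cros_ec change above swaps the unlocked iio_buffer_enabled() test for iio_device_claim_buffer_mode(), which fails with -EBUSY when the buffer is not enabled and otherwise holds the device in buffer mode until the matching release. A condensed kernel-style sketch of that pairing (hypothetical helper name, not driver code):

    #include <linux/iio/iio.h>
    #include <linux/iio/buffer.h>

    /* Push a sample only while buffer mode is guaranteed to stay active. */
    static int push_sample(struct iio_dev *indio_dev, const void *sample)
    {
            if (iio_device_claim_buffer_mode(indio_dev) < 0)
                    return 0;       /* buffer not enabled: silently drop */

            iio_push_to_buffers(indio_dev, sample);
            iio_device_release_buffer_mode(indio_dev);
            return 0;
    }
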
index d5ea1a1..a492e8f 100644 (file)
@@ -140,8 +140,8 @@ enum ad3552r_ch_vref_select {
 };
 
 enum ad3542r_id {
-       AD3542R_ID = 0x4008,
-       AD3552R_ID = 0x4009,
+       AD3542R_ID = 0x4009,
+       AD3552R_ID = 0x4008,
 };
 
 enum ad3552r_ch_output_range {
index 6355c1f..9292307 100644 (file)
@@ -351,9 +351,9 @@ static int admv1013_update_mixer_vgate(struct admv1013_state *st)
        if (vcm < 0)
                return vcm;
 
-       if (vcm < 1800000)
+       if (vcm <= 1800000)
                mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
-       else if (vcm > 1800000 && vcm < 2600000)
+       else if (vcm > 1800000 && vcm <= 2600000)
                mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
        else
                return -EINVAL;
index fa79b1a..83e53ac 100644 (file)
@@ -2,6 +2,8 @@
 
 config BOSCH_BNO055
        tristate
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
 
 config BOSCH_BNO055_SERIAL
        tristate "Bosch BNO055 attached via UART"
index 3a52b09..fdf763a 100644 (file)
@@ -1513,7 +1513,6 @@ static int vcnl4040_write_event_config(struct iio_dev *indio_dev,
 
 out:
        mutex_unlock(&data->vcnl4000_lock);
-       data->chip_spec->set_power_state(data, data->ps_int || data->als_int);
 
        return ret;
 }
index 6089f3f..a2ef137 100644 (file)
@@ -2179,7 +2179,7 @@ int bmp280_common_probe(struct device *dev,
         * however as it happens, the BMP085 shares the chip ID of BMP180
         * so we look for an IRQ if we have that.
         */
-       if (irq > 0 || (chip_id  == BMP180_CHIP_ID)) {
+       if (irq > 0 && (chip_id  == BMP180_CHIP_ID)) {
                ret = bmp085_fetch_eoc_irq(dev, name, irq, data);
                if (ret)
                        return ret;
index b10dbf5..1ff091b 100644 (file)
@@ -57,8 +57,8 @@
 #define  DPS310_RESET_MAGIC    0x09
 #define DPS310_COEF_BASE       0x10
 
-/* Make sure sleep time is <= 20ms for usleep_range */
-#define DPS310_POLL_SLEEP_US(t)                min(20000, (t) / 8)
+/* Make sure sleep time is <= 30ms for usleep_range */
+#define DPS310_POLL_SLEEP_US(t)                min(30000, (t) / 8)
 /* Silently handle error in rate value here */
 #define DPS310_POLL_TIMEOUT_US(rc)     ((rc) <= 0 ? 1000000 : 1000000 / (rc))
 
@@ -402,8 +402,8 @@ static int dps310_reset_wait(struct dps310_data *data)
        if (rc)
                return rc;
 
-       /* Wait for device chip access: 2.5ms in specification */
-       usleep_range(2500, 12000);
+       /* Wait for device chip access: 15ms in specification */
+       usleep_range(15000, 55000);
        return 0;
 }
 
index 627497e..2fc706f 100644 (file)
@@ -76,7 +76,7 @@ static bool ms5611_prom_is_valid(u16 *prom, size_t len)
 
        crc = (crc >> 12) & 0x000F;
 
-       return crc_orig != 0x0000 && crc == crc_orig;
+       return crc == crc_orig;
 }
 
 static int ms5611_read_prom(struct iio_dev *indio_dev)
index 5bd791b..bdff91f 100644 (file)
@@ -759,14 +759,14 @@ static irqreturn_t irsd200_trigger_handler(int irq, void *pollf)
 {
        struct iio_dev *indio_dev = ((struct iio_poll_func *)pollf)->indio_dev;
        struct irsd200_data *data = iio_priv(indio_dev);
-       s16 buf = 0;
+       s64 buf[2] = {};
        int ret;
 
-       ret = irsd200_read_data(data, &buf);
+       ret = irsd200_read_data(data, (s16 *)buf);
        if (ret)
                goto end;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &buf,
+       iio_push_to_buffers_with_timestamp(indio_dev, buf,
                                           iio_get_time_ns(indio_dev));
 
 end:
index ede3805..f5c2156 100644 (file)
@@ -130,6 +130,7 @@ static const struct xpad_device {
        { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
        { 0x03eb, 0xff01, "Wooting One (Legacy)", 0, XTYPE_XBOX360 },
        { 0x03eb, 0xff02, "Wooting Two (Legacy)", 0, XTYPE_XBOX360 },
+       { 0x03f0, 0x0495, "HyperX Clutch Gladiate", 0, XTYPE_XBOXONE },
        { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
        { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
@@ -272,6 +273,7 @@ static const struct xpad_device {
        { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
        { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
        { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
+       { 0x11ff, 0x0511, "PXN V900", 0, XTYPE_XBOX360 },
        { 0x1209, 0x2882, "Ardwiino Controller", 0, XTYPE_XBOX360 },
        { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
        { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
@@ -459,6 +461,7 @@ static const struct usb_device_id xpad_table[] = {
        { USB_INTERFACE_INFO('X', 'B', 0) },    /* Xbox USB-IF not-approved class */
        XPAD_XBOX360_VENDOR(0x0079),            /* GPD Win 2 controller */
        XPAD_XBOX360_VENDOR(0x03eb),            /* Wooting Keyboards (Legacy) */
+       XPAD_XBOXONE_VENDOR(0x03f0),            /* HP HyperX Xbox One controllers */
        XPAD_XBOX360_VENDOR(0x044f),            /* Thrustmaster Xbox 360 controllers */
        XPAD_XBOX360_VENDOR(0x045e),            /* Microsoft Xbox 360 controllers */
        XPAD_XBOXONE_VENDOR(0x045e),            /* Microsoft Xbox One controllers */
@@ -477,6 +480,7 @@ static const struct usb_device_id xpad_table[] = {
        XPAD_XBOX360_VENDOR(0x1038),            /* SteelSeries controllers */
        XPAD_XBOXONE_VENDOR(0x10f5),            /* Turtle Beach Controllers */
        XPAD_XBOX360_VENDOR(0x11c9),            /* Nacon GC100XF */
+       XPAD_XBOX360_VENDOR(0x11ff),            /* PXN V900 */
        XPAD_XBOX360_VENDOR(0x1209),            /* Ardwiino Controllers */
        XPAD_XBOX360_VENDOR(0x12ab),            /* Xbox 360 dance pads */
        XPAD_XBOX360_VENDOR(0x1430),            /* RedOctane Xbox 360 controllers */
index c1c733a..db2ba89 100644 (file)
@@ -425,6 +425,7 @@ static void powermate_disconnect(struct usb_interface *intf)
                pm->requires_update = 0;
                usb_kill_urb(pm->irq);
                input_unregister_device(pm->input);
+               usb_kill_urb(pm->config);
                usb_free_urb(pm->irq);
                usb_free_urb(pm->config);
                powermate_free_buffers(interface_to_usbdev(intf), pm);
index 2118b20..4e38229 100644 (file)
@@ -2114,6 +2114,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse,
        psmouse->protocol_handler = elantech_process_byte;
        psmouse->disconnect = elantech_disconnect;
        psmouse->reconnect = elantech_reconnect;
+       psmouse->fast_reconnect = NULL;
        psmouse->pktsize = info->hw_version > 1 ? 6 : 4;
 
        return 0;
index 7b13de9..2a2459b 100644 (file)
@@ -5,7 +5,6 @@
 
 #define pr_fmt(fmt)            KBUILD_MODNAME ": " fmt
 
-#include <linux/delay.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/libps2.h>
@@ -119,18 +118,13 @@ static psmouse_ret_t psmouse_smbus_process_byte(struct psmouse *psmouse)
        return PSMOUSE_FULL_PACKET;
 }
 
-static void psmouse_activate_smbus_mode(struct psmouse_smbus_dev *smbdev)
-{
-       if (smbdev->need_deactivate) {
-               psmouse_deactivate(smbdev->psmouse);
-               /* Give the device time to switch into SMBus mode */
-               msleep(30);
-       }
-}
-
 static int psmouse_smbus_reconnect(struct psmouse *psmouse)
 {
-       psmouse_activate_smbus_mode(psmouse->private);
+       struct psmouse_smbus_dev *smbdev = psmouse->private;
+
+       if (smbdev->need_deactivate)
+               psmouse_deactivate(psmouse);
+
        return 0;
 }
 
@@ -263,7 +257,8 @@ int psmouse_smbus_init(struct psmouse *psmouse,
                }
        }
 
-       psmouse_activate_smbus_mode(smbdev);
+       if (need_deactivate)
+               psmouse_deactivate(psmouse);
 
        psmouse->private = smbdev;
        psmouse->protocol_handler = psmouse_smbus_process_byte;
index ada299e..22d16d8 100644 (file)
@@ -1623,6 +1623,7 @@ static int synaptics_init_ps2(struct psmouse *psmouse,
        psmouse->set_rate = synaptics_set_rate;
        psmouse->disconnect = synaptics_disconnect;
        psmouse->reconnect = synaptics_reconnect;
+       psmouse->fast_reconnect = NULL;
        psmouse->cleanup = synaptics_reset;
        /* Synaptics can usually stay in sync without extra help */
        psmouse->resync_time = 0;
@@ -1752,6 +1753,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse,
                psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) &&
                !SYN_CAP_EXT_BUTTONS_STICK(info->ext_cap_10);
        const struct rmi_device_platform_data pdata = {
+               .reset_delay_ms = 30,
                .sensor_pdata = {
                        .sensor_type = rmi_sensor_touchpad,
                        .axis_align.flip_y = true,
index 7059a27..b0b099b 100644 (file)
@@ -235,12 +235,29 @@ static void rmi_smb_clear_state(struct rmi_smb_xport *rmi_smb)
 
 static int rmi_smb_enable_smbus_mode(struct rmi_smb_xport *rmi_smb)
 {
-       int retval;
+       struct i2c_client *client = rmi_smb->client;
+       int smbus_version;
+
+       /*
+        * The psmouse driver resets the controller; we only need to wait
+        * to give the firmware a chance to fully reinitialize.
+        */
+       if (rmi_smb->xport.pdata.reset_delay_ms)
+               msleep(rmi_smb->xport.pdata.reset_delay_ms);
 
        /* we need to get the smbus version to activate the touchpad */
-       retval = rmi_smb_get_version(rmi_smb);
-       if (retval < 0)
-               return retval;
+       smbus_version = rmi_smb_get_version(rmi_smb);
+       if (smbus_version < 0)
+               return smbus_version;
+
+       rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
+               smbus_version);
+
+       if (smbus_version != 2 && smbus_version != 3) {
+               dev_err(&client->dev, "Unrecognized SMB version %d\n",
+                               smbus_version);
+               return -ENODEV;
+       }
 
        return 0;
 }
@@ -253,11 +270,10 @@ static int rmi_smb_reset(struct rmi_transport_dev *xport, u16 reset_addr)
        rmi_smb_clear_state(rmi_smb);
 
        /*
-        * we do not call the actual reset command, it has to be handled in
-        * PS/2 or there will be races between PS/2 and SMBus.
-        * PS/2 should ensure that a psmouse_reset is called before
-        * intializing the device and after it has been removed to be in a known
-        * state.
+        * We do not call the actual reset command, it has to be handled in
+        * PS/2 or there will be races between PS/2 and SMBus. PS/2 should
+        * ensure that a psmouse_reset is called before initializing the
+        * device and after it has been removed to be in a known state.
         */
        return rmi_smb_enable_smbus_mode(rmi_smb);
 }
@@ -272,7 +288,6 @@ static int rmi_smb_probe(struct i2c_client *client)
 {
        struct rmi_device_platform_data *pdata = dev_get_platdata(&client->dev);
        struct rmi_smb_xport *rmi_smb;
-       int smbus_version;
        int error;
 
        if (!pdata) {
@@ -311,18 +326,9 @@ static int rmi_smb_probe(struct i2c_client *client)
        rmi_smb->xport.proto_name = "smb";
        rmi_smb->xport.ops = &rmi_smb_ops;
 
-       smbus_version = rmi_smb_get_version(rmi_smb);
-       if (smbus_version < 0)
-               return smbus_version;
-
-       rmi_dbg(RMI_DEBUG_XPORT, &client->dev, "Smbus version is %d",
-               smbus_version);
-
-       if (smbus_version != 2 && smbus_version != 3) {
-               dev_err(&client->dev, "Unrecognized SMB version %d\n",
-                               smbus_version);
-               return -ENODEV;
-       }
+       error = rmi_smb_enable_smbus_mode(rmi_smb);
+       if (error)
+               return error;
 
        i2c_set_clientdata(client, rmi_smb);
 
index 1724d6c..9c39553 100644 (file)
@@ -619,6 +619,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
                .driver_data = (void *)(SERIO_QUIRK_NOMUX)
        },
        {
+               /* Fujitsu Lifebook E5411 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU CLIENT COMPUTING LIMITED"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E5411"),
+               },
+               .driver_data = (void *)(SERIO_QUIRK_NOAUX)
+       },
+       {
                /* Gigabyte M912 */
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
index da9954d..af32fbe 100644 (file)
@@ -900,6 +900,25 @@ static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
                dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
                ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
                gpio_mapping = acpi_goodix_int_last_gpios;
+       } else if (ts->gpio_count == 1 && ts->gpio_int_idx == 0) {
+               /*
+                * On newer devices there is only 1 GpioInt resource and _PS0
+                * does the whole reset sequence for us.
+                */
+               acpi_device_fix_up_power(ACPI_COMPANION(dev));
+
+               /*
+                * Before the _PS0 call the int GPIO may have been in output
+                * mode and the call should have put the int GPIO in input mode,
+                * but the GPIO subsys cached state may still think it is
+                * in output mode, causing gpiochip_lock_as_irq() failure.
+                *
+                * Add a mapping for the int GPIO to make the
+                * gpiod_int = gpiod_get(..., GPIOD_IN) call succeed,
+                * which will explicitly set the direction to input.
+                */
+               ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
+               gpio_mapping = acpi_goodix_int_first_gpios;
        } else {
                dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
                         ts->gpio_count, ts->gpio_int_idx);
index 3db4592..f407cce 100644 (file)
@@ -29,4 +29,8 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
 void gic_enable_of_quirks(const struct device_node *np,
                          const struct gic_quirk *quirks, void *data);
 
+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED     (1 << 1)
+#define RDIST_FLAGS_FORCE_NON_SHAREABLE        (1 << 2)
+
 #endif /* _IRQ_GIC_COMMON_H */
index e0c2b10..75a2dd5 100644 (file)
 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144      (1ULL << 2)
 #define ITS_FLAGS_FORCE_NON_SHAREABLE          (1ULL << 3)
 
-#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING    (1 << 0)
-#define RDIST_FLAGS_RD_TABLES_PREALLOCATED     (1 << 1)
-#define RDIST_FLAGS_FORCE_NON_SHAREABLE                (1 << 2)
-
 #define RD_LOCAL_LPI_ENABLED                    BIT(0)
 #define RD_LOCAL_PENDTABLE_PREALLOCATED         BIT(1)
 #define RD_LOCAL_MEMRESERVE_DONE                BIT(2)
@@ -4754,6 +4750,14 @@ static bool __maybe_unused its_enable_rk3588001(void *data)
        return true;
 }
 
+static bool its_set_non_coherent(void *data)
+{
+       struct its_node *its = data;
+
+       its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE;
+       return true;
+}
+
 static const struct gic_quirk its_quirks[] = {
 #ifdef CONFIG_CAVIUM_ERRATUM_22375
        {
@@ -4809,6 +4813,11 @@ static const struct gic_quirk its_quirks[] = {
        },
 #endif
        {
+               .desc   = "ITS: non-coherent attribute",
+               .property = "dma-noncoherent",
+               .init   = its_set_non_coherent,
+       },
+       {
        }
 };
 
@@ -4817,6 +4826,10 @@ static void its_enable_quirks(struct its_node *its)
        u32 iidr = readl_relaxed(its->base + GITS_IIDR);
 
        gic_enable_quirks(iidr, its_quirks, its);
+
+       if (is_of_node(its->fwnode_handle))
+               gic_enable_of_quirks(to_of_node(its->fwnode_handle),
+                                    its_quirks, its);
 }
 
 static int its_save_disable(void)
@@ -4952,7 +4965,7 @@ out_unmap:
        return NULL;
 }
 
-static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
+static int its_init_domain(struct its_node *its)
 {
        struct irq_domain *inner_domain;
        struct msi_domain_info *info;
@@ -4966,7 +4979,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
 
        inner_domain = irq_domain_create_hierarchy(its_parent,
                                                   its->msi_domain_flags, 0,
-                                                  handle, &its_domain_ops,
+                                                  its->fwnode_handle, &its_domain_ops,
                                                   info);
        if (!inner_domain) {
                kfree(info);
@@ -5017,8 +5030,7 @@ static int its_init_vpe_domain(void)
        return 0;
 }
 
-static int __init its_compute_its_list_map(struct resource *res,
-                                          void __iomem *its_base)
+static int __init its_compute_its_list_map(struct its_node *its)
 {
        int its_number;
        u32 ctlr;
@@ -5032,15 +5044,15 @@ static int __init its_compute_its_list_map(struct resource *res,
        its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
        if (its_number >= GICv4_ITS_LIST_MAX) {
                pr_err("ITS@%pa: No ITSList entry available!\n",
-                      &res->start);
+                      &its->phys_base);
                return -EINVAL;
        }
 
-       ctlr = readl_relaxed(its_base + GITS_CTLR);
+       ctlr = readl_relaxed(its->base + GITS_CTLR);
        ctlr &= ~GITS_CTLR_ITS_NUMBER;
        ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
-       writel_relaxed(ctlr, its_base + GITS_CTLR);
-       ctlr = readl_relaxed(its_base + GITS_CTLR);
+       writel_relaxed(ctlr, its->base + GITS_CTLR);
+       ctlr = readl_relaxed(its->base + GITS_CTLR);
        if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
                its_number = ctlr & GITS_CTLR_ITS_NUMBER;
                its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
@@ -5048,75 +5060,50 @@ static int __init its_compute_its_list_map(struct resource *res,
 
        if (test_and_set_bit(its_number, &its_list_map)) {
                pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
-                      &res->start, its_number);
+                      &its->phys_base, its_number);
                return -EINVAL;
        }
 
        return its_number;
 }
 
-static int __init its_probe_one(struct resource *res,
-                               struct fwnode_handle *handle, int numa_node)
+static int __init its_probe_one(struct its_node *its)
 {
-       struct its_node *its;
-       void __iomem *its_base;
-       u64 baser, tmp, typer;
+       u64 baser, tmp;
        struct page *page;
        u32 ctlr;
        int err;
 
-       its_base = its_map_one(res, &err);
-       if (!its_base)
-               return err;
-
-       pr_info("ITS %pR\n", res);
-
-       its = kzalloc(sizeof(*its), GFP_KERNEL);
-       if (!its) {
-               err = -ENOMEM;
-               goto out_unmap;
-       }
-
-       raw_spin_lock_init(&its->lock);
-       mutex_init(&its->dev_alloc_lock);
-       INIT_LIST_HEAD(&its->entry);
-       INIT_LIST_HEAD(&its->its_device_list);
-       typer = gic_read_typer(its_base + GITS_TYPER);
-       its->typer = typer;
-       its->base = its_base;
-       its->phys_base = res->start;
        if (is_v4(its)) {
-               if (!(typer & GITS_TYPER_VMOVP)) {
-                       err = its_compute_its_list_map(res, its_base);
+               if (!(its->typer & GITS_TYPER_VMOVP)) {
+                       err = its_compute_its_list_map(its);
                        if (err < 0)
-                               goto out_free_its;
+                               goto out;
 
                        its->list_nr = err;
 
                        pr_info("ITS@%pa: Using ITS number %d\n",
-                               &res->start, err);
+                               &its->phys_base, err);
                } else {
-                       pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
+                       pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base);
                }
 
                if (is_v4_1(its)) {
-                       u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);
+                       u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
 
-                       its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
+                       its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K);
                        if (!its->sgir_base) {
                                err = -ENOMEM;
-                               goto out_free_its;
+                               goto out;
                        }
 
-                       its->mpidr = readl_relaxed(its_base + GITS_MPIDR);
+                       its->mpidr = readl_relaxed(its->base + GITS_MPIDR);
 
                        pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
-                               &res->start, its->mpidr, svpet);
+                               &its->phys_base, its->mpidr, svpet);
                }
        }
 
-       its->numa_node = numa_node;
-
        page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
                                get_order(ITS_CMD_QUEUE_SZ));
        if (!page) {
@@ -5125,12 +5112,9 @@ static int __init its_probe_one(struct resource *res,
        }
        its->cmd_base = (void *)page_address(page);
        its->cmd_write = its->cmd_base;
-       its->fwnode_handle = handle;
        its->get_msi_base = its_irq_get_msi_base;
        its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI;
 
-       its_enable_quirks(its);
-
        err = its_alloc_tables(its);
        if (err)
                goto out_free_cmd;
@@ -5174,7 +5158,7 @@ static int __init its_probe_one(struct resource *res,
                ctlr |= GITS_CTLR_ImDe;
        writel_relaxed(ctlr, its->base + GITS_CTLR);
 
-       err = its_init_domain(handle, its);
+       err = its_init_domain(its);
        if (err)
                goto out_free_tables;
 
@@ -5191,11 +5175,8 @@ out_free_cmd:
 out_unmap_sgir:
        if (its->sgir_base)
                iounmap(its->sgir_base);
-out_free_its:
-       kfree(its);
-out_unmap:
-       iounmap(its_base);
-       pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
+out:
+       pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
        return err;
 }
 
@@ -5356,10 +5337,53 @@ static const struct of_device_id its_device_id[] = {
        {},
 };
 
+static struct its_node __init *its_node_init(struct resource *res,
+                                            struct fwnode_handle *handle, int numa_node)
+{
+       void __iomem *its_base;
+       struct its_node *its;
+       int err;
+
+       its_base = its_map_one(res, &err);
+       if (!its_base)
+               return NULL;
+
+       pr_info("ITS %pR\n", res);
+
+       its = kzalloc(sizeof(*its), GFP_KERNEL);
+       if (!its)
+               goto out_unmap;
+
+       raw_spin_lock_init(&its->lock);
+       mutex_init(&its->dev_alloc_lock);
+       INIT_LIST_HEAD(&its->entry);
+       INIT_LIST_HEAD(&its->its_device_list);
+
+       its->typer = gic_read_typer(its_base + GITS_TYPER);
+       its->base = its_base;
+       its->phys_base = res->start;
+
+       its->numa_node = numa_node;
+       its->fwnode_handle = handle;
+
+       return its;
+
+out_unmap:
+       iounmap(its_base);
+       return NULL;
+}
+
+static void its_node_destroy(struct its_node *its)
+{
+       iounmap(its->base);
+       kfree(its);
+}
+
 static int __init its_of_probe(struct device_node *node)
 {
        struct device_node *np;
        struct resource res;
+       int err;
 
        /*
         * Make sure *all* the ITS are reset before we probe any, as
@@ -5369,8 +5393,6 @@ static int __init its_of_probe(struct device_node *node)
         */
        for (np = of_find_matching_node(node, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
-               int err;
-
                if (!of_device_is_available(np) ||
                    !of_property_read_bool(np, "msi-controller") ||
                    of_address_to_resource(np, 0, &res))
@@ -5383,6 +5405,8 @@ static int __init its_of_probe(struct device_node *node)
 
        for (np = of_find_matching_node(node, its_device_id); np;
             np = of_find_matching_node(np, its_device_id)) {
+               struct its_node *its;
+
                if (!of_device_is_available(np))
                        continue;
                if (!of_property_read_bool(np, "msi-controller")) {
@@ -5396,7 +5420,17 @@ static int __init its_of_probe(struct device_node *node)
                        continue;
                }
 
-               its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
+
+               its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
+               if (!its)
+                       return -ENOMEM;
+
+               its_enable_quirks(its);
+               err = its_probe_one(its);
+               if (err) {
+                       its_node_destroy(its);
+                       return err;
+               }
        }
        return 0;
 }
@@ -5508,6 +5542,7 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
 {
        struct acpi_madt_generic_translator *its_entry;
        struct fwnode_handle *dom_handle;
+       struct its_node *its;
        struct resource res;
        int err;
 
@@ -5532,11 +5567,18 @@ static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
                goto dom_err;
        }
 
-       err = its_probe_one(&res, dom_handle,
-                       acpi_get_its_numa_node(its_entry->translation_id));
+       its = its_node_init(&res, dom_handle,
+                           acpi_get_its_numa_node(its_entry->translation_id));
+       if (!its) {
+               err = -ENOMEM;
+               goto node_err;
+       }
+
+       err = its_probe_one(its);
        if (!err)
                return 0;
 
+node_err:
        iort_deregister_domain_token(its_entry->translation_id);
 dom_err:
        irq_domain_free_fwnode(dom_handle);
index eedfa8e..f59ac95 100644 (file)
@@ -1857,6 +1857,14 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
        return true;
 }
 
+static bool rd_set_non_coherent(void *data)
+{
+       struct gic_chip_data *d = data;
+
+       d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
+       return true;
+}
+
 static const struct gic_quirk gic_quirks[] = {
        {
                .desc   = "GICv3: Qualcomm MSM8996 broken firmware",
@@ -1924,6 +1932,11 @@ static const struct gic_quirk gic_quirks[] = {
                .init   = gic_enable_quirk_arm64_2941627,
        },
        {
+               .desc   = "GICv3: non-coherent attribute",
+               .property = "dma-noncoherent",
+               .init   = rd_set_non_coherent,
+       },
+       {
        }
 };
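Both the ITS and GICv3 hunks above wire the new "dma-noncoherent" devicetree property into their existing quirk tables through the .property field (the ITS side applies them with gic_enable_of_quirks() as shown above). A minimal sketch of that property-driven pattern, with illustrative names rather than the drivers' own helpers:

  #include <linux/of.h>
  #include <linux/printk.h>

  /* One entry per optional DT property; the table ends with an empty entry. */
  struct dt_quirk {
          const char *desc;
          const char *property;
          bool (*init)(void *data);
  };

  static void apply_dt_quirks(struct device_node *np,
                              const struct dt_quirk *quirks, void *data)
  {
          for (; quirks->desc; quirks++) {
                  /* Entries keyed on hardware IDs rather than properties are skipped. */
                  if (!quirks->property ||
                      !of_property_read_bool(np, quirks->property))
                          continue;

                  if (quirks->init(data))
                          pr_info("enabled quirk: %s\n", quirks->desc);
          }
  }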
 
index 4bbfa2b..96f4e32 100644 (file)
@@ -118,7 +118,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
 
                raw_spin_lock(&priv->lock);
                reg = readl_relaxed(priv->base + TSSR(tssr_index));
-               reg &= ~(TSSEL_MASK << tssr_offset);
+               reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
                writel_relaxed(reg, priv->base + TSSR(tssr_index));
                raw_spin_unlock(&priv->lock);
        }
@@ -130,8 +130,8 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
        unsigned int hw_irq = irqd_to_hwirq(d);
 
        if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
+               unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
                struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
-               unsigned long tint = (uintptr_t)d->chip_data;
                u32 offset = hw_irq - IRQC_TINT_START;
                u32 tssr_offset = TSSR_OFFSET(offset);
                u8 tssr_index = TSSR_INDEX(offset);
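The fix shifts the field mask by the bit offset of the slot, TSSEL_SHIFT(tssr_offset), instead of by the slot index itself. A standalone illustration of that indexing; the macro values below are assumptions for the sketch, not copied from the driver:

  #include <stdio.h>

  #define TSSEL_MASK        0x7fu       /* assumed width of one TSSEL field */
  #define TSSR_INDEX(n)     ((n) / 4)   /* which TSSR register holds slot n */
  #define TSSR_OFFSET(n)    ((n) % 4)   /* slot position inside that register */
  #define TSSEL_SHIFT(off)  (8 * (off)) /* bit offset of that slot */

  int main(void)
  {
          unsigned int tint = 6;                /* arbitrary TINT slot */
          unsigned int off = TSSR_OFFSET(tint); /* slot 2 of TSSR1 */
          unsigned int reg = 0xffffffffu;

          /* Shifting by the slot index (the old code) clears the wrong bits... */
          printf("wrong: %08x\n", reg & ~(TSSEL_MASK << off));
          /* ...while shifting by the bit offset clears the whole field. */
          printf("right: %08x\n", reg & ~(TSSEL_MASK << TSSEL_SHIFT(off)));
          return 0;
  }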
index 4adeee1..e8d01b1 100644 (file)
@@ -155,8 +155,16 @@ static int __init riscv_intc_init(struct device_node *node,
         * for each INTC DT node. We only need to do INTC initialization
         * for the INTC DT node belonging to boot CPU (or boot HART).
         */
-       if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
+       if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) {
+               /*
+                * The INTC nodes of each CPU are suppliers for downstream
+                * interrupt controllers (such as PLIC, IMSIC and APLIC
+                * direct-mode) so we should mark an INTC node as initialized
+                * if we are not creating an IRQ domain for it.
+                */
+               fwnode_dev_initialized(of_fwnode_handle(node), true);
                return 0;
+       }
 
        return riscv_intc_init_common(of_node_to_fwnode(node));
 }
index d8ba5fb..971240e 100644 (file)
@@ -460,6 +460,7 @@ static const struct irq_domain_ops irq_exti_domain_ops = {
        .map    = irq_map_generic_chip,
        .alloc  = stm32_exti_alloc,
        .free   = stm32_exti_free,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 static void stm32_irq_ack(struct irq_data *d)
index a32c0d2..74b2f12 100644 (file)
 
 #define PDC_MAX_GPIO_IRQS      256
 
+/* Valid only on HW version < 3.2 */
 #define IRQ_ENABLE_BANK                0x10
 #define IRQ_i_CFG              0x110
 
+/* Valid only on HW version >= 3.2 */
+#define IRQ_i_CFG_IRQ_ENABLE   3
+
+#define IRQ_i_CFG_TYPE_MASK    GENMASK(2, 0)
+
+#define PDC_VERSION_REG                0x1000
+
+/* Notable PDC versions */
+#define PDC_VERSION_3_2                0x30200
+
 struct pdc_pin_region {
        u32 pin_base;
        u32 parent_base;
@@ -37,6 +48,7 @@ static DEFINE_RAW_SPINLOCK(pdc_lock);
 static void __iomem *pdc_base;
 static struct pdc_pin_region *pdc_region;
 static int pdc_region_cnt;
+static unsigned int pdc_version;
 
 static void pdc_reg_write(int reg, u32 i, u32 val)
 {
@@ -48,20 +60,32 @@ static u32 pdc_reg_read(int reg, u32 i)
        return readl_relaxed(pdc_base + reg + i * sizeof(u32));
 }
 
-static void pdc_enable_intr(struct irq_data *d, bool on)
+static void __pdc_enable_intr(int pin_out, bool on)
 {
-       int pin_out = d->hwirq;
        unsigned long enable;
-       unsigned long flags;
-       u32 index, mask;
 
-       index = pin_out / 32;
-       mask = pin_out % 32;
+       if (pdc_version < PDC_VERSION_3_2) {
+               u32 index, mask;
+
+               index = pin_out / 32;
+               mask = pin_out % 32;
+
+               enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
+               __assign_bit(mask, &enable, on);
+               pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+       } else {
+               enable = pdc_reg_read(IRQ_i_CFG, pin_out);
+               __assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
+               pdc_reg_write(IRQ_i_CFG, pin_out, enable);
+       }
+}
+
+static void pdc_enable_intr(struct irq_data *d, bool on)
+{
+       unsigned long flags;
 
        raw_spin_lock_irqsave(&pdc_lock, flags);
-       enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
-       __assign_bit(mask, &enable, on);
-       pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+       __pdc_enable_intr(d->hwirq, on);
        raw_spin_unlock_irqrestore(&pdc_lock, flags);
 }
 
@@ -142,6 +166,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
        }
 
        old_pdc_type = pdc_reg_read(IRQ_i_CFG, d->hwirq);
+       pdc_type |= (old_pdc_type & ~IRQ_i_CFG_TYPE_MASK);
        pdc_reg_write(IRQ_i_CFG, d->hwirq, pdc_type);
 
        ret = irq_chip_set_type_parent(d, type);
@@ -246,7 +271,6 @@ static const struct irq_domain_ops qcom_pdc_ops = {
 static int pdc_setup_pin_mapping(struct device_node *np)
 {
        int ret, n, i;
-       u32 irq_index, reg_index, val;
 
        n = of_property_count_elems_of_size(np, "qcom,pdc-ranges", sizeof(u32));
        if (n <= 0 || n % 3)
@@ -276,29 +300,38 @@ static int pdc_setup_pin_mapping(struct device_node *np)
                if (ret)
                        return ret;
 
-               for (i = 0; i < pdc_region[n].cnt; i++) {
-                       reg_index = (i + pdc_region[n].pin_base) >> 5;
-                       irq_index = (i + pdc_region[n].pin_base) & 0x1f;
-                       val = pdc_reg_read(IRQ_ENABLE_BANK, reg_index);
-                       val &= ~BIT(irq_index);
-                       pdc_reg_write(IRQ_ENABLE_BANK, reg_index, val);
-               }
+               for (i = 0; i < pdc_region[n].cnt; i++)
+                       __pdc_enable_intr(i + pdc_region[n].pin_base, 0);
        }
 
        return 0;
 }
 
+#define QCOM_PDC_SIZE 0x30000
+
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
        struct irq_domain *parent_domain, *pdc_domain;
+       resource_size_t res_size;
+       struct resource res;
        int ret;
 
-       pdc_base = of_iomap(node, 0);
+       /* compat with old sm8150 DTs that describe a too-small PDC region */
+       if (of_address_to_resource(node, 0, &res))
+               return -EINVAL;
+
+       res_size = max_t(resource_size_t, resource_size(&res), QCOM_PDC_SIZE);
+       if (res_size > resource_size(&res))
+               pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+
+       pdc_base = ioremap(res.start, res_size);
        if (!pdc_base) {
                pr_err("%pOF: unable to map PDC registers\n", node);
                return -ENXIO;
        }
 
+       pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
+
        parent_domain = irq_find_host(parent);
        if (!parent_domain) {
                pr_err("%pOF: unable to find PDC's parent domain\n", node);
index 978fdfc..0cac5be 100644 (file)
@@ -387,17 +387,13 @@ EXPORT_SYMBOL_NS_GPL(mcb_free_dev, MCB);
 
 static int __mcb_bus_add_devices(struct device *dev, void *data)
 {
-       struct mcb_device *mdev = to_mcb_device(dev);
        int retval;
 
-       if (mdev->is_added)
-               return 0;
-
        retval = device_attach(dev);
-       if (retval < 0)
+       if (retval < 0) {
                dev_err(dev, "Error adding device (%d)\n", retval);
-
-       mdev->is_added = true;
+               return retval;
+       }
 
        return 0;
 }
index 2aef990..656b6b7 100644 (file)
@@ -99,8 +99,6 @@ static int chameleon_parse_gdd(struct mcb_bus *bus,
        mdev->mem.end = mdev->mem.start + size - 1;
        mdev->mem.flags = IORESOURCE_MEM;
 
-       mdev->is_added = false;
-
        ret = mcb_device_register(bus, mdev);
        if (ret < 0)
                goto err;
index f2662c2..5315fd2 100644 (file)
@@ -753,7 +753,8 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
        int err;
        u8 *buf;
 
-       reqsize = ALIGN(crypto_skcipher_reqsize(tfm), __alignof__(__le64));
+       reqsize = sizeof(*req) + crypto_skcipher_reqsize(tfm);
+       reqsize = ALIGN(reqsize, __alignof__(__le64));
 
        req = kmalloc(reqsize + cc->iv_size, GFP_NOIO);
        if (!req)
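The recomputed reqsize now counts the fixed request header before aligning, so the IV bytes allocated after the request can no longer overlap the request's own context. A userspace sketch of the layout, with made-up sizes:

  #include <stdio.h>
  #include <stddef.h>

  #define ALIGN(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

  struct fake_request { char hdr[24]; };  /* stands in for the request header */

  int main(void)
  {
          size_t ctx_size = 52;           /* pretend per-tfm request context size */
          size_t old_off  = ALIGN(ctx_size, 8);
          size_t new_off  = ALIGN(sizeof(struct fake_request) + ctx_size, 8);

          /* The trailing buffer must start after header + context; the old
           * offset lands inside the request object itself.
           */
          printf("old offset: %zu (needs at least %zu)\n",
                 old_off, sizeof(struct fake_request) + ctx_size);
          printf("new offset: %zu\n", new_off);
          return 0;
  }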
index 3af6125..4d9fd76 100644 (file)
@@ -1850,9 +1850,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
        }
 
        ret = v4l2_fwnode_endpoint_parse(endpoint, &vep);
+       fwnode_handle_put(endpoint);
        if (ret) {
                dev_err(dev, "Failed to parse endpoint: %d\n", ret);
-               fwnode_handle_put(endpoint);
                return ret;
        }
 
@@ -1864,12 +1864,9 @@ static int ov8858_parse_of(struct ov8858 *ov8858)
        default:
                dev_err(dev, "Unsupported number of data lanes %u\n",
                        ov8858->num_lanes);
-               fwnode_handle_put(endpoint);
                return -EINVAL;
        }
 
-       ov8858->subdev.fwnode = endpoint;
-
        return 0;
 }
 
@@ -1913,7 +1910,7 @@ static int ov8858_probe(struct i2c_client *client)
 
        ret = ov8858_init_ctrls(ov8858);
        if (ret)
-               goto err_put_fwnode;
+               return ret;
 
        sd = &ov8858->subdev;
        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
@@ -1964,8 +1961,6 @@ err_clean_entity:
        media_entity_cleanup(&sd->entity);
 err_free_handler:
        v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
-err_put_fwnode:
-       fwnode_handle_put(ov8858->subdev.fwnode);
 
        return ret;
 }
@@ -1978,7 +1973,6 @@ static void ov8858_remove(struct i2c_client *client)
        v4l2_async_unregister_subdev(sd);
        media_entity_cleanup(&sd->entity);
        v4l2_ctrl_handler_free(&ov8858->ctrl_handler);
-       fwnode_handle_put(ov8858->subdev.fwnode);
 
        pm_runtime_disable(&client->dev);
        if (!pm_runtime_status_suspended(&client->dev))
index 1bde8b6..e38198e 100644 (file)
@@ -107,8 +107,10 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
                for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
                        /* camera sensor depends on IVSC in DSDT if exist */
                        for_each_acpi_consumer_dev(ivsc_adev, consumer)
-                               if (consumer->handle == handle)
+                               if (consumer->handle == handle) {
+                                       acpi_dev_put(consumer);
                                        return ivsc_adev;
+                               }
        }
 
        return NULL;
index 4285770..996684a 100644 (file)
@@ -55,11 +55,18 @@ xvip_graph_find_entity(struct xvip_composite_device *xdev,
 {
        struct xvip_graph_entity *entity;
        struct v4l2_async_connection *asd;
-
-       list_for_each_entry(asd, &xdev->notifier.done_list, asc_entry) {
-               entity = to_xvip_entity(asd);
-               if (entity->asd.match.fwnode == fwnode)
-                       return entity;
+       struct list_head *lists[] = {
+               &xdev->notifier.done_list,
+               &xdev->notifier.waiting_list
+       };
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(lists); i++) {
+               list_for_each_entry(asd, lists[i], asc_entry) {
+                       entity = to_xvip_entity(asd);
+                       if (entity->asd.match.fwnode == fwnode)
+                               return entity;
+               }
        }
 
        return NULL;
index b92348a..31752c0 100644 (file)
@@ -502,6 +502,13 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg,
                                       V4L2_SUBDEV_CLIENT_CAP_STREAMS;
        int rval;
 
+       /*
+        * If the streams API is not enabled, remove V4L2_SUBDEV_CAP_STREAMS.
+        * Remove this when the API is no longer experimental.
+        */
+       if (!v4l2_subdev_enable_streams_api)
+               streams_subdev = false;
+
        switch (cmd) {
        case VIDIOC_SUBDEV_QUERYCAP: {
                struct v4l2_subdev_capability *cap = arg;
index b5b414a..3a8f27c 100644 (file)
@@ -179,6 +179,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
                               struct mmc_queue *mq);
 static void mmc_blk_hsq_req_done(struct mmc_request *mrq);
 static int mmc_spi_err_check(struct mmc_card *card);
+static int mmc_blk_busy_cb(void *cb_data, bool *busy);
 
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
@@ -470,7 +471,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        struct mmc_data data = {};
        struct mmc_request mrq = {};
        struct scatterlist sg;
-       bool r1b_resp, use_r1b_resp = false;
+       bool r1b_resp;
        unsigned int busy_timeout_ms;
        int err;
        unsigned int target_part;
@@ -551,8 +552,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        busy_timeout_ms = idata->ic.cmd_timeout_ms ? : MMC_BLK_TIMEOUT_MS;
        r1b_resp = (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B;
        if (r1b_resp)
-               use_r1b_resp = mmc_prepare_busy_cmd(card->host, &cmd,
-                                                   busy_timeout_ms);
+               mmc_prepare_busy_cmd(card->host, &cmd, busy_timeout_ms);
 
        mmc_wait_for_req(card->host, &mrq);
        memcpy(&idata->ic.response, cmd.resp, sizeof(cmd.resp));
@@ -605,19 +605,28 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
        if (idata->ic.postsleep_min_us)
                usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
 
-       /* No need to poll when using HW busy detection. */
-       if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)
-               return 0;
-
        if (mmc_host_is_spi(card->host)) {
                if (idata->ic.write_flag || r1b_resp || cmd.flags & MMC_RSP_SPI_BUSY)
                        return mmc_spi_err_check(card);
                return err;
        }
-       /* Ensure RPMB/R1B command has completed by polling with CMD13. */
-       if (idata->rpmb || r1b_resp)
-               err = mmc_poll_for_busy(card, busy_timeout_ms, false,
-                                       MMC_BUSY_IO);
+
+       /*
+        * Ensure RPMB, writes and R1B responses are completed by polling with
+        * CMD13. Note that we usually don't need to poll when using HW busy
+        * detection, but here it's needed since some commands may indicate
+        * errors through the R1 status bits.
+        */
+       if (idata->rpmb || idata->ic.write_flag || r1b_resp) {
+               struct mmc_blk_busy_data cb_data = {
+                       .card = card,
+               };
+
+               err = __mmc_poll_for_busy(card->host, 0, busy_timeout_ms,
+                                         &mmc_blk_busy_cb, &cb_data);
+
+               idata->ic.response[0] = cb_data.status;
+       }
 
        return err;
 }
index 89cd48f..4a4bab9 100644 (file)
@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
        case 3: /* MMC v3.1 - v3.3 */
        case 4: /* MMC v4 */
                card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
-               card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
+               card->cid.oemid         = UNSTUFF_BITS(resp, 104, 8);
                card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
                card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
                card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
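Per the change above, the OEM id is taken as the 8 bits at offset 104; a 16-bit read at the same offset also folds in the byte at bits 119..112. A standalone re-implementation of the bit-unstuffing helper, modeled on the MMC core macro but simplified for illustration:

  #include <stdio.h>
  #include <stdint.h>

  /* resp[0] holds response bits 127..96, resp[3] holds bits 31..0. */
  static uint32_t unstuff_bits(const uint32_t resp[4], unsigned int start,
                               unsigned int size)
  {
          uint32_t mask = (size < 32 ? 1u << size : 0) - 1;
          unsigned int off = 3 - start / 32;
          unsigned int shft = start & 31;
          uint32_t res = resp[off] >> shft;

          if (size + shft > 32)
                  res |= resp[off - 1] << ((32 - shft) % 32);

          return res & mask;
  }

  int main(void)
  {
          /* Made-up CID words: the byte at bits 111..104 is 0x4d and the
           * neighbouring byte at bits 119..112 is 0x01.
           */
          uint32_t resp[4] = { 0xfe014d00, 0, 0, 0 };

          printf("8-bit read:  0x%02x\n", (unsigned int)unstuff_bits(resp, 104, 8));
          printf("16-bit read: 0x%04x\n", (unsigned int)unstuff_bits(resp, 104, 16));
          return 0;
  }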
index f64b9ac..5914516 100644 (file)
@@ -1089,8 +1089,14 @@ static int mmc_sdio_resume(struct mmc_host *host)
                }
                err = mmc_sdio_reinit_card(host);
        } else if (mmc_card_wake_sdio_irq(host)) {
-               /* We may have switched to 1-bit mode during suspend */
+               /*
+                * We may have switched to 1-bit mode during suspend,
+                * so we need to hold retuning, because tuning is only
+                * supported in 4-bit or 8-bit mode.
+                */
+               mmc_retune_hold_now(host);
                err = sdio_enable_4bit_bus(host->card);
+               mmc_retune_release(host);
        }
 
        if (err)
index 5392200..97f7c3d 100644 (file)
@@ -669,11 +669,11 @@ static void msdc_reset_hw(struct msdc_host *host)
        u32 val;
 
        sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_RST);
-       readl_poll_timeout(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
+       readl_poll_timeout_atomic(host->base + MSDC_CFG, val, !(val & MSDC_CFG_RST), 0, 0);
 
        sdr_set_bits(host->base + MSDC_FIFOCS, MSDC_FIFOCS_CLR);
-       readl_poll_timeout(host->base + MSDC_FIFOCS, val,
-                          !(val & MSDC_FIFOCS_CLR), 0, 0);
+       readl_poll_timeout_atomic(host->base + MSDC_FIFOCS, val,
+                                 !(val & MSDC_FIFOCS_CLR), 0, 0);
 
        val = readl(host->base + MSDC_INT);
        writel(val, host->base + MSDC_INT);
index ae8c307..109d4b0 100644 (file)
@@ -1144,42 +1144,6 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
        return value;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
-{
-       struct sdhci_pci_slot *slot = chip->slots[0];
-
-       pci_free_irq_vectors(slot->chip->pdev);
-       gli_pcie_enable_msi(slot);
-
-       return sdhci_pci_resume_host(chip);
-}
-
-static int sdhci_cqhci_gli_resume(struct sdhci_pci_chip *chip)
-{
-       struct sdhci_pci_slot *slot = chip->slots[0];
-       int ret;
-
-       ret = sdhci_pci_gli_resume(chip);
-       if (ret)
-               return ret;
-
-       return cqhci_resume(slot->host->mmc);
-}
-
-static int sdhci_cqhci_gli_suspend(struct sdhci_pci_chip *chip)
-{
-       struct sdhci_pci_slot *slot = chip->slots[0];
-       int ret;
-
-       ret = cqhci_suspend(slot->host->mmc);
-       if (ret)
-               return ret;
-
-       return sdhci_suspend_host(slot->host);
-}
-#endif
-
 static void gl9763e_hs400_enhanced_strobe(struct mmc_host *mmc,
                                          struct mmc_ios *ios)
 {
@@ -1420,6 +1384,70 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip)
 }
 #endif
 
+#ifdef CONFIG_PM_SLEEP
+static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+
+       pci_free_irq_vectors(slot->chip->pdev);
+       gli_pcie_enable_msi(slot);
+
+       return sdhci_pci_resume_host(chip);
+}
+
+static int gl9763e_resume(struct sdhci_pci_chip *chip)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+       int ret;
+
+       ret = sdhci_pci_gli_resume(chip);
+       if (ret)
+               return ret;
+
+       ret = cqhci_resume(slot->host->mmc);
+       if (ret)
+               return ret;
+
+       /*
+        * Disable LPM negotiation to bring device back in sync
+        * with its runtime_pm state.
+        */
+       gl9763e_set_low_power_negotiation(slot, false);
+
+       return 0;
+}
+
+static int gl9763e_suspend(struct sdhci_pci_chip *chip)
+{
+       struct sdhci_pci_slot *slot = chip->slots[0];
+       int ret;
+
+       /*
+        * Certain SoCs can suspend only with the bus in low-
+        * power state, notably x86 SoCs when using S0ix.
+        * Re-enable LPM negotiation to allow entering L1 state
+        * and entering system suspend.
+        */
+       gl9763e_set_low_power_negotiation(slot, true);
+
+       ret = cqhci_suspend(slot->host->mmc);
+       if (ret)
+               goto err_suspend;
+
+       ret = sdhci_suspend_host(slot->host);
+       if (ret)
+               goto err_suspend_host;
+
+       return 0;
+
+err_suspend_host:
+       cqhci_resume(slot->host->mmc);
+err_suspend:
+       gl9763e_set_low_power_negotiation(slot, false);
+       return ret;
+}
+#endif
+
 static int gli_probe_slot_gl9763e(struct sdhci_pci_slot *slot)
 {
        struct pci_dev *pdev = slot->chip->pdev;
@@ -1527,8 +1555,8 @@ const struct sdhci_pci_fixes sdhci_gl9763e = {
        .probe_slot     = gli_probe_slot_gl9763e,
        .ops            = &sdhci_gl9763e_ops,
 #ifdef CONFIG_PM_SLEEP
-       .resume         = sdhci_cqhci_gli_resume,
-       .suspend        = sdhci_cqhci_gli_suspend,
+       .resume         = gl9763e_resume,
+       .suspend        = gl9763e_suspend,
 #endif
 #ifdef CONFIG_PM
        .runtime_suspend = gl9763e_runtime_suspend,
index 649ae07..6b84ba2 100644 (file)
@@ -644,6 +644,7 @@ static int sdhci_sprd_tuning(struct mmc_host *mmc, struct mmc_card *card,
        best_clk_sample = sdhci_sprd_get_best_clk_sample(mmc, value);
        if (best_clk_sample < 0) {
                dev_err(mmc_dev(host->mmc), "all tuning phase fail!\n");
+               err = best_clk_sample;
                goto out;
        }
 
index 78710fb..fc87213 100644 (file)
@@ -551,6 +551,17 @@ static int physmap_flash_probe(struct platform_device *dev)
                if (info->probe_type) {
                        info->mtds[i] = do_map_probe(info->probe_type,
                                                     &info->maps[i]);
+
+                       /* Fall back to mapping region as ROM */
+                       if (!info->mtds[i] && IS_ENABLED(CONFIG_MTD_ROM) &&
+                           strcmp(info->probe_type, "map_rom")) {
+                               dev_warn(&dev->dev,
+                                        "map_probe() failed for type %s\n",
+                                        info->probe_type);
+
+                               info->mtds[i] = do_map_probe("map_rom",
+                                                            &info->maps[i]);
+                       }
                } else {
                        int j;
 
index 4621ec5..a492051 100644 (file)
@@ -515,6 +515,7 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
        struct mtd_info *mtd = nand_to_mtd(chip);
        unsigned int len = mtd->writesize + (oob_required ? mtd->oobsize : 0);
        dma_addr_t dma_addr;
+       u8 status;
        int ret;
        struct anfc_op nfc_op = {
                .pkt_reg =
@@ -561,10 +562,21 @@ static int anfc_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
        }
 
        /* Spare data is not protected */
-       if (oob_required)
+       if (oob_required) {
                ret = nand_write_oob_std(chip, page);
+               if (ret)
+                       return ret;
+       }
 
-       return ret;
+       /* Check write status on the chip side */
+       ret = nand_status_op(chip, &status);
+       if (ret)
+               return ret;
+
+       if (status & NAND_STATUS_FAIL)
+               return -EIO;
+
+       return 0;
 }
 
 static int anfc_sel_write_page_hw_ecc(struct nand_chip *chip, const u8 *buf,
index 2c94da7..b841a81 100644 (file)
@@ -1165,6 +1165,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
                .ndcb[2] = NDCB2_ADDR5_PAGE(page),
        };
        unsigned int oob_bytes = lt->spare_bytes + (raw ? lt->ecc_bytes : 0);
+       u8 status;
        int ret;
 
        /* NFCv2 needs more information about the operation being executed */
@@ -1198,7 +1199,18 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
 
        ret = marvell_nfc_wait_op(chip,
                                  PSEC_TO_MSEC(sdr->tPROG_max));
-       return ret;
+       if (ret)
+               return ret;
+
+       /* Check write status on the chip side */
+       ret = nand_status_op(chip, &status);
+       if (ret)
+               return ret;
+
+       if (status & NAND_STATUS_FAIL)
+               return -EIO;
+
+       return 0;
 }
 
 static int marvell_nfc_hw_ecc_hmg_write_page_raw(struct nand_chip *chip,
@@ -1627,6 +1639,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
        int data_len = lt->data_bytes;
        int spare_len = lt->spare_bytes;
        int chunk, ret;
+       u8 status;
 
        marvell_nfc_select_target(chip, chip->cur_cs);
 
@@ -1663,6 +1676,14 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip,
        if (ret)
                return ret;
 
+       /* Check write status on the chip side */
+       ret = nand_status_op(chip, &status);
+       if (ret)
+               return ret;
+
+       if (status & NAND_STATUS_FAIL)
+               return -EIO;
+
        return 0;
 }
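The same post-program status check is added to the Arasan, Marvell and PL35x drivers in this series. A minimal helper capturing the pattern; the function name is illustrative, not from the tree:

  #include <linux/errno.h>
  #include <linux/mtd/rawnand.h>

  /* After the program operation completes, read the chip status and turn
   * a failed-program indication into -EIO.
   */
  static int check_program_status(struct nand_chip *chip)
  {
          u8 status;
          int ret;

          ret = nand_status_op(chip, &status);
          if (ret)
                  return ret;

          return (status & NAND_STATUS_FAIL) ? -EIO : 0;
  }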
 
index d4b5515..1fcac40 100644 (file)
@@ -5110,6 +5110,9 @@ static void rawnand_check_cont_read_support(struct nand_chip *chip)
 {
        struct mtd_info *mtd = nand_to_mtd(chip);
 
+       if (!chip->parameters.supports_read_cache)
+               return;
+
        if (chip->read_retries)
                return;
 
index 8367577..b3cc8f3 100644 (file)
@@ -94,6 +94,9 @@ int nand_jedec_detect(struct nand_chip *chip)
                goto free_jedec_param_page;
        }
 
+       if (p->opt_cmd[0] & JEDEC_OPT_CMD_READ_CACHE)
+               chip->parameters.supports_read_cache = true;
+
        memorg->pagesize = le32_to_cpu(p->byte_per_page);
        mtd->writesize = memorg->pagesize;
 
index f15ef90..861975e 100644 (file)
@@ -303,6 +303,9 @@ int nand_onfi_detect(struct nand_chip *chip)
                           ONFI_FEATURE_ADDR_TIMING_MODE, 1);
        }
 
+       if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_READ_CACHE)
+               chip->parameters.supports_read_cache = true;
+
        onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
        if (!onfi) {
                ret = -ENOMEM;
index 8da5fee..c506e92 100644 (file)
@@ -511,6 +511,7 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
        u32 addr1 = 0, addr2 = 0, row;
        u32 cmd_addr;
        int i, ret;
+       u8 status;
 
        ret = pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_APB);
        if (ret)
@@ -563,6 +564,14 @@ static int pl35x_nand_write_page_hwecc(struct nand_chip *chip,
        if (ret)
                goto disable_ecc_engine;
 
+       /* Check write status on the chip side */
+       ret = nand_status_op(chip, &status);
+       if (ret)
+               goto disable_ecc_engine;
+
+       if (status & NAND_STATUS_FAIL)
+               ret = -EIO;
+
 disable_ecc_engine:
        pl35x_smc_set_ecc_mode(nfc, chip, PL35X_SMC_ECC_CFG_MODE_BYPASS);
 
index 64499c1..b079605 100644 (file)
@@ -3444,7 +3444,7 @@ err_nandc_alloc:
 err_aon_clk:
        clk_disable_unprepare(nandc->core_clk);
 err_core_clk:
-       dma_unmap_resource(dev, res->start, resource_size(res),
+       dma_unmap_resource(dev, nandc->base_dma, resource_size(res),
                           DMA_BIDIRECTIONAL, 0);
        return ret;
 }
index 50b7295..12601bc 100644 (file)
@@ -12,7 +12,7 @@
 
 #define SPINAND_MFR_MICRON             0x2c
 
-#define MICRON_STATUS_ECC_MASK         GENMASK(7, 4)
+#define MICRON_STATUS_ECC_MASK         GENMASK(6, 4)
 #define MICRON_STATUS_ECC_NO_BITFLIPS  (0 << 4)
 #define MICRON_STATUS_ECC_1TO3_BITFLIPS        (1 << 4)
 #define MICRON_STATUS_ECC_4TO6_BITFLIPS        (3 << 4)
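The ECC status mask narrows from bits [7:4] to [6:4]. A quick userspace check of why the extra bit matters; the status value is made up for illustration:

  #include <stdio.h>

  #define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
  #define ECC_MASK       GENMASK(6, 4)

  int main(void)
  {
          unsigned int status = 0xb0;  /* bit 7 set plus the "4-6 bitflips" code */

          /* With the 3-bit mask the defined code (3) is recovered... */
          printf("field [6:4] = %u\n", (status & ECC_MASK) >> 4);
          /* ...while the old 4-bit mask yields a value matching none of the codes. */
          printf("field [7:4] = %u\n", (status & GENMASK(7, 4)) >> 4);
          return 0;
  }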
index ed7212e..51d47ed 100644 (file)
@@ -4023,7 +4023,7 @@ static inline const void *bond_pull_data(struct sk_buff *skb,
        if (likely(n <= hlen))
                return data;
        else if (skb && likely(pskb_may_pull(skb, n)))
-               return skb->head;
+               return skb->data;
 
        return NULL;
 }
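After pskb_may_pull() the pulled bytes are linear starting at skb->data, while skb->head points at the start of the allocation, before any headroom. A reduced sketch of the corrected helper shape, with an illustrative name:

  #include <linux/skbuff.h>

  /* Return a pointer to at least n linear bytes of packet data, or NULL. */
  static inline const void *pull_linear_data(struct sk_buff *skb, unsigned int n)
  {
          if (!pskb_may_pull(skb, n))
                  return NULL;

          /* The header may have been reallocated; data always starts at skb->data. */
          return skb->data;
  }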
index 649453a..f8cde9f 100644 (file)
@@ -190,7 +190,7 @@ config CAN_SLCAN
 
 config CAN_SUN4I
        tristate "Allwinner A10 CAN controller"
-       depends on MACH_SUN4I || MACH_SUN7I || RISCV || COMPILE_TEST
+       depends on MACH_SUN4I || MACH_SUN7I || (RISCV && ARCH_SUNXI) || COMPILE_TEST
        help
          Say Y here if you want to use CAN controller found on Allwinner
          A10/A20/D1 SoCs.
index add39e9..d15f85a 100644 (file)
@@ -348,7 +348,7 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
 static struct flexcan_devtype_data fsl_imx93_devtype_data = {
        .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
                FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
-               FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE |
+               FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
                FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
                FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
                FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
@@ -544,11 +544,6 @@ static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
        } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) {
                regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
                                   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
-       } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) {
-               /* For the auto stop mode, software do nothing, hardware will cover
-                * all the operation automatically after system go into low power mode.
-                */
-               return 0;
        }
 
        return flexcan_low_power_enter_ack(priv);
@@ -574,12 +569,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
        reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
        priv->write(reg_mcr, &regs->mcr);
 
-       /* For the auto stop mode, hardware will exist stop mode
-        * automatically after system go out of low power mode.
-        */
-       if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-               return 0;
-
        return flexcan_low_power_exit_ack(priv);
 }
 
@@ -1994,13 +1983,18 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
                ret = flexcan_setup_stop_mode_scfw(pdev);
        else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR)
                ret = flexcan_setup_stop_mode_gpr(pdev);
-       else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-               ret = 0;
        else
                /* return 0 directly if doesn't support stop mode feature */
                return 0;
 
-       if (ret)
+       /* A return value of -EINVAL means the SoC claims to support stop
+        * mode but the devicetree lacks the stop mode property. In that
+        * case return 0 directly: this skips the wakeup-capable setting
+        * and does not block the driver probe.
+        */
+       if (ret == -EINVAL)
+               return 0;
+       else if (ret)
                return ret;
 
        device_set_wakeup_capable(&pdev->dev, true);
@@ -2320,16 +2314,8 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
        if (netif_running(dev)) {
                int err;
 
-               if (device_may_wakeup(device)) {
+               if (device_may_wakeup(device))
                        flexcan_enable_wakeup_irq(priv, true);
-                       /* For auto stop mode, need to keep the clock on before
-                        * system go into low power mode. After system go into
-                        * low power mode, hardware will config the flexcan into
-                        * stop mode, and gate off the clock automatically.
-                        */
-                       if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)
-                               return 0;
-               }
 
                err = pm_runtime_force_suspend(device);
                if (err)
@@ -2347,15 +2333,9 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
        if (netif_running(dev)) {
                int err;
 
-               /* For the wakeup in auto stop mode, no need to gate on the
-                * clock here, hardware will do this automatically.
-                */
-               if (!(device_may_wakeup(device) &&
-                     priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) {
-                       err = pm_runtime_force_resume(device);
-                       if (err)
-                               return err;
-               }
+               err = pm_runtime_force_resume(device);
+               if (err)
+                       return err;
 
                if (device_may_wakeup(device))
                        flexcan_enable_wakeup_irq(priv, false);
index 9140297..025c341 100644 (file)
@@ -68,8 +68,6 @@
 #define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
 /* Device supports RX via FIFO */
 #define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
-/* auto enter stop mode to support wakeup */
-#define FLEXCAN_QUIRK_AUTO_STOP_MODE BIT(17)
 
 struct flexcan_devtype_data {
        u32 quirks;             /* quirks needed for different IP cores */
index 8a41438..ae8c42f 100644 (file)
@@ -125,7 +125,7 @@ static const struct tcan4x5x_version_info tcan4x5x_versions[] = {
        },
        [TCAN4553] = {
                .name = "4553",
-               .id2_register = 0x32353534,
+               .id2_register = 0x33353534,
        },
        /* generic version with no id2_register at the end */
        [TCAN4X5X] = {
index 0ada0e1..743c2eb 100644 (file)
@@ -392,7 +392,13 @@ static irqreturn_t sja1000_reset_interrupt(int irq, void *dev_id)
        struct net_device *dev = (struct net_device *)dev_id;
 
        netdev_dbg(dev, "performing a soft reset upon overrun\n");
-       sja1000_start(dev);
+
+       netif_tx_lock(dev);
+
+       can_free_echo_skb(dev, 0, NULL);
+       sja1000_set_mode(dev, CAN_MODE_START);
+
+       netif_tx_unlock(dev);
 
        return IRQ_HANDLED;
 }
index 72374b0..cd1f240 100644 (file)
@@ -617,17 +617,16 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
        priv->master_mii_bus = of_mdio_find_bus(dn);
        if (!priv->master_mii_bus) {
-               of_node_put(dn);
-               return -EPROBE_DEFER;
+               err = -EPROBE_DEFER;
+               goto err_of_node_put;
        }
 
-       get_device(&priv->master_mii_bus->dev);
        priv->master_mii_dn = dn;
 
        priv->slave_mii_bus = mdiobus_alloc();
        if (!priv->slave_mii_bus) {
-               of_node_put(dn);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto err_put_master_mii_bus_dev;
        }
 
        priv->slave_mii_bus->priv = priv;
@@ -684,11 +683,17 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
        }
 
        err = mdiobus_register(priv->slave_mii_bus);
-       if (err && dn) {
-               mdiobus_free(priv->slave_mii_bus);
-               of_node_put(dn);
-       }
+       if (err && dn)
+               goto err_free_slave_mii_bus;
 
+       return 0;
+
+err_free_slave_mii_bus:
+       mdiobus_free(priv->slave_mii_bus);
+err_put_master_mii_bus_dev:
+       put_device(&priv->master_mii_bus->dev);
+err_of_node_put:
+       of_node_put(dn);
        return err;
 }
 
@@ -696,6 +701,7 @@ static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
 {
        mdiobus_unregister(priv->slave_mii_bus);
        mdiobus_free(priv->slave_mii_bus);
+       put_device(&priv->master_mii_bus->dev);
        of_node_put(priv->master_mii_dn);
 }
 
index de1dc22..4ce68e6 100644 (file)
@@ -505,8 +505,8 @@ qca8k_bulk_read(void *ctx, const void *reg_buf, size_t reg_len,
                void *val_buf, size_t val_len)
 {
        int i, count = val_len / sizeof(u32), ret;
-       u32 reg = *(u32 *)reg_buf & U16_MAX;
        struct qca8k_priv *priv = ctx;
+       u32 reg = *(u16 *)reg_buf;
 
        if (priv->mgmt_master &&
            !qca8k_read_eth(priv, reg, val_buf, val_len))
@@ -527,8 +527,8 @@ qca8k_bulk_gather_write(void *ctx, const void *reg_buf, size_t reg_len,
                        const void *val_buf, size_t val_len)
 {
        int i, count = val_len / sizeof(u32), ret;
-       u32 reg = *(u32 *)reg_buf & U16_MAX;
        struct qca8k_priv *priv = ctx;
+       u32 reg = *(u16 *)reg_buf;
        u32 *val = (u32 *)val_buf;
 
        if (priv->mgmt_master &&
@@ -666,6 +666,15 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
                goto err_read_skb;
        }
 
+       /* It seems that accessing the switch's internal PHYs via management
+        * packets still uses the MDIO bus within the switch internally, and
+        * these accesses can conflict with external MDIO accesses to other
+        * devices on the MDIO bus.
+        * We therefore need to lock the MDIO bus onto which the switch is
+        * connected.
+        */
+       mutex_lock(&priv->bus->mdio_lock);
+
        /* Actually start the request:
         * 1. Send mdio master packet
         * 2. Busy Wait for mdio master command
@@ -678,6 +687,7 @@ qca8k_phy_eth_command(struct qca8k_priv *priv, bool read, int phy,
        mgmt_master = priv->mgmt_master;
        if (!mgmt_master) {
                mutex_unlock(&mgmt_eth_data->mutex);
+               mutex_unlock(&priv->bus->mdio_lock);
                ret = -EINVAL;
                goto err_mgmt_master;
        }
@@ -765,6 +775,7 @@ exit:
                                    QCA8K_ETHERNET_TIMEOUT);
 
        mutex_unlock(&mgmt_eth_data->mutex);
+       mutex_unlock(&priv->bus->mdio_lock);
 
        return ret;
 
index 5fc64e4..d567e42 100644 (file)
@@ -911,7 +911,7 @@ static int csk_wait_memory(struct chtls_dev *cdev,
                           struct sock *sk, long *timeo_p)
 {
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       int err = 0;
+       int ret, err = 0;
        long current_timeo;
        long vm_wait = 0;
        bool noblock;
@@ -942,10 +942,13 @@ static int csk_wait_memory(struct chtls_dev *cdev,
 
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
-               sk_wait_event(sk, &current_timeo, sk->sk_err ||
-                             (sk->sk_shutdown & SEND_SHUTDOWN) ||
-                             (csk_mem_free(cdev, sk) && !vm_wait), &wait);
+               ret = sk_wait_event(sk, &current_timeo, sk->sk_err ||
+                                   (sk->sk_shutdown & SEND_SHUTDOWN) ||
+                                   (csk_mem_free(cdev, sk) && !vm_wait),
+                                   &wait);
                sk->sk_write_pending--;
+               if (ret < 0)
+                       goto do_error;
 
                if (vm_wait) {
                        vm_wait -= current_timeo;
@@ -1348,6 +1351,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        int copied = 0;
        int target;
        long timeo;
+       int ret;
 
        buffers_freed = 0;
 
@@ -1423,7 +1427,11 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                if (copied >= target)
                        break;
                chtls_cleanup_rbuf(sk, copied);
-               sk_wait_data(sk, &timeo, NULL);
+               ret = sk_wait_data(sk, &timeo, NULL);
+               if (ret < 0) {
+                       copied = copied ? : ret;
+                       goto unlock;
+               }
                continue;
 found_ok_skb:
                if (!skb->len) {
@@ -1518,6 +1526,8 @@ skip_copy:
 
        if (buffers_freed)
                chtls_cleanup_rbuf(sk, copied);
+
+unlock:
        release_sock(sk);
        return copied;
 }
@@ -1534,6 +1544,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
        int copied = 0;
        size_t avail;          /* amount of available data in current skb */
        long timeo;
+       int ret;
 
        lock_sock(sk);
        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
@@ -1585,7 +1596,12 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
                        release_sock(sk);
                        lock_sock(sk);
                } else {
-                       sk_wait_data(sk, &timeo, NULL);
+                       ret = sk_wait_data(sk, &timeo, NULL);
+                       if (ret < 0) {
+                               /* here 'copied' is 0 due to previous checks */
+                               copied = ret;
+                               break;
+                       }
                }
 
                if (unlikely(peek_seq != tp->copied_seq)) {
@@ -1656,6 +1672,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
        int copied = 0;
        long timeo;
        int target;             /* Read at least this many bytes */
+       int ret;
 
        buffers_freed = 0;
 
@@ -1747,7 +1764,11 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                if (copied >= target)
                        break;
                chtls_cleanup_rbuf(sk, copied);
-               sk_wait_data(sk, &timeo, NULL);
+               ret = sk_wait_data(sk, &timeo, NULL);
+               if (ret < 0) {
+                       copied = copied ? : ret;
+                       goto unlock;
+               }
                continue;
 
 found_ok_skb:
@@ -1816,6 +1837,7 @@ skip_copy:
        if (buffers_freed)
                chtls_cleanup_rbuf(sk, copied);
 
+unlock:
        release_sock(sk);
        return copied;
 }
index d1da741..e84a066 100644 (file)
@@ -146,7 +146,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
                err = gve_rx_alloc_buffer(priv, &priv->pdev->dev, &rx->data.page_info[i],
                                          &rx->data.data_ring[i]);
                if (err)
-                       goto alloc_err;
+                       goto alloc_err_rda;
        }
 
        if (!rx->data.raw_addressing) {
@@ -171,12 +171,26 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
        return slots;
 
 alloc_err_qpl:
+       /* Fully free the copy pool pages. */
        while (j--) {
                page_ref_sub(rx->qpl_copy_pool[j].page,
                             rx->qpl_copy_pool[j].pagecnt_bias - 1);
                put_page(rx->qpl_copy_pool[j].page);
        }
-alloc_err:
+
+       /* Do not fully free QPL pages - only remove the bias added in this
+        * function with gve_setup_rx_buffer.
+        */
+       while (i--)
+               page_ref_sub(rx->data.page_info[i].page,
+                            rx->data.page_info[i].pagecnt_bias - 1);
+
+       gve_unassign_qpl(priv, rx->data.qpl->id);
+       rx->data.qpl = NULL;
+
+       return err;
+
+alloc_err_rda:
        while (i--)
                gve_rx_free_buffer(&priv->pdev->dev,
                                   &rx->data.page_info[i],
index eeef20f..1b49385 100644 (file)
@@ -1082,7 +1082,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
                     I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
        j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
            I40E_PFLAN_QALLOC_LASTQ_SHIFT;
-       if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+       if (val & I40E_PFLAN_QALLOC_VALID_MASK && j >= base_queue)
                num_queues = (j - base_queue) + 1;
        else
                num_queues = 0;
@@ -1092,7 +1092,7 @@ void i40e_clear_hw(struct i40e_hw *hw)
            I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
        j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
            I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
-       if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+       if (val & I40E_PF_VT_PFALLOC_VALID_MASK && j >= i)
                num_vfs = (j - i) + 1;
        else
                num_vfs = 0;
index 4f39863..7b12569 100644 (file)
@@ -2093,3 +2093,35 @@ lag_rebuild_out:
        }
        mutex_unlock(&pf->lag_mutex);
 }
+
+/**
+ * ice_lag_is_switchdev_running
+ * @pf: pointer to PF structure
+ *
+ * Check if switchdev is running on any of the interfaces connected to lag.
+ */
+bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+{
+       struct ice_lag *lag = pf->lag;
+       struct net_device *tmp_nd;
+
+       if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+               return false;
+
+       rcu_read_lock();
+       for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+               struct ice_netdev_priv *priv = netdev_priv(tmp_nd);
+
+               if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
+                   !priv->vsi->back)
+                       continue;
+
+               if (ice_is_switchdev_running(priv->vsi->back)) {
+                       rcu_read_unlock();
+                       return true;
+               }
+       }
+       rcu_read_unlock();
+
+       return false;
+}
index 18075b8..facb6c8 100644 (file)
@@ -62,4 +62,5 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
 int ice_init_lag(struct ice_pf *pf);
 void ice_deinit_lag(struct ice_pf *pf);
 void ice_lag_rebuild(struct ice_pf *pf);
+bool ice_lag_is_switchdev_running(struct ice_pf *pf);
 #endif /* _ICE_LAG_H_ */
index 201570c..73bbf06 100644 (file)
@@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
 
        ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
                                ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
-                               ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
-                                ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+                               (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
 }
 
 static void
@@ -3575,6 +3574,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi)
 
        dev = ice_pf_to_dev(vsi->back);
 
+       if (ice_lag_is_switchdev_running(vsi->back)) {
+               dev_dbg(dev, "VSI %d passed in is part of a LAG containing interfaces in switchdev mode, nothing to do\n",
+                       vsi->vsi_num);
+               return 0;
+       }
+
        /* the VSI passed in is already the default VSI */
        if (ice_is_vsi_dflt_vsi(vsi)) {
                dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
index c8286ad..7784135 100644 (file)
@@ -6,6 +6,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
 #include "ice.h"
 #include "ice_base.h"
 #include "ice_lib.h"
@@ -4683,6 +4684,9 @@ static void ice_init_features(struct ice_pf *pf)
 
 static void ice_deinit_features(struct ice_pf *pf)
 {
+       if (ice_is_safe_mode(pf))
+               return;
+
        ice_deinit_lag(pf);
        if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
                ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -5014,6 +5018,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
                return -EINVAL;
        }
 
+       /* when under a kdump kernel initiate a reset before enabling the
+        * device in order to clear out any pending DMA transactions. These
+        * transactions can cause some systems to machine check when doing
+        * the pcim_enable_device() below.
+        */
+       if (is_kdump_kernel()) {
+               pci_save_state(pdev);
+               pci_clear_master(pdev);
+               err = pcie_flr(pdev);
+               if (err)
+                       return err;
+               pci_restore_state(pdev);
+       }
+
        /* this driver uses devres, see
         * Documentation/driver-api/driver-model/devres.rst
         */
index 29cc609..ea88ac0 100644 (file)
@@ -28,6 +28,9 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
        struct vf_macvlans *mv_list;
        int num_vf_macvlans, i;
 
+       /* Initialize list of VF macvlans */
+       INIT_LIST_HEAD(&adapter->vf_mvs.l);
+
        num_vf_macvlans = hw->mac.num_rar_entries -
                          (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs);
        if (!num_vf_macvlans)
@@ -36,8 +39,6 @@ static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter,
        mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans),
                          GFP_KERNEL);
        if (mv_list) {
-               /* Initialize list of VF macvlans */
-               INIT_LIST_HEAD(&adapter->vf_mvs.l);
                for (i = 0; i < num_vf_macvlans; i++) {
                        mv_list[i].vf = -1;
                        mv_list[i].free = true;
index dbc518f..5b46ca4 100644 (file)
@@ -715,20 +715,19 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
                hw_desc->dptr = tx_buffer->sglist_dma;
        }
 
-       /* Flush the hw descriptor before writing to doorbell */
-       wmb();
-
-       /* Ring Doorbell to notify the NIC there is a new packet */
-       writel(1, iq->doorbell_reg);
+       netdev_tx_sent_queue(iq->netdev_q, skb->len);
+       skb_tx_timestamp(skb);
        atomic_inc(&iq->instr_pending);
        wi++;
        if (wi == iq->max_count)
                wi = 0;
        iq->host_write_index = wi;
+       /* Flush the hw descriptor before writing to doorbell */
+       wmb();
 
-       netdev_tx_sent_queue(iq->netdev_q, skb->len);
+       /* Ring Doorbell to notify the NIC there is a new packet */
+       writel(1, iq->doorbell_reg);
        iq->stats.instr_posted++;
-       skb_tx_timestamp(skb);
        return NETDEV_TX_OK;
 
 dma_map_sg_err:
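
The reordering above moves the BQL accounting and timestamping ahead of the memory barrier and doorbell write. A hypothetical distillation of that ordering follows (example_tx_kick() and its parameters are invented; netdev_tx_sent_queue(), skb_tx_timestamp(), wmb() and writel() are the real primitives):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <asm/barrier.h>

/* Hypothetical transmit tail: finish the software bookkeeping first
 * (BQL accounting, timestamp, ring index), order those writes against
 * the doorbell with wmb(), and ring the doorbell last so the NIC never
 * fetches a half-written descriptor.
 */
static void example_tx_kick(struct netdev_queue *txq, struct sk_buff *skb,
                            u32 *host_write_index, u32 next_index,
                            void __iomem *doorbell)
{
        netdev_tx_sent_queue(txq, skb->len);
        skb_tx_timestamp(skb);
        *host_write_index = next_index;

        wmb();                  /* ring state visible before the doorbell */
        writel(1, doorbell);
}
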
index 59b1382..6cc7a78 100644 (file)
@@ -1357,10 +1357,12 @@ static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
 
        if (netif_running(secy->netdev)) {
                /* Keys cannot be changed after creation */
-               err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
-                                          sw_tx_sa->next_pn);
-               if (err)
-                       return err;
+               if (ctx->sa.update_pn) {
+                       err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+                                                  sw_tx_sa->next_pn);
+                       if (err)
+                               return err;
+               }
 
                err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
                                              sa_num, sw_tx_sa->active);
@@ -1529,6 +1531,9 @@ static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
                if (err)
                        return err;
 
+               if (!ctx->sa.update_pn)
+                       return 0;
+
                err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num,
                                               rx_sa->next_pn);
                if (err)
index 997feda..818ce76 100644 (file)
@@ -1403,6 +1403,7 @@ int otx2_pool_init(struct otx2_nic *pfvf, u16 pool_id,
                return 0;
        }
 
+       pp_params.order = get_order(buf_size);
        pp_params.flags = PP_FLAG_PAGE_FRAG | PP_FLAG_DMA_MAP;
        pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
        pp_params.nid = NUMA_NO_NODE;
index afb3485..c22b0ad 100644 (file)
@@ -2186,52 +2186,23 @@ static u16 cmdif_rev(struct mlx5_core_dev *dev)
 
 int mlx5_cmd_init(struct mlx5_core_dev *dev)
 {
-       int size = sizeof(struct mlx5_cmd_prot_block);
-       int align = roundup_pow_of_two(size);
        struct mlx5_cmd *cmd = &dev->cmd;
-       u32 cmd_l;
-       int err;
-
-       cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
-       if (!cmd->pool)
-               return -ENOMEM;
 
-       err = alloc_cmd_page(dev, cmd);
-       if (err)
-               goto err_free_pool;
-
-       cmd_l = (u32)(cmd->dma);
-       if (cmd_l & 0xfff) {
-               mlx5_core_err(dev, "invalid command queue address\n");
-               err = -ENOMEM;
-               goto err_cmd_page;
-       }
        cmd->checksum_disabled = 1;
 
        spin_lock_init(&cmd->alloc_lock);
        spin_lock_init(&cmd->token_lock);
 
-       create_msg_cache(dev);
-
        set_wqname(dev);
        cmd->wq = create_singlethread_workqueue(cmd->wq_name);
        if (!cmd->wq) {
                mlx5_core_err(dev, "failed to create command workqueue\n");
-               err = -ENOMEM;
-               goto err_cache;
+               return -ENOMEM;
        }
 
        mlx5_cmdif_debugfs_init(dev);
 
        return 0;
-
-err_cache:
-       destroy_msg_cache(dev);
-err_cmd_page:
-       free_cmd_page(dev, cmd);
-err_free_pool:
-       dma_pool_destroy(cmd->pool);
-       return err;
 }
 
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
@@ -2240,15 +2211,15 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
 
        mlx5_cmdif_debugfs_cleanup(dev);
        destroy_workqueue(cmd->wq);
-       destroy_msg_cache(dev);
-       free_cmd_page(dev, cmd);
-       dma_pool_destroy(cmd->pool);
 }
 
 int mlx5_cmd_enable(struct mlx5_core_dev *dev)
 {
+       int size = sizeof(struct mlx5_cmd_prot_block);
+       int align = roundup_pow_of_two(size);
        struct mlx5_cmd *cmd = &dev->cmd;
        u32 cmd_h, cmd_l;
+       int err;
 
        memset(&cmd->vars, 0, sizeof(cmd->vars));
        cmd->vars.cmdif_rev = cmdif_rev(dev);
@@ -2281,10 +2252,21 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
        sema_init(&cmd->vars.pages_sem, 1);
        sema_init(&cmd->vars.throttle_sem, DIV_ROUND_UP(cmd->vars.max_reg_cmds, 2));
 
+       cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
+       if (!cmd->pool)
+               return -ENOMEM;
+
+       err = alloc_cmd_page(dev, cmd);
+       if (err)
+               goto err_free_pool;
+
        cmd_h = (u32)((u64)(cmd->dma) >> 32);
        cmd_l = (u32)(cmd->dma);
-       if (WARN_ON(cmd_l & 0xfff))
-               return -EINVAL;
+       if (cmd_l & 0xfff) {
+               mlx5_core_err(dev, "invalid command queue address\n");
+               err = -ENOMEM;
+               goto err_cmd_page;
+       }
 
        iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
        iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);
@@ -2297,17 +2279,27 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
        cmd->mode = CMD_MODE_POLLING;
        cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
 
+       create_msg_cache(dev);
        create_debugfs_files(dev);
 
        return 0;
+
+err_cmd_page:
+       free_cmd_page(dev, cmd);
+err_free_pool:
+       dma_pool_destroy(cmd->pool);
+       return err;
 }
 
 void mlx5_cmd_disable(struct mlx5_core_dev *dev)
 {
        struct mlx5_cmd *cmd = &dev->cmd;
 
-       clean_debug_files(dev);
        flush_workqueue(cmd->wq);
+       clean_debug_files(dev);
+       destroy_msg_cache(dev);
+       free_cmd_page(dev, cmd);
+       dma_pool_destroy(cmd->pool);
 }
 
 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
index 7c0f2ad..ad78934 100644 (file)
@@ -848,7 +848,7 @@ static void mlx5_fw_tracer_ownership_change(struct work_struct *work)
 
        mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner);
        if (tracer->owner) {
-               tracer->owner = false;
+               mlx5_fw_tracer_ownership_acquire(tracer);
                return;
        }
 
index 0fef853..5d128c5 100644 (file)
@@ -467,6 +467,17 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb,
                /* only handle the event on peers */
                if (mlx5_esw_bridge_is_local(dev, rep, esw))
                        break;
+
+               fdb_info = container_of(info,
+                                       struct switchdev_notifier_fdb_info,
+                                       info);
+               /* Mark for deletion to prevent the update wq task from
+                * spuriously refreshing the entry, which would mark it again as
+                * offloaded in the SW bridge. After this, fall through to the
+                * regular async delete code.
+                */
+               mlx5_esw_bridge_fdb_mark_deleted(dev, vport_num, esw_owner_vhca_id, br_offloads,
+                                                fdb_info);
                fallthrough;
        case SWITCHDEV_FDB_ADD_TO_DEVICE:
        case SWITCHDEV_FDB_DEL_TO_DEVICE:
index 1730f6a..b10e40e 100644 (file)
@@ -24,7 +24,8 @@ static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
 
        route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
 
-       if (!route_dev || !netif_is_ovs_master(route_dev))
+       if (!route_dev || !netif_is_ovs_master(route_dev) ||
+           attr->parse_attr->filter_dev == e->out_dev)
                goto out;
 
        err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
index 12f56d0..8bed17d 100644 (file)
@@ -874,11 +874,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
        }
 
 out:
-       if (flags & XDP_XMIT_FLUSH) {
-               if (sq->mpwqe.wqe)
-                       mlx5e_xdp_mpwqe_complete(sq);
+       if (sq->mpwqe.wqe)
+               mlx5e_xdp_mpwqe_complete(sq);
+
+       if (flags & XDP_XMIT_FLUSH)
                mlx5e_xmit_xdp_doorbell(sq);
-       }
 
        return nxmit;
 }
index c9c1db9..d4ebd87 100644 (file)
@@ -580,7 +580,7 @@ static int mlx5e_macsec_upd_txsa(struct macsec_context *ctx)
                goto out;
        }
 
-       if (tx_sa->next_pn != ctx_tx_sa->next_pn_halves.lower) {
+       if (ctx->sa.update_pn) {
                netdev_err(netdev, "MACsec offload: update TX sa %d PN isn't supported\n",
                           assoc_num);
                err = -EINVAL;
@@ -973,7 +973,7 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx)
                goto out;
        }
 
-       if (rx_sa->next_pn != ctx_rx_sa->next_pn_halves.lower) {
+       if (ctx->sa.update_pn) {
                netdev_err(ctx->netdev,
                           "MACsec offload update RX sa %d PN isn't supported\n",
                           assoc_num);
index a2ae791..acb4077 100644 (file)
@@ -3952,13 +3952,14 @@ static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
        struct mlx5e_channels *chs = &priv->channels;
        struct mlx5e_params new_params;
        int err;
+       bool rx_ts_over_crc = !enable;
 
        mutex_lock(&priv->state_lock);
 
        new_params = chs->params;
        new_params.scatter_fcs_en = enable;
        err = mlx5e_safe_switch_params(priv, &new_params, mlx5e_set_rx_port_ts_wrap,
-                                      &new_params.scatter_fcs_en, true);
+                                      &rx_ts_over_crc, true);
        mutex_unlock(&priv->state_lock);
        return err;
 }
index 2fdb889..fd1cce5 100644 (file)
@@ -701,7 +701,7 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
        /* update HW stats in background for next time */
        mlx5e_queue_update_stats(priv);
-       memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
+       mlx5e_stats_copy_rep_stats(stats, &priv->stats.rep_stats);
 }
 
 static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
@@ -769,6 +769,7 @@ static int mlx5e_rep_max_nch_limit(struct mlx5_core_dev *mdev)
 
 static void mlx5e_build_rep_params(struct net_device *netdev)
 {
+       const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
@@ -794,8 +795,15 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
        /* RQ */
        mlx5e_build_rq_params(mdev, params);
 
+       /* If netdev is already registered (e.g. move from nic profile to uplink),
+        * RTNL lock must be held before triggering netdev notifiers.
+        */
+       if (take_rtnl)
+               rtnl_lock();
        /* update XDP supported features */
        mlx5e_set_xdp_feature(netdev);
+       if (take_rtnl)
+               rtnl_unlock();
 
        /* CQ moderation params */
        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
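
The conditional rtnl_lock() above exists because, per the comment in the hunk, updating the XDP feature flags can trigger netdev notifiers once the netdev is registered. A minimal, hypothetical sketch of that pattern (example_update_features() is invented; reg_state, NETREG_REGISTERED and the RTNL helpers are the real kernel interfaces):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical sketch: take RTNL only when the netdev is already
 * registered, i.e. when the guarded operation can actually emit netdev
 * notifiers; during initial profile setup the lock is unnecessary.
 */
static void example_update_features(struct net_device *netdev)
{
        const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;

        if (take_rtnl)
                rtnl_lock();
        /* ... anything that may trigger netdev notifiers ... */
        if (take_rtnl)
                rtnl_unlock();
}
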
index 3fd11b0..8d9743a 100644 (file)
@@ -457,26 +457,41 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
 {
        int remaining = wqe_bulk;
-       int i = 0;
+       int total_alloc = 0;
+       int refill_alloc;
+       int refill;
 
        /* The WQE bulk is split into smaller bulks that are sized
         * according to the page pool cache refill size to avoid overflowing
         * the page pool cache due to too many page releases at once.
         */
        do {
-               int refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
-               int alloc_count;
+               refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
 
-               mlx5e_free_rx_wqes(rq, ix + i, refill);
-               alloc_count = mlx5e_alloc_rx_wqes(rq, ix + i, refill);
-               i += alloc_count;
-               if (unlikely(alloc_count != refill))
-                       break;
+               mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
+               refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
+               if (unlikely(refill_alloc != refill))
+                       goto err_free;
 
+               total_alloc += refill_alloc;
                remaining -= refill;
        } while (remaining);
 
-       return i;
+       return total_alloc;
+
+err_free:
+       mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
+
+       for (int i = 0; i < total_alloc + refill; i++) {
+               int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
+               struct mlx5e_wqe_frag_info *frag;
+
+               frag = get_frag(rq, j);
+               for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
+                       frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+       }
+
+       return 0;
 }
 
 static void
@@ -816,6 +831,8 @@ err_unmap:
                mlx5e_page_release_fragmented(rq, frag_page);
        }
 
+       bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
+
 err:
        rq->stats->buff_alloc_err++;
 
index 176fa59..477c547 100644 (file)
@@ -484,11 +484,20 @@ struct mlx5e_stats {
        struct mlx5e_vnic_env_stats vnic;
        struct mlx5e_vport_stats vport;
        struct mlx5e_pport_stats pport;
-       struct rtnl_link_stats64 vf_vport;
        struct mlx5e_pcie_stats pcie;
        struct mlx5e_rep_stats rep_stats;
 };
 
+static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport,
+                                             struct mlx5e_rep_stats *rep_stats)
+{
+       memset(vf_vport, 0, sizeof(*vf_vport));
+       vf_vport->rx_packets = rep_stats->vport_rx_packets;
+       vf_vport->tx_packets = rep_stats->vport_tx_packets;
+       vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
+       vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+}
+
 extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
 unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv);
 
index c24828b..c859048 100644 (file)
@@ -4972,7 +4972,8 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
                        if (err)
                                return err;
 
-                       rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
+                       mlx5e_stats_copy_rep_stats(&rpriv->prev_vf_vport_stats,
+                                                  &priv->stats.rep_stats);
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
@@ -5012,7 +5013,7 @@ void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
        u64 dbytes;
        u64 dpkts;
 
-       cur_stats = priv->stats.vf_vport;
+       mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
        dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
        dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
        rpriv->prev_vf_vport_stats = cur_stats;
index e36294b..1b9bc32 100644 (file)
@@ -1748,6 +1748,28 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
        entry->lastuse = jiffies;
 }
 
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+                                     struct mlx5_esw_bridge_offloads *br_offloads,
+                                     struct switchdev_notifier_fdb_info *fdb_info)
+{
+       struct mlx5_esw_bridge_fdb_entry *entry;
+       struct mlx5_esw_bridge *bridge;
+
+       bridge = mlx5_esw_bridge_from_port_lookup(vport_num, esw_owner_vhca_id, br_offloads);
+       if (!bridge)
+               return;
+
+       entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
+       if (!entry) {
+               esw_debug(br_offloads->esw->dev,
+                         "FDB mark deleted entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
+                         fdb_info->addr, fdb_info->vid, vport_num);
+               return;
+       }
+
+       entry->flags |= MLX5_ESW_BRIDGE_FLAG_DELETED;
+}
+
 void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
                                struct mlx5_esw_bridge_offloads *br_offloads,
                                struct switchdev_notifier_fdb_info *fdb_info)
@@ -1810,7 +1832,8 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
                        unsigned long lastuse =
                                (unsigned long)mlx5_fc_query_lastuse(entry->ingress_counter);
 
-                       if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
+                       if (entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER |
+                                           MLX5_ESW_BRIDGE_FLAG_DELETED))
                                continue;
 
                        if (time_after(lastuse, entry->lastuse))
index c2c7c70..d6f5391 100644 (file)
@@ -62,6 +62,9 @@ int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_nu
 void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
                                     struct mlx5_esw_bridge_offloads *br_offloads,
                                     struct switchdev_notifier_fdb_info *fdb_info);
+void mlx5_esw_bridge_fdb_mark_deleted(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
+                                     struct mlx5_esw_bridge_offloads *br_offloads,
+                                     struct switchdev_notifier_fdb_info *fdb_info);
 void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
                                struct mlx5_esw_bridge_offloads *br_offloads,
                                struct switchdev_notifier_fdb_info *fdb_info);
index 4911cc3..7c251af 100644 (file)
@@ -133,6 +133,7 @@ struct mlx5_esw_bridge_mdb_key {
 enum {
        MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0),
        MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1),
+       MLX5_ESW_BRIDGE_FLAG_DELETED = BIT(2),
 };
 
 enum {
index d4cde65..8d0b915 100644 (file)
@@ -1038,11 +1038,8 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
        return ERR_PTR(err);
 }
 
-static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
 {
-       MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
-       mlx5_eq_notifier_register(esw->dev, &esw->nb);
-
        if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
                MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
                             ESW_FUNCTIONS_CHANGED);
@@ -1050,13 +1047,11 @@ static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
        }
 }
 
-static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
+static void mlx5_eswitch_event_handler_unregister(struct mlx5_eswitch *esw)
 {
        if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
                mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);
 
-       mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
-
        flush_workqueue(esw->work_queue);
 }
 
@@ -1483,6 +1478,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 
        mlx5_eswitch_update_num_of_vfs(esw, num_vfs);
 
+       MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
+       mlx5_eq_notifier_register(esw->dev, &esw->nb);
+
        if (esw->mode == MLX5_ESWITCH_LEGACY) {
                err = esw_legacy_enable(esw);
        } else {
@@ -1495,7 +1493,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
 
        esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
 
-       mlx5_eswitch_event_handlers_register(esw);
+       mlx5_eswitch_event_handler_register(esw);
 
        esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
                 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
@@ -1622,7 +1620,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
         */
        mlx5_esw_mode_change_notify(esw, MLX5_ESWITCH_LEGACY);
 
-       mlx5_eswitch_event_handlers_unregister(esw);
+       mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+       mlx5_eswitch_event_handler_unregister(esw);
 
        esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
                 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
index bb8eeb8..52c2fe3 100644 (file)
@@ -310,8 +310,8 @@ const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = {
        .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload,
 };
 
-static bool mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
-                                            bool learning_en)
+static int mlxsw_sp2_nve_vxlan_learning_set(struct mlxsw_sp *mlxsw_sp,
+                                           bool learning_en)
 {
        char tnpc_pl[MLXSW_REG_TNPC_LEN];
 
index f21cf1f..153533c 100644 (file)
@@ -210,6 +210,7 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
        unsigned int msg_len = nfp_flower_cmsg_get_data_len(skb);
        struct nfp_flower_cmsg_merge_hint *msg;
        struct nfp_fl_payload *sub_flows[2];
+       struct nfp_flower_priv *priv;
        int err, i, flow_cnt;
 
        msg = nfp_flower_cmsg_get_data(skb);
@@ -228,14 +229,15 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
                return;
        }
 
-       rtnl_lock();
+       priv = app->priv;
+       mutex_lock(&priv->nfp_fl_lock);
        for (i = 0; i < flow_cnt; i++) {
                u32 ctx = be32_to_cpu(msg->flow[i].host_ctx);
 
                sub_flows[i] = nfp_flower_get_fl_payload_from_ctx(app, ctx);
                if (!sub_flows[i]) {
                        nfp_flower_cmsg_warn(app, "Invalid flow in merge hint\n");
-                       goto err_rtnl_unlock;
+                       goto err_mutex_unlock;
                }
        }
 
@@ -244,8 +246,8 @@ nfp_flower_cmsg_merge_hint_rx(struct nfp_app *app, struct sk_buff *skb)
        if (err == -ENOMEM)
                nfp_flower_cmsg_warn(app, "Flow merge memory fail.\n");
 
-err_rtnl_unlock:
-       rtnl_unlock();
+err_mutex_unlock:
+       mutex_unlock(&priv->nfp_fl_lock);
 }
 
 static void
index 2643c4b..2967bab 100644 (file)
@@ -2131,8 +2131,6 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl
        struct nfp_fl_ct_flow_entry *ct_entry;
        struct netlink_ext_ack *extack = NULL;
 
-       ASSERT_RTNL();
-
        extack = flow->common.extack;
        switch (flow->command) {
        case FLOW_CLS_REPLACE:
@@ -2178,9 +2176,13 @@ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb
 
        switch (type) {
        case TC_SETUP_CLSFLOWER:
-               rtnl_lock();
+               while (!mutex_trylock(&zt->priv->nfp_fl_lock)) {
+                       if (!zt->nft) /* avoid deadlock */
+                               return err;
+                       msleep(20);
+               }
                err = nfp_fl_ct_offload_nft_flow(zt, flow);
-               rtnl_unlock();
+               mutex_unlock(&zt->priv->nfp_fl_lock);
                break;
        default:
                return -EOPNOTSUPP;
@@ -2208,6 +2210,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
        struct nfp_fl_ct_flow_entry *ct_entry;
        struct nfp_fl_ct_zone_entry *zt;
        struct rhashtable *m_table;
+       struct nf_flowtable *nft;
 
        if (!ct_map_ent)
                return -ENOENT;
@@ -2226,8 +2229,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
                if (ct_map_ent->cookie > 0)
                        kfree(ct_map_ent);
 
-               if (!zt->pre_ct_count) {
-                       zt->nft = NULL;
+               if (!zt->pre_ct_count && zt->nft) {
+                       nft = zt->nft;
+                       zt->nft = NULL; /* avoid deadlock */
+                       nf_flow_table_offload_del_cb(nft,
+                                                    nfp_fl_ct_handle_nft_flow,
+                                                    zt);
                        nfp_fl_ct_clean_nft_entries(zt);
                }
                break;
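
The conntrack changes above replace rtnl_lock() with the driver-private nfp_fl_lock and add a trylock loop whose "avoid deadlock" check bails out once zt->nft has been cleared by the teardown path. A hypothetical distillation of the shape of that loop (example_offload_locked() and the active flag are invented):

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical sketch: spin with mutex_trylock() rather than blocking,
 * and give up if the lock owner has already signalled teardown (a flag
 * it clears while holding the lock), so this callback cannot deadlock
 * against the path that is waiting for the callback to be removed.
 */
static int example_offload_locked(struct mutex *lock, const bool *active)
{
        while (!mutex_trylock(lock)) {
                if (!READ_ONCE(*active))
                        return -EOPNOTSUPP;     /* teardown in progress */
                msleep(20);
        }

        /* ... perform the offload work under the lock ... */

        mutex_unlock(lock);
        return 0;
}
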
index 4037254..2b7c947 100644 (file)
@@ -297,6 +297,7 @@ struct nfp_fl_internal_ports {
  * @predt_list:                List to keep track of decap pretun flows
  * @neigh_table:       Table to keep track of neighbor entries
  * @predt_lock:                Lock to serialise predt/neigh table updates
+ * @nfp_fl_lock:       Lock to protect the flow offload operation
  */
 struct nfp_flower_priv {
        struct nfp_app *app;
@@ -339,6 +340,7 @@ struct nfp_flower_priv {
        struct list_head predt_list;
        struct rhashtable neigh_table;
        spinlock_t predt_lock; /* Lock to serialise predt/neigh table updates */
+       struct mutex nfp_fl_lock; /* Protect the flow operation */
 };
 
 /**
index 0f06ef6..80e4675 100644 (file)
@@ -528,6 +528,8 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
        if (err)
                goto err_free_stats_ctx_table;
 
+       mutex_init(&priv->nfp_fl_lock);
+
        err = rhashtable_init(&priv->ct_zone_table, &nfp_zone_table_params);
        if (err)
                goto err_free_merge_table;
index c153f05..0aceef9 100644 (file)
@@ -1009,8 +1009,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app,
        u64 parent_ctx = 0;
        int err;
 
-       ASSERT_RTNL();
-
        if (sub_flow1 == sub_flow2 ||
            nfp_flower_is_merge_flow(sub_flow1) ||
            nfp_flower_is_merge_flow(sub_flow2))
@@ -1727,19 +1725,30 @@ static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                        struct flow_cls_offload *flower)
 {
+       struct nfp_flower_priv *priv = app->priv;
+       int ret;
+
        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;
 
+       mutex_lock(&priv->nfp_fl_lock);
        switch (flower->command) {
        case FLOW_CLS_REPLACE:
-               return nfp_flower_add_offload(app, netdev, flower);
+               ret = nfp_flower_add_offload(app, netdev, flower);
+               break;
        case FLOW_CLS_DESTROY:
-               return nfp_flower_del_offload(app, netdev, flower);
+               ret = nfp_flower_del_offload(app, netdev, flower);
+               break;
        case FLOW_CLS_STATS:
-               return nfp_flower_get_stats(app, netdev, flower);
+               ret = nfp_flower_get_stats(app, netdev, flower);
+               break;
        default:
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               break;
        }
+       mutex_unlock(&priv->nfp_fl_lock);
+
+       return ret;
 }
 
 static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
@@ -1778,6 +1787,7 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        repr_priv = repr->app_priv;
        repr_priv->block_shared = f->block_shared;
        f->driver_block_list = &nfp_block_cb_list;
+       f->unlocked_driver_cb = true;
 
        switch (f->command) {
        case FLOW_BLOCK_BIND:
@@ -1876,6 +1886,8 @@ nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct Qdisc *sch, str
             nfp_flower_internal_port_can_offload(app, netdev)))
                return -EOPNOTSUPP;
 
+       f->unlocked_driver_cb = true;
+
        switch (f->command) {
        case FLOW_BLOCK_BIND:
                cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
index 99052a9..e7180b4 100644 (file)
@@ -523,25 +523,31 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 {
        struct netlink_ext_ack *extack = flow->common.extack;
        struct nfp_flower_priv *fl_priv = app->priv;
+       int ret;
 
        if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
                return -EOPNOTSUPP;
        }
 
+       mutex_lock(&fl_priv->nfp_fl_lock);
        switch (flow->command) {
        case TC_CLSMATCHALL_REPLACE:
-               return nfp_flower_install_rate_limiter(app, netdev, flow,
-                                                      extack);
+               ret = nfp_flower_install_rate_limiter(app, netdev, flow, extack);
+               break;
        case TC_CLSMATCHALL_DESTROY:
-               return nfp_flower_remove_rate_limiter(app, netdev, flow,
-                                                     extack);
+               ret = nfp_flower_remove_rate_limiter(app, netdev, flow, extack);
+               break;
        case TC_CLSMATCHALL_STATS:
-               return nfp_flower_stats_rate_limiter(app, netdev, flow,
-                                                    extack);
+               ret = nfp_flower_stats_rate_limiter(app, netdev, flow, extack);
+               break;
        default:
-               return -EOPNOTSUPP;
+               ret = -EOPNOTSUPP;
+               break;
        }
+       mutex_unlock(&fl_priv->nfp_fl_lock);
+
+       return ret;
 }
 
 /* Offload tc action, currently only for tc police */
index 717a0b3..ab5ef25 100644 (file)
@@ -113,7 +113,10 @@ static void qed_ll2b_complete_tx_packet(void *cxt,
 static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
                                u8 **data, dma_addr_t *phys_addr)
 {
-       *data = kmalloc(cdev->ll2->rx_size, GFP_ATOMIC);
+       size_t size = cdev->ll2->rx_size + NET_SKB_PAD +
+                     SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       *data = kmalloc(size, GFP_ATOMIC);
        if (!(*data)) {
                DP_INFO(cdev, "Failed to allocate LL2 buffer data\n");
                return -ENOMEM;
@@ -2589,7 +2592,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
        INIT_LIST_HEAD(&cdev->ll2->list);
        spin_lock_init(&cdev->ll2->lock);
 
-       cdev->ll2->rx_size = NET_SKB_PAD + ETH_HLEN +
+       cdev->ll2->rx_size = PRM_DMA_PAD_BYTES_NUM + ETH_HLEN +
                             L1_CACHE_BYTES + params->mtu;
 
        /* Allocate memory for LL2.
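
The allocation fix above grows each LL2 RX buffer beyond the bare rx_size. The arithmetic, as a hypothetical helper (the assumption, not spelled out in the hunk, is that the buffer later backs an skb built directly over it and therefore needs NET_SKB_PAD headroom plus tailroom for struct skb_shared_info):

#include <linux/skbuff.h>

/* Hypothetical sizing helper: a buffer that will back an skb must
 * reserve headroom and aligned tailroom for the struct skb_shared_info
 * that lives at the end of the data area, on top of the payload itself.
 */
static size_t example_rx_buf_size(size_t payload)
{
        return NET_SKB_PAD + payload +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}
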
index 7df9f9f..0ef0b88 100644 (file)
@@ -2167,6 +2167,8 @@ static int ravb_close(struct net_device *ndev)
                        of_phy_deregister_fixed_link(np);
        }
 
+       cancel_work_sync(&priv->work);
+
        if (info->multi_irqs) {
                free_irq(priv->tx_irqs[RAVB_NC], ndev);
                free_irq(priv->rx_irqs[RAVB_NC], ndev);
@@ -2891,8 +2893,6 @@ static int ravb_remove(struct platform_device *pdev)
        clk_disable_unprepare(priv->gptp_clk);
        clk_disable_unprepare(priv->refclk);
 
-       dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
-                         priv->desc_bat_dma);
        /* Set reset mode */
        ravb_write(ndev, CCC_OPC_RESET, CCC);
        unregister_netdev(ndev);
@@ -2900,6 +2900,8 @@ static int ravb_remove(struct platform_device *pdev)
                netif_napi_del(&priv->napi[RAVB_NC]);
        netif_napi_del(&priv->napi[RAVB_BE]);
        ravb_mdio_release(priv);
+       dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat,
+                         priv->desc_bat_dma);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        reset_control_assert(priv->rstc);
index fc01ad3..0fc0b6b 100644 (file)
@@ -1254,7 +1254,7 @@ static void rswitch_adjust_link(struct net_device *ndev)
                phy_print_status(phydev);
                if (phydev->link)
                        phy_power_on(rdev->serdes);
-               else
+               else if (rdev->serdes->power_count)
                        phy_power_off(rdev->serdes);
 
                rdev->etha->link = phydev->link;
@@ -1964,15 +1964,17 @@ static void rswitch_deinit(struct rswitch_private *priv)
        rswitch_gwca_hw_deinit(priv);
        rcar_gen4_ptp_unregister(priv->ptp_priv);
 
-       for (i = 0; i < RSWITCH_NUM_PORTS; i++) {
+       rswitch_for_each_enabled_port(priv, i) {
                struct rswitch_device *rdev = priv->rdev[i];
 
-               phy_exit(priv->rdev[i]->serdes);
-               rswitch_ether_port_deinit_one(rdev);
                unregister_netdev(rdev->ndev);
-               rswitch_device_free(priv, i);
+               rswitch_ether_port_deinit_one(rdev);
+               phy_exit(priv->rdev[i]->serdes);
        }
 
+       for (i = 0; i < RSWITCH_NUM_PORTS; i++)
+               rswitch_device_free(priv, i);
+
        rswitch_gwca_ts_queue_free(priv);
        rswitch_gwca_linkfix_free(priv);
 
index 0a33466..cac61f5 100644 (file)
@@ -90,12 +90,16 @@ config TI_CPTS
          The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
          driver offers a PTP Hardware Clock.
 
+config TI_K3_CPPI_DESC_POOL
+       tristate
+
 config TI_K3_AM65_CPSW_NUSS
        tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
        depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
        select NET_DEVLINK
        select TI_DAVINCI_MDIO
        select PHYLINK
+       select TI_K3_CPPI_DESC_POOL
        imply PHY_TI_GMII_SEL
        depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
        help
@@ -187,6 +191,7 @@ config TI_ICSSG_PRUETH
        tristate "TI Gigabit PRU Ethernet driver"
        select PHYLIB
        select TI_ICSS_IEP
+       select TI_K3_CPPI_DESC_POOL
        depends on PRU_REMOTEPROC
        depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
        help
index 34fd7a7..67bed86 100644 (file)
@@ -24,14 +24,15 @@ keystone_netcp-y := netcp_core.o cpsw_ale.o
 obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
 keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
 
+obj-$(CONFIG_TI_K3_CPPI_DESC_POOL) += k3-cppi-desc-pool.o
+
 obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
-ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o am65-cpsw-qos.o
 ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
 obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
 
 obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := k3-cppi-desc-pool.o \
-                 icssg/icssg_prueth.o \
+icssg-prueth-y := icssg/icssg_prueth.o \
                  icssg/icssg_classifier.o \
                  icssg/icssg_queues.o \
                  icssg/icssg_config.o \
index 933b846..b272361 100644 (file)
@@ -379,9 +379,9 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
 
 /* Bitmask for ICSSG r30 commands */
 static const struct icssg_r30_cmd emac_r32_bitmask[] = {
-       {{0xffff0004, 0xffff0100, 0xffff0100, EMAC_NONE}},      /* EMAC_PORT_DISABLE */
+       {{0xffff0004, 0xffff0100, 0xffff0004, EMAC_NONE}},      /* EMAC_PORT_DISABLE */
        {{0xfffb0040, 0xfeff0200, 0xfeff0200, EMAC_NONE}},      /* EMAC_PORT_BLOCK */
-       {{0xffbb0000, 0xfcff0000, 0xdcff0000, EMAC_NONE}},      /* EMAC_PORT_FORWARD */
+       {{0xffbb0000, 0xfcff0000, 0xdcfb0000, EMAC_NONE}},      /* EMAC_PORT_FORWARD */
        {{0xffbb0000, 0xfcff0000, 0xfcff2000, EMAC_NONE}},      /* EMAC_PORT_FORWARD_WO_LEARNING */
        {{0xffff0001, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},       /* ACCEPT ALL */
        {{0xfffe0002, EMAC_NONE,  EMAC_NONE, EMAC_NONE}},       /* ACCEPT TAGGED */
index bb0b339..3dbaddd 100644 (file)
@@ -9,6 +9,9 @@
 #include "icssg_stats.h"
 #include <linux/regmap.h>
 
+#define ICSSG_TX_PACKET_OFFSET 0xA0
+#define ICSSG_TX_BYTE_OFFSET   0xEC
+
 static u32 stats_base[] = {    0x54c,  /* Slice 0 stats start */
                                0xb18,  /* Slice 1 stats start */
 };
@@ -18,6 +21,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
        struct prueth *prueth = emac->prueth;
        int slice = prueth_emac_slice(emac);
        u32 base = stats_base[slice];
+       u32 tx_pkt_cnt = 0;
        u32 val;
        int i;
 
@@ -29,7 +33,12 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
                             base + icssg_all_stats[i].offset,
                             val);
 
+               if (icssg_all_stats[i].offset == ICSSG_TX_PACKET_OFFSET)
+                       tx_pkt_cnt = val;
+
                emac->stats[i] += val;
+               if (icssg_all_stats[i].offset == ICSSG_TX_BYTE_OFFSET)
+                       emac->stats[i] -= tx_pkt_cnt * 8;
        }
 }
 
index 38cc12f..05cc7aa 100644 (file)
@@ -39,6 +39,7 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
 
        gen_pool_destroy(pool->gen_pool);       /* frees pool->name */
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy);
 
 struct k3_cppi_desc_pool *
 k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
@@ -98,29 +99,38 @@ gen_pool_create_fail:
        devm_kfree(pool->dev, pool);
        return ERR_PTR(ret);
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name);
 
 dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
                                      void *addr)
 {
        return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_virt2dma);
 
 void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
 {
        return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_dma2virt);
 
 void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
 {
        return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_alloc);
 
 void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
 {
        gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_free);
 
 size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
 {
        return gen_pool_avail(pool->gen_pool) / pool->desc_size;
 }
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API");
index aebb19f..4ec0dab 100644 (file)
@@ -2740,7 +2740,6 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
        struct device_node *np = spi->dev.of_node;
        struct ca8210_priv *priv = spi_get_drvdata(spi);
        struct ca8210_platform_data *pdata = spi->dev.platform_data;
-       int ret = 0;
 
        if (!np)
                return -EFAULT;
@@ -2757,18 +2756,8 @@ static int ca8210_register_ext_clock(struct spi_device *spi)
                dev_crit(&spi->dev, "Failed to register external clk\n");
                return PTR_ERR(priv->clk);
        }
-       ret = of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
-       if (ret) {
-               clk_unregister(priv->clk);
-               dev_crit(
-                       &spi->dev,
-                       "Failed to register external clock as clock provider\n"
-               );
-       } else {
-               dev_info(&spi->dev, "External clock set as clock provider\n");
-       }
 
-       return ret;
+       return of_clk_add_provider(np, of_clk_src_simple_get, priv->clk);
 }
 
 /**
@@ -2780,8 +2769,8 @@ static void ca8210_unregister_ext_clock(struct spi_device *spi)
 {
        struct ca8210_priv *priv = spi_get_drvdata(spi);
 
-       if (!priv->clk)
-               return
+       if (IS_ERR_OR_NULL(priv->clk))
+               return;
 
        of_clk_del_provider(spi->dev.of_node);
        clk_unregister(priv->clk);
index b7e1514..c5cd455 100644 (file)
@@ -2383,6 +2383,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 
                ctx.sa.assoc_num = assoc_num;
                ctx.sa.tx_sa = tx_sa;
+               ctx.sa.update_pn = !!prev_pn.full64;
                ctx.secy = secy;
 
                ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
@@ -2476,6 +2477,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 
                ctx.sa.assoc_num = assoc_num;
                ctx.sa.rx_sa = rx_sa;
+               ctx.sa.update_pn = !!prev_pn.full64;
                ctx.secy = secy;
 
                ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
index a881e35..bef4cce 100644 (file)
@@ -55,6 +55,27 @@ out:
        return r;
 }
 
+static int mdio_mux_read_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+                            int regnum)
+{
+       struct mdio_mux_child_bus *cb = bus->priv;
+       struct mdio_mux_parent_bus *pb = cb->parent;
+       int r;
+
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+       r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+       if (r)
+               goto out;
+
+       pb->current_child = cb->bus_number;
+
+       r = pb->mii_bus->read_c45(pb->mii_bus, phy_id, dev_addr, regnum);
+out:
+       mutex_unlock(&pb->mii_bus->mdio_lock);
+
+       return r;
+}
+
 /*
  * The parent bus' lock is used to order access to the switch_fn.
  */
@@ -80,6 +101,28 @@ out:
        return r;
 }
 
+static int mdio_mux_write_c45(struct mii_bus *bus, int phy_id, int dev_addr,
+                             int regnum, u16 val)
+{
+       struct mdio_mux_child_bus *cb = bus->priv;
+       struct mdio_mux_parent_bus *pb = cb->parent;
+
+       int r;
+
+       mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
+       r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
+       if (r)
+               goto out;
+
+       pb->current_child = cb->bus_number;
+
+       r = pb->mii_bus->write_c45(pb->mii_bus, phy_id, dev_addr, regnum, val);
+out:
+       mutex_unlock(&pb->mii_bus->mdio_lock);
+
+       return r;
+}
+
 static int parent_count;
 
 static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
@@ -173,6 +216,10 @@ int mdio_mux_init(struct device *dev,
                cb->mii_bus->parent = dev;
                cb->mii_bus->read = mdio_mux_read;
                cb->mii_bus->write = mdio_mux_write;
+               if (parent_bus->read_c45)
+                       cb->mii_bus->read_c45 = mdio_mux_read_c45;
+               if (parent_bus->write_c45)
+                       cb->mii_bus->write_c45 = mdio_mux_write_c45;
                r = of_mdiobus_register(cb->mii_bus, child_bus_node);
                if (r) {
                        mdiobus_free(cb->mii_bus);
index 8478b08..97638ba 100644 (file)
@@ -894,6 +894,9 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .name           = _name,                                        \
        /* PHY_BASIC_FEATURES */                                        \
        .flags          = PHY_IS_INTERNAL,                              \
+       .get_sset_count = bcm_phy_get_sset_count,                       \
+       .get_strings    = bcm_phy_get_strings,                          \
+       .get_stats      = bcm7xxx_28nm_get_phy_stats,                   \
        .probe          = bcm7xxx_28nm_probe,                           \
        .config_init    = bcm7xxx_16nm_ephy_config_init,                \
        .config_aneg    = genphy_config_aneg,                           \
index 018253a..4f39ba6 100644 (file)
@@ -849,6 +849,9 @@ static int vsc8584_macsec_upd_rxsa(struct macsec_context *ctx)
        struct macsec_flow *flow;
        int ret;
 
+       if (ctx->sa.update_pn)
+               return -EINVAL;
+
        flow = vsc8584_macsec_find_flow(ctx, MACSEC_INGR);
        if (IS_ERR(flow))
                return PTR_ERR(flow);
@@ -900,6 +903,9 @@ static int vsc8584_macsec_upd_txsa(struct macsec_context *ctx)
        struct macsec_flow *flow;
        int ret;
 
+       if (ctx->sa.update_pn)
+               return -EINVAL;
+
        flow = vsc8584_macsec_find_flow(ctx, MACSEC_EGR);
        if (IS_ERR(flow))
                return PTR_ERR(flow);
index 89ab9ef..afa5497 100644 (file)
@@ -3073,10 +3073,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
        struct net *net = sock_net(&tfile->sk);
        struct tun_struct *tun;
        void __user* argp = (void __user*)arg;
-       unsigned int ifindex, carrier;
+       unsigned int carrier;
        struct ifreq ifr;
        kuid_t owner;
        kgid_t group;
+       int ifindex;
        int sndbuf;
        int vnet_hdr_sz;
        int le;
@@ -3132,7 +3133,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
                ret = -EFAULT;
                if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
                        goto unlock;
-
+               ret = -EINVAL;
+               if (ifindex < 0)
+                       goto unlock;
                ret = 0;
                tfile->ifindex = ifindex;
                goto unlock;
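
The change above makes the ifindex read from userspace signed so a negative value can be rejected. A hypothetical, self-contained version of that validation (example_set_ifindex() is invented; copy_from_user() is the real interface):

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical sketch: read the value as a signed int so that a negative
 * index is caught here instead of being stored and later treated as a
 * very large unsigned interface index.
 */
static int example_set_ifindex(int __user *argp, int *dst)
{
        int ifindex;

        if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
                return -EFAULT;
        if (ifindex < 0)
                return -EINVAL;

        *dst = ifindex;
        return 0;
}
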
index 48d7d27..99ec1d4 100644 (file)
@@ -222,13 +222,18 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
        struct usbnet *dev = netdev_priv(netdev);
 
        __le16 res;
+       int err;
 
        if (phy_id) {
                netdev_dbg(dev->net, "Only internal phy supported\n");
                return 0;
        }
 
-       dm_read_shared_word(dev, 1, loc, &res);
+       err = dm_read_shared_word(dev, 1, loc, &res);
+       if (err < 0) {
+               netdev_err(dev->net, "MDIO read error: %d\n", err);
+               return err;
+       }
 
        netdev_dbg(dev->net,
                   "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
index 563ecd2..17da42f 100644 (file)
@@ -897,7 +897,7 @@ static int smsc95xx_reset(struct usbnet *dev)
 
        if (timeout >= 100) {
                netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
-               return ret;
+               return -ETIMEDOUT;
        }
 
        ret = smsc95xx_set_mac_address(dev);
index fe7f314..d67f742 100644 (file)
@@ -607,16 +607,16 @@ static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
 
        --dma->ref;
 
-       if (dma->ref) {
-               if (dma->need_sync && len) {
-                       offset = buf - (head + sizeof(*dma));
+       if (dma->need_sync && len) {
+               offset = buf - (head + sizeof(*dma));
 
-                       virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
-                                                               len, DMA_FROM_DEVICE);
-               }
+               virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr,
+                                                       offset, len,
+                                                       DMA_FROM_DEVICE);
+       }
 
+       if (dma->ref)
                return;
-       }
 
        virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
                                         DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
index 635301d..829515a 100644 (file)
@@ -4,7 +4,6 @@
  */
 
 #include <linux/delay.h>
-#include <linux/pm_runtime.h>
 
 #include "iosm_ipc_chnl_cfg.h"
 #include "iosm_ipc_devlink.h"
@@ -632,11 +631,6 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
        /* Complete all memory stores after setting bit */
        smp_mb__after_atomic();
 
-       if (ipc_imem->pcie->pci->device == INTEL_CP_DEVICE_7560_ID) {
-               pm_runtime_mark_last_busy(ipc_imem->dev);
-               pm_runtime_put_autosuspend(ipc_imem->dev);
-       }
-
        return;
 
 err_ipc_mux_deinit:
@@ -1240,7 +1234,6 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
 
        /* forward MDM_NOT_READY to listeners */
        ipc_uevent_send(ipc_imem->dev, UEVENT_MDM_NOT_READY);
-       pm_runtime_get_sync(ipc_imem->dev);
 
        hrtimer_cancel(&ipc_imem->td_alloc_timer);
        hrtimer_cancel(&ipc_imem->tdupdate_timer);
@@ -1426,16 +1419,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
 
                set_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag);
        }
-
-       if (!pm_runtime_enabled(ipc_imem->dev))
-               pm_runtime_enable(ipc_imem->dev);
-
-       pm_runtime_set_autosuspend_delay(ipc_imem->dev,
-                                        IPC_MEM_AUTO_SUSPEND_DELAY_MS);
-       pm_runtime_use_autosuspend(ipc_imem->dev);
-       pm_runtime_allow(ipc_imem->dev);
-       pm_runtime_mark_last_busy(ipc_imem->dev);
-
        return ipc_imem;
 devlink_channel_fail:
        ipc_devlink_deinit(ipc_imem->ipc_devlink);
index 0144b45..5664ac5 100644 (file)
@@ -103,8 +103,6 @@ struct ipc_chnl_cfg;
 #define FULLY_FUNCTIONAL 0
 #define IOSM_DEVLINK_INIT 1
 
-#define IPC_MEM_AUTO_SUSPEND_DELAY_MS 5000
-
 /* List of the supported UL/DL pipes. */
 enum ipc_mem_pipes {
        IPC_MEM_PIPE_0 = 0,
index 3a259c9..04517bd 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/acpi.h>
 #include <linux/bitfield.h>
 #include <linux/module.h>
-#include <linux/pm_runtime.h>
 #include <net/rtnetlink.h>
 
 #include "iosm_ipc_imem.h"
@@ -438,8 +437,7 @@ static int __maybe_unused ipc_pcie_resume_cb(struct device *dev)
        return 0;
 }
 
-static DEFINE_RUNTIME_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb,
-                                ipc_pcie_resume_cb, NULL);
+static SIMPLE_DEV_PM_OPS(iosm_ipc_pm, ipc_pcie_suspend_cb, ipc_pcie_resume_cb);
 
 static struct pci_driver iosm_ipc_driver = {
        .name = KBUILD_MODNAME,
index 2ba1ddc..5d5b418 100644 (file)
@@ -3,8 +3,6 @@
  * Copyright (C) 2020-21 Intel Corporation.
  */
 
-#include <linux/pm_runtime.h>
-
 #include "iosm_ipc_chnl_cfg.h"
 #include "iosm_ipc_imem_ops.h"
 #include "iosm_ipc_port.h"
@@ -15,16 +13,12 @@ static int ipc_port_ctrl_start(struct wwan_port *port)
        struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
        int ret = 0;
 
-       pm_runtime_get_sync(ipc_port->ipc_imem->dev);
        ipc_port->channel = ipc_imem_sys_port_open(ipc_port->ipc_imem,
                                                   ipc_port->chl_id,
                                                   IPC_HP_CDEV_OPEN);
        if (!ipc_port->channel)
                ret = -EIO;
 
-       pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
        return ret;
 }
 
@@ -33,24 +27,15 @@ static void ipc_port_ctrl_stop(struct wwan_port *port)
 {
        struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
 
-       pm_runtime_get_sync(ipc_port->ipc_imem->dev);
        ipc_imem_sys_port_close(ipc_port->ipc_imem, ipc_port->channel);
-       pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
 }
 
 /* transfer control data to modem */
 static int ipc_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
 {
        struct iosm_cdev *ipc_port = wwan_port_get_drvdata(port);
-       int ret;
 
-       pm_runtime_get_sync(ipc_port->ipc_imem->dev);
-       ret = ipc_imem_sys_cdev_write(ipc_port, skb);
-       pm_runtime_mark_last_busy(ipc_port->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_port->ipc_imem->dev);
-
-       return ret;
+       return ipc_imem_sys_cdev_write(ipc_port, skb);
 }
 
 static const struct wwan_port_ops ipc_wwan_ctrl_ops = {
index 4368373..eeecfa3 100644 (file)
@@ -3,9 +3,7 @@
  * Copyright (C) 2020-2021 Intel Corporation.
  */
 
-#include <linux/pm_runtime.h>
 #include <linux/wwan.h>
-
 #include "iosm_ipc_trace.h"
 
 /* sub buffer size and number of sub buffer */
@@ -99,8 +97,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
        if (ret)
                return ret;
 
-       pm_runtime_get_sync(ipc_trace->ipc_imem->dev);
-
        mutex_lock(&ipc_trace->trc_mutex);
        if (val == TRACE_ENABLE && ipc_trace->mode != TRACE_ENABLE) {
                ipc_trace->channel = ipc_imem_sys_port_open(ipc_trace->ipc_imem,
@@ -121,10 +117,6 @@ static ssize_t ipc_trace_ctrl_file_write(struct file *filp,
        ret = count;
 unlock:
        mutex_unlock(&ipc_trace->trc_mutex);
-
-       pm_runtime_mark_last_busy(ipc_trace->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_trace->ipc_imem->dev);
-
        return ret;
 }
 
index 93d17de..ff747fc 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
 #include <linux/if_link.h>
-#include <linux/pm_runtime.h>
 #include <linux/rtnetlink.h>
 #include <linux/wwan.h>
 #include <net/pkt_sched.h>
@@ -52,13 +51,11 @@ static int ipc_wwan_link_open(struct net_device *netdev)
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
        int if_id = priv->if_id;
-       int ret = 0;
 
        if (if_id < IP_MUX_SESSION_START ||
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;
 
-       pm_runtime_get_sync(ipc_wwan->ipc_imem->dev);
        /* get channel id */
        priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);
 
@@ -66,8 +63,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
                dev_err(ipc_wwan->dev,
                        "cannot connect wwan0 & id %d to the IPC mem layer",
                        if_id);
-               ret = -ENODEV;
-               goto err_out;
+               return -ENODEV;
        }
 
        /* enable tx path, DL data may follow */
@@ -76,11 +72,7 @@ static int ipc_wwan_link_open(struct net_device *netdev)
        dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
                priv->ch_id, priv->if_id);
 
-err_out:
-       pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
-       return ret;
+       return 0;
 }
 
 /* Bring-down the wwan net link */
@@ -90,12 +82,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev)
 
        netif_stop_queue(netdev);
 
-       pm_runtime_get_sync(priv->ipc_wwan->ipc_imem->dev);
        ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
                                priv->ch_id);
        priv->ch_id = -1;
-       pm_runtime_mark_last_busy(priv->ipc_wwan->ipc_imem->dev);
-       pm_runtime_put_autosuspend(priv->ipc_wwan->ipc_imem->dev);
 
        return 0;
 }
@@ -117,7 +106,6 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;
 
-       pm_runtime_get(ipc_wwan->ipc_imem->dev);
        /* Send the SKB to device for transmission */
        ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
                                         if_id, priv->ch_id, skb);
@@ -131,14 +119,9 @@ static netdev_tx_t ipc_wwan_link_transmit(struct sk_buff *skb,
                ret = NETDEV_TX_BUSY;
                dev_err(ipc_wwan->dev, "unable to push packets");
        } else {
-               pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
-               pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
                goto exit;
        }
 
-       pm_runtime_mark_last_busy(ipc_wwan->ipc_imem->dev);
-       pm_runtime_put_autosuspend(ipc_wwan->ipc_imem->dev);
-
        return ret;
 
 exit:
index f3f2c07..fc3bb63 100644 (file)
@@ -41,8 +41,6 @@
 #include <asm/xen/hypercall.h>
 #include <xen/balloon.h>
 
-#define XENVIF_QUEUE_LENGTH 32
-
 /* Number of bytes allowed on the internal guest Rx queue. */
 #define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
 
@@ -530,8 +528,6 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;
 
-       dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
-
        dev->min_mtu = ETH_MIN_MTU;
        dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;
 
index daf5d14..064592a 100644 (file)
@@ -341,7 +341,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
        struct nvmf_auth_dhchap_success1_data *data = chap->buf;
        size_t size = sizeof(*data);
 
-       if (chap->ctrl_key)
+       if (chap->s2)
                size += chap->hash_len;
 
        if (size > CHAP_BUF_SIZE) {
@@ -825,7 +825,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
                goto fail2;
        }
 
-       if (chap->ctrl_key) {
+       if (chap->s2) {
                /* DH-HMAC-CHAP Step 5: send success2 */
                dev_dbg(ctrl->device, "%s: qid %d send success2\n",
                        __func__, chap->qid);
index d8ff796..747c879 100644 (file)
@@ -108,9 +108,13 @@ static void *nvme_add_user_metadata(struct request *req, void __user *ubuf,
        if (!buf)
                goto out;
 
-       ret = -EFAULT;
-       if ((req_op(req) == REQ_OP_DRV_OUT) && copy_from_user(buf, ubuf, len))
-               goto out_free_meta;
+       if (req_op(req) == REQ_OP_DRV_OUT) {
+               ret = -EFAULT;
+               if (copy_from_user(buf, ubuf, len))
+                       goto out_free_meta;
+       } else {
+               memset(buf, 0, len);
+       }
 
        bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
        if (IS_ERR(bip)) {
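
The ioctl hunk above copies the metadata bounce buffer from user space only for REQ_OP_DRV_OUT and zeroes it for every other direction, so a read-style passthrough command can no longer hand stale kernel memory back to user space. As an illustrative aside, here is a minimal sketch of the same rule with hypothetical names (alloc_bounce_buf(), to_device); it is not the driver's code:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Sketch only: allocate a kernel bounce buffer for a passthrough command.
 * Data headed to the device is filled from user space; data coming back
 * from the device starts out zeroed so no stale kernel memory leaks out.
 */
static void *alloc_bounce_buf(void __user *ubuf, unsigned int len,
                              bool to_device)
{
        void *buf = kmalloc(len, GFP_KERNEL);

        if (!buf)
                return ERR_PTR(-ENOMEM);

        if (to_device) {
                if (copy_from_user(buf, ubuf, len)) {
                        kfree(buf);
                        return ERR_PTR(-EFAULT);
                }
        } else {
                memset(buf, 0, len);
        }

        return buf;
}
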
index 347cb5d..3f0c9ee 100644 (file)
@@ -3329,7 +3329,8 @@ static const struct pci_device_id nvme_id_table[] = {
        { PCI_VDEVICE(INTEL, 0x0a54),   /* Intel P4500/P4600 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                NVME_QUIRK_DEALLOCATE_ZEROES |
-                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN |
+                               NVME_QUIRK_BOGUS_NID, },
        { PCI_VDEVICE(INTEL, 0x0a55),   /* Dell Express Flash P4600 */
                .driver_data = NVME_QUIRK_STRIPE_SIZE |
                                NVME_QUIRK_DEALLOCATE_ZEROES, },
index 337a624..a7fea4c 100644 (file)
@@ -638,6 +638,9 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 
 static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
 {
+       if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+               return;
+
        mutex_lock(&queue->queue_lock);
        if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
                __nvme_rdma_stop_queue(queue);
index 586458f..1d98544 100644 (file)
@@ -333,19 +333,21 @@ done:
                         __func__, ctrl->cntlid, req->sq->qid,
                         status, req->error_loc);
        req->cqe->result.u64 = 0;
-       nvmet_req_complete(req, status);
        if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
            req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
                unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;
 
                mod_delayed_work(system_wq, &req->sq->auth_expired_work,
                                 auth_expire_secs * HZ);
-               return;
+               goto complete;
        }
        /* Final states, clear up variables */
        nvmet_auth_sq_free(req->sq);
        if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
                nvmet_ctrl_fatal_error(ctrl);
+
+complete:
+       nvmet_req_complete(req, status);
 }
 
 static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
@@ -514,11 +516,12 @@ void nvmet_execute_auth_receive(struct nvmet_req *req)
        kfree(d);
 done:
        req->cqe->result.u64 = 0;
-       nvmet_req_complete(req, status);
+
        if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
                nvmet_auth_sq_free(req->sq);
        else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
                nvmet_auth_sq_free(req->sq);
                nvmet_ctrl_fatal_error(ctrl);
        }
+       nvmet_req_complete(req, status);
 }
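
Both nvmet-auth hunks move nvmet_req_complete() to the tail of the handler: completing a request hands it back to the transport, which may reuse or tear it down, so the nvmet_auth_sq_free()/nvmet_ctrl_fatal_error() work that still touches the queue has to run first. A toy sketch of that ordering rule, using made-up demo_* names rather than the nvmet API:

struct demo_req {
        int status;
        void (*done)(struct demo_req *req);     /* may free the request */
};

static void demo_teardown(struct demo_req *req)
{
        /* ...release per-request and per-queue authentication state... */
}

static void demo_finish(struct demo_req *req, int status)
{
        req->status = status;
        demo_teardown(req);     /* touches req: must happen before completion */
        req->done(req);         /* last use of req; it may be gone afterwards */
}
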
index cd92d7d..197fc2e 100644 (file)
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
 {
+       queue->rcv_state = NVMET_TCP_RECV_ERR;
        if (status == -EPIPE || status == -ECONNRESET)
                kernel_sock_shutdown(queue->sock, SHUT_RDWR);
        else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
        iov.iov_len = sizeof(*icresp);
        ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
        if (ret < 0)
-               goto free_crypto;
+               return ret; /* queue removal will clean up */
 
        queue->state = NVMET_TCP_Q_LIVE;
        nvmet_prepare_receive_pdu(queue);
        return 0;
-free_crypto:
-       if (queue->hdr_digest || queue->data_digest)
-               nvmet_tcp_free_crypto(queue);
-       return ret;
 }
 
 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
index 1f9a35f..0dda70e 100644 (file)
@@ -23,7 +23,8 @@ static bool riscv_perf_user_access(struct perf_event *event)
        return ((event->attr.type == PERF_TYPE_HARDWARE) ||
                (event->attr.type == PERF_TYPE_HW_CACHE) ||
                (event->attr.type == PERF_TYPE_RAW)) &&
-               !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT);
+               !!(event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) &&
+               (event->hw.idx != -1);
 }
 
 void arch_perf_update_userpage(struct perf_event *event,
index 9a51053..96c7f67 100644 (file)
@@ -510,16 +510,18 @@ static void pmu_sbi_set_scounteren(void *arg)
 {
        struct perf_event *event = (struct perf_event *)arg;
 
-       csr_write(CSR_SCOUNTEREN,
-                 csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
+       if (event->hw.idx != -1)
+               csr_write(CSR_SCOUNTEREN,
+                         csr_read(CSR_SCOUNTEREN) | (1 << pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_reset_scounteren(void *arg)
 {
        struct perf_event *event = (struct perf_event *)arg;
 
-       csr_write(CSR_SCOUNTEREN,
-                 csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
+       if (event->hw.idx != -1)
+               csr_write(CSR_SCOUNTEREN,
+                         csr_read(CSR_SCOUNTEREN) & ~(1 << pmu_sbi_csr_index(event)));
 }
 
 static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
@@ -541,7 +543,8 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
 
        if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
            (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-               pmu_sbi_set_scounteren((void *)event);
+               on_each_cpu_mask(mm_cpumask(event->owner->mm),
+                                pmu_sbi_set_scounteren, (void *)event, 1);
 }
 
 static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -551,7 +554,8 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
 
        if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
            (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-               pmu_sbi_reset_scounteren((void *)event);
+               on_each_cpu_mask(mm_cpumask(event->owner->mm),
+                                pmu_sbi_reset_scounteren, (void *)event, 1);
 
        ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
        if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
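
The pmu_sbi hunks stop toggling CSR_SCOUNTEREN only on the local CPU and instead run the helper on every CPU in the task's mm_cpumask, because a counter mapped for user-space reads can be accessed from any CPU the process migrates to; the helpers now also skip events that have no hardware counter index yet. Roughly the same cross-CPU update pattern, reduced to a generic per-CPU bitmask (all demo_* names are invented):

#include <linux/bits.h>
#include <linux/mm_types.h>
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_user_ctr_mask);

/* Called on each targeted CPU, in IPI context on remote CPUs. */
static void demo_enable_user_counter(void *info)
{
        unsigned long idx = (unsigned long)info;

        __this_cpu_or(demo_user_ctr_mask, BIT(idx));
}

/* Flip the bit on every CPU currently running threads of @mm. */
static void demo_enable_for_mm(struct mm_struct *mm, unsigned long idx)
{
        on_each_cpu_mask(mm_cpumask(mm), demo_enable_user_counter,
                         (void *)idx, 1);
}
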
index 4f036c7..e218776 100644 (file)
@@ -127,6 +127,10 @@ struct lynx_28g_lane {
 struct lynx_28g_priv {
        void __iomem *base;
        struct device *dev;
+       /* Serialize concurrent access to registers shared between lanes,
+        * like PCCn
+        */
+       spinlock_t pcc_lock;
        struct lynx_28g_pll pll[LYNX_28G_NUM_PLL];
        struct lynx_28g_lane lane[LYNX_28G_NUM_LANE];
 
@@ -397,6 +401,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
        if (powered_up)
                lynx_28g_power_off(phy);
 
+       spin_lock(&priv->pcc_lock);
+
        switch (submode) {
        case PHY_INTERFACE_MODE_SGMII:
        case PHY_INTERFACE_MODE_1000BASEX:
@@ -413,6 +419,8 @@ static int lynx_28g_set_mode(struct phy *phy, enum phy_mode mode, int submode)
        lane->interface = submode;
 
 out:
+       spin_unlock(&priv->pcc_lock);
+
        /* Power up the lane if necessary */
        if (powered_up)
                lynx_28g_power_on(phy);
@@ -508,11 +516,12 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
        for (i = 0; i < LYNX_28G_NUM_LANE; i++) {
                lane = &priv->lane[i];
 
-               if (!lane->init)
-                       continue;
+               mutex_lock(&lane->phy->mutex);
 
-               if (!lane->powered_up)
+               if (!lane->init || !lane->powered_up) {
+                       mutex_unlock(&lane->phy->mutex);
                        continue;
+               }
 
                rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
                if (!(rrstctl & LYNX_28G_LNaRRSTCTL_CDR_LOCK)) {
@@ -521,6 +530,8 @@ static void lynx_28g_cdr_lock_check(struct work_struct *work)
                                rrstctl = lynx_28g_lane_read(lane, LNaRRSTCTL);
                        } while (!(rrstctl & LYNX_28G_LNaRRSTCTL_RST_DONE));
                }
+
+               mutex_unlock(&lane->phy->mutex);
        }
        queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
                           msecs_to_jiffies(1000));
@@ -593,6 +604,7 @@ static int lynx_28g_probe(struct platform_device *pdev)
 
        dev_set_drvdata(dev, priv);
 
+       spin_lock_init(&priv->pcc_lock);
        INIT_DELAYED_WORK(&priv->cdr_check, lynx_28g_cdr_lock_check);
 
        queue_delayed_work(system_power_efficient_wq, &priv->cdr_check,
@@ -604,6 +616,14 @@ static int lynx_28g_probe(struct platform_device *pdev)
        return PTR_ERR_OR_ZERO(provider);
 }
 
+static void lynx_28g_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct lynx_28g_priv *priv = dev_get_drvdata(dev);
+
+       cancel_delayed_work_sync(&priv->cdr_check);
+}
+
 static const struct of_device_id lynx_28g_of_match_table[] = {
        { .compatible = "fsl,lynx-28g" },
        { },
@@ -612,6 +632,7 @@ MODULE_DEVICE_TABLE(of, lynx_28g_of_match_table);
 
 static struct platform_driver lynx_28g_driver = {
        .probe  = lynx_28g_probe,
+       .remove_new = lynx_28g_remove,
        .driver = {
                .name = "lynx-28g",
                .of_match_table = lynx_28g_of_match_table,
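
The lynx-28g changes add a remove callback whose only job is cancel_delayed_work_sync() on the CDR polling work, so the self-rearming work cannot fire after the device is unbound, and they take the phy mutex in the poller so it no longer races with lane reconfiguration. A stripped-down sketch of that probe/remove pairing for a self-queuing delayed work; every demo_* name is illustrative, not the driver's:

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_priv {
        struct delayed_work poll_work;
};

static void demo_poll(struct work_struct *work)
{
        struct demo_priv *priv = container_of(work, struct demo_priv,
                                              poll_work.work);

        /* ...poll the hardware, then rearm ourselves... */
        queue_delayed_work(system_power_efficient_wq, &priv->poll_work,
                           msecs_to_jiffies(1000));
}

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        platform_set_drvdata(pdev, priv);
        INIT_DELAYED_WORK(&priv->poll_work, demo_poll);
        queue_delayed_work(system_power_efficient_wq, &priv->poll_work, 0);

        return 0;
}

static void demo_remove(struct platform_device *pdev)
{
        struct demo_priv *priv = platform_get_drvdata(pdev);

        /* Waits for a running instance and cancels any pending rearm. */
        cancel_delayed_work_sync(&priv->poll_work);
}

static struct platform_driver demo_driver = {
        .probe          = demo_probe,
        .remove_new     = demo_remove,
        .driver         = {
                .name   = "demo-cdr-poll",
        },
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("Illustrative self-queuing delayed work driver");
MODULE_LICENSE("GPL");
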
index 2d1c165..8a9961a 100644 (file)
@@ -1062,13 +1062,13 @@ static int wpcm450_gpio_register(struct platform_device *pdev,
                if (ret < 0)
                        return ret;
 
-               gpio = &pctrl->gpio_bank[reg];
-               gpio->pctrl = pctrl;
-
                if (reg >= WPCM450_NUM_BANKS)
                        return dev_err_probe(dev, -EINVAL,
                                             "GPIO index %d out of range!\n", reg);
 
+               gpio = &pctrl->gpio_bank[reg];
+               gpio->pctrl = pctrl;
+
                bank = &wpcm450_banks[reg];
                gpio->bank = bank;
 
index efb25fc..1ac49ae 100644 (file)
@@ -198,5 +198,4 @@ enum ltq_pin {
 
 extern int ltq_pinctrl_register(struct platform_device *pdev,
                                   struct ltq_pinmux_info *info);
-extern int ltq_pinctrl_unregister(struct platform_device *pdev);
 #endif /* __PINCTRL_LANTIQ_H */
index e5a4180..0b2839d 100644 (file)
@@ -32,7 +32,8 @@ struct lpi_pinctrl {
        char __iomem *tlmm_base;
        char __iomem *slew_base;
        struct clk_bulk_data clks[MAX_LPI_NUM_CLKS];
-       struct mutex slew_access_lock;
+       /* Protects from concurrent register updates */
+       struct mutex lock;
        DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO);
        const struct lpi_pinctrl_variant_data *data;
 };
@@ -103,6 +104,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
        if (WARN_ON(i == g->nfuncs))
                return -EINVAL;
 
+       mutex_lock(&pctrl->lock);
        val = lpi_gpio_read(pctrl, pin, LPI_GPIO_CFG_REG);
 
        /*
@@ -128,6 +130,7 @@ static int lpi_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
 
        u32p_replace_bits(&val, i, LPI_GPIO_FUNCTION_MASK);
        lpi_gpio_write(pctrl, pin, LPI_GPIO_CFG_REG, val);
+       mutex_unlock(&pctrl->lock);
 
        return 0;
 }
@@ -233,14 +236,14 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                        if (slew_offset == LPI_NO_SLEW)
                                break;
 
-                       mutex_lock(&pctrl->slew_access_lock);
+                       mutex_lock(&pctrl->lock);
 
                        sval = ioread32(pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
                        sval &= ~(LPI_SLEW_RATE_MASK << slew_offset);
                        sval |= arg << slew_offset;
                        iowrite32(sval, pctrl->slew_base + LPI_SLEW_RATE_CTL_REG);
 
-                       mutex_unlock(&pctrl->slew_access_lock);
+                       mutex_unlock(&pctrl->lock);
                        break;
                default:
                        return -EINVAL;
@@ -256,6 +259,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
                lpi_gpio_write(pctrl, group, LPI_GPIO_VALUE_REG, val);
        }
 
+       mutex_lock(&pctrl->lock);
        val = lpi_gpio_read(pctrl, group, LPI_GPIO_CFG_REG);
 
        u32p_replace_bits(&val, pullup, LPI_GPIO_PULL_MASK);
@@ -264,6 +268,7 @@ static int lpi_config_set(struct pinctrl_dev *pctldev, unsigned int group,
        u32p_replace_bits(&val, output_enabled, LPI_GPIO_OE_MASK);
 
        lpi_gpio_write(pctrl, group, LPI_GPIO_CFG_REG, val);
+       mutex_unlock(&pctrl->lock);
 
        return 0;
 }
@@ -461,7 +466,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
        pctrl->chip.label = dev_name(dev);
        pctrl->chip.can_sleep = false;
 
-       mutex_init(&pctrl->slew_access_lock);
+       mutex_init(&pctrl->lock);
 
        pctrl->ctrl = devm_pinctrl_register(dev, &pctrl->desc, pctrl);
        if (IS_ERR(pctrl->ctrl)) {
@@ -483,7 +488,7 @@ int lpi_pinctrl_probe(struct platform_device *pdev)
        return 0;
 
 err_pinctrl:
-       mutex_destroy(&pctrl->slew_access_lock);
+       mutex_destroy(&pctrl->lock);
        clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
 
        return ret;
@@ -495,7 +500,7 @@ int lpi_pinctrl_remove(struct platform_device *pdev)
        struct lpi_pinctrl *pctrl = platform_get_drvdata(pdev);
        int i;
 
-       mutex_destroy(&pctrl->slew_access_lock);
+       mutex_destroy(&pctrl->lock);
        clk_bulk_disable_unprepare(MAX_LPI_NUM_CLKS, pctrl->clks);
 
        for (i = 0; i < pctrl->data->npins; i++)
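
The LPI pinctrl hunks widen the old slew-rate-only mutex into a single lock that also covers the read-modify-write of LPI_GPIO_CFG_REG, since two concurrent callers could otherwise interleave their read and write and lose one of the updates. The core of that pattern, as a generic locked update helper (demo_* names are not from the driver):

#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_ctrl {
        void __iomem *base;
        struct mutex lock;      /* serializes read-modify-write of shared regs */
};

static void demo_update_bits(struct demo_ctrl *dc, unsigned int reg,
                             u32 mask, u32 val)
{
        u32 tmp;

        mutex_lock(&dc->lock);
        tmp = readl(dc->base + reg);
        tmp = (tmp & ~mask) | (val & mask);
        writel(tmp, dc->base + reg);
        mutex_unlock(&dc->lock);
}
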
index 77730dc..c8d519c 100644 (file)
@@ -235,6 +235,7 @@ config PINCTRL_RZN1
        depends on OF
        depends on ARCH_RZN1 || COMPILE_TEST
        select GENERIC_PINCONF
+       select PINMUX
        help
          This selects pinctrl driver for Renesas RZ/N1 devices.
 
index 4bfe3aa..cf42e20 100644 (file)
@@ -31,6 +31,8 @@
 #define JH7110_AON_NGPIO               4
 #define JH7110_AON_GC_BASE             64
 
+#define JH7110_AON_REGS_NUM            37
+
 /* registers */
 #define JH7110_AON_DOEN                        0x0
 #define JH7110_AON_DOUT                        0x4
@@ -145,6 +147,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_aon_pinctrl_info = {
        .gpi_mask       = GENMASK(3, 0),
        .gpioin_reg_base           = JH7110_AON_GPIOIN,
        .irq_reg                   = &jh7110_aon_irq_reg,
+       .nsaved_regs               = JH7110_AON_REGS_NUM,
        .jh7110_set_one_pin_mux  = jh7110_aon_set_one_pin_mux,
        .jh7110_get_padcfg_base  = jh7110_aon_get_padcfg_base,
        .jh7110_gpio_irq_handler = jh7110_aon_irq_handler,
@@ -165,6 +168,7 @@ static struct platform_driver jh7110_aon_pinctrl_driver = {
        .driver = {
                .name = "starfive-jh7110-aon-pinctrl",
                .of_match_table = jh7110_aon_pinctrl_of_match,
+               .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops),
        },
 };
 module_platform_driver(jh7110_aon_pinctrl_driver);
index 20c85db..03c2ad8 100644 (file)
@@ -31,6 +31,8 @@
 #define JH7110_SYS_NGPIO               64
 #define JH7110_SYS_GC_BASE             0
 
+#define JH7110_SYS_REGS_NUM            174
+
 /* registers */
 #define JH7110_SYS_DOEN                        0x000
 #define JH7110_SYS_DOUT                        0x040
@@ -417,6 +419,7 @@ static const struct jh7110_pinctrl_soc_info jh7110_sys_pinctrl_info = {
        .gpi_mask       = GENMASK(6, 0),
        .gpioin_reg_base           = JH7110_SYS_GPIOIN,
        .irq_reg                   = &jh7110_sys_irq_reg,
+       .nsaved_regs               = JH7110_SYS_REGS_NUM,
        .jh7110_set_one_pin_mux  = jh7110_sys_set_one_pin_mux,
        .jh7110_get_padcfg_base  = jh7110_sys_get_padcfg_base,
        .jh7110_gpio_irq_handler = jh7110_sys_irq_handler,
@@ -437,6 +440,7 @@ static struct platform_driver jh7110_sys_pinctrl_driver = {
        .driver = {
                .name = "starfive-jh7110-sys-pinctrl",
                .of_match_table = jh7110_sys_pinctrl_of_match,
+               .pm = pm_sleep_ptr(&jh7110_pinctrl_pm_ops),
        },
 };
 module_platform_driver(jh7110_sys_pinctrl_driver);
index b908180..640f827 100644 (file)
@@ -872,6 +872,13 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
        if (!sfp)
                return -ENOMEM;
 
+#if IS_ENABLED(CONFIG_PM_SLEEP)
+       sfp->saved_regs = devm_kcalloc(dev, info->nsaved_regs,
+                                      sizeof(*sfp->saved_regs), GFP_KERNEL);
+       if (!sfp->saved_regs)
+               return -ENOMEM;
+#endif
+
        sfp->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(sfp->base))
                return PTR_ERR(sfp->base);
@@ -967,14 +974,45 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
        if (ret)
                return dev_err_probe(dev, ret, "could not register gpiochip\n");
 
-       irq_domain_set_pm_device(sfp->gc.irq.domain, dev);
-
        dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", sfp->gc.ngpio);
 
        return pinctrl_enable(sfp->pctl);
 }
 EXPORT_SYMBOL_GPL(jh7110_pinctrl_probe);
 
+static int jh7110_pinctrl_suspend(struct device *dev)
+{
+       struct jh7110_pinctrl *sfp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       raw_spin_lock_irqsave(&sfp->lock, flags);
+       for (i = 0 ; i < sfp->info->nsaved_regs ; i++)
+               sfp->saved_regs[i] = readl_relaxed(sfp->base + 4 * i);
+
+       raw_spin_unlock_irqrestore(&sfp->lock, flags);
+       return 0;
+}
+
+static int jh7110_pinctrl_resume(struct device *dev)
+{
+       struct jh7110_pinctrl *sfp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       raw_spin_lock_irqsave(&sfp->lock, flags);
+       for (i = 0 ; i < sfp->info->nsaved_regs ; i++)
+               writel_relaxed(sfp->saved_regs[i], sfp->base + 4 * i);
+
+       raw_spin_unlock_irqrestore(&sfp->lock, flags);
+       return 0;
+}
+
+const struct dev_pm_ops jh7110_pinctrl_pm_ops = {
+       LATE_SYSTEM_SLEEP_PM_OPS(jh7110_pinctrl_suspend, jh7110_pinctrl_resume)
+};
+EXPORT_SYMBOL_GPL(jh7110_pinctrl_pm_ops);
+
 MODULE_DESCRIPTION("Pinctrl driver for the StarFive JH7110 SoC");
 MODULE_AUTHOR("Emil Renner Berthing <kernel@esmil.dk>");
 MODULE_AUTHOR("Jianlong Huang <jianlong.huang@starfivetech.com>");
index 3f20b7f..a33d0d4 100644 (file)
@@ -21,6 +21,7 @@ struct jh7110_pinctrl {
        /* register read/write mutex */
        struct mutex mutex;
        const struct jh7110_pinctrl_soc_info *info;
+       u32 *saved_regs;
 };
 
 struct jh7110_gpio_irq_reg {
@@ -50,6 +51,8 @@ struct jh7110_pinctrl_soc_info {
 
        const struct jh7110_gpio_irq_reg *irq_reg;
 
+       unsigned int nsaved_regs;
+
        /* generic pinmux */
        int (*jh7110_set_one_pin_mux)(struct jh7110_pinctrl *sfp,
                                      unsigned int pin,
@@ -66,5 +69,6 @@ void jh7110_set_gpiomux(struct jh7110_pinctrl *sfp, unsigned int pin,
                        unsigned int din, u32 dout, u32 doen);
 int jh7110_pinctrl_probe(struct platform_device *pdev);
 struct jh7110_pinctrl *jh7110_from_irq_desc(struct irq_desc *desc);
+extern const struct dev_pm_ops jh7110_pinctrl_pm_ops;
 
 #endif /* __PINCTRL_STARFIVE_JH7110_H__ */
index cfeda5b..734c71e 100644 (file)
@@ -96,7 +96,6 @@ static const struct cfg_param {
        {"nvidia,slew-rate-falling",    TEGRA_PINCONF_PARAM_SLEW_RATE_FALLING},
        {"nvidia,slew-rate-rising",     TEGRA_PINCONF_PARAM_SLEW_RATE_RISING},
        {"nvidia,drive-type",           TEGRA_PINCONF_PARAM_DRIVE_TYPE},
-       {"nvidia,function",             TEGRA_PINCONF_PARAM_FUNCTION},
 };
 
 static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
@@ -471,12 +470,6 @@ static int tegra_pinconf_reg(struct tegra_pmx *pmx,
                *bit = g->drvtype_bit;
                *width = 2;
                break;
-       case TEGRA_PINCONF_PARAM_FUNCTION:
-               *bank = g->mux_bank;
-               *reg = g->mux_reg;
-               *bit = g->mux_bit;
-               *width = 2;
-               break;
        default:
                dev_err(pmx->dev, "Invalid config param %04x\n", param);
                return -ENOTSUPP;
@@ -640,16 +633,8 @@ static void tegra_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
                val >>= bit;
                val &= (1 << width) - 1;
 
-               if (cfg_params[i].param == TEGRA_PINCONF_PARAM_FUNCTION) {
-                       u8 idx = pmx->soc->groups[group].funcs[val];
-
-                       seq_printf(s, "\n\t%s=%s",
-                                  strip_prefix(cfg_params[i].property),
-                                        pmx->functions[idx].name);
-               } else {
-                       seq_printf(s, "\n\t%s=%u",
-                                  strip_prefix(cfg_params[i].property), val);
-               }
+               seq_printf(s, "\n\t%s=%u",
+                          strip_prefix(cfg_params[i].property), val);
        }
 }
 
index e728efe..b3289bd 100644 (file)
@@ -54,8 +54,6 @@ enum tegra_pinconf_param {
        TEGRA_PINCONF_PARAM_SLEW_RATE_RISING,
        /* argument: Integer, range is HW-dependant */
        TEGRA_PINCONF_PARAM_DRIVE_TYPE,
-       /* argument: pinmux settings */
-       TEGRA_PINCONF_PARAM_FUNCTION,
 };
 
 enum tegra_pinconf_pull {
index 2f693b6..891c1d9 100644 (file)
@@ -150,7 +150,8 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
        { "mclk-out-1", IMX_SC_R_MCLK_OUT_1, 1, false, 0 },
        { "dma0-ch", IMX_SC_R_DMA_0_CH0, 32, true, 0 },
        { "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
-       { "dma2-ch", IMX_SC_R_DMA_2_CH0, 32, true, 0 },
+       { "dma2-ch-0", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
+       { "dma2-ch-1", IMX_SC_R_DMA_2_CH5, 27, true, 0 },
        { "dma3-ch", IMX_SC_R_DMA_3_CH0, 32, true, 0 },
        { "asrc0", IMX_SC_R_ASRC_0, 1, false, 0 },
        { "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
index de77df9..ec163d1 100644 (file)
@@ -105,7 +105,7 @@ struct qcom_battmgr_property_request {
 
 struct qcom_battmgr_update_request {
        struct pmic_glink_hdr hdr;
-       u32 battery_id;
+       __le32 battery_id;
 };
 
 struct qcom_battmgr_charge_time_request {
@@ -1282,9 +1282,9 @@ static void qcom_battmgr_enable_worker(struct work_struct *work)
 {
        struct qcom_battmgr *battmgr = container_of(work, struct qcom_battmgr, enable_work);
        struct qcom_battmgr_enable_request req = {
-               .hdr.owner = PMIC_GLINK_OWNER_BATTMGR,
-               .hdr.type = PMIC_GLINK_NOTIFY,
-               .hdr.opcode = BATTMGR_REQUEST_NOTIFICATION,
+               .hdr.owner = cpu_to_le32(PMIC_GLINK_OWNER_BATTMGR),
+               .hdr.type = cpu_to_le32(PMIC_GLINK_NOTIFY),
+               .hdr.opcode = cpu_to_le32(BATTMGR_REQUEST_NOTIFICATION),
        };
        int ret;
 
index 74760c1..4902d45 100644 (file)
@@ -102,7 +102,7 @@ config CCWGROUP
 
 config ISM
        tristate "Support for ISM vPCI Adapter"
-       depends on PCI && SMC
+       depends on PCI
        default n
        help
          Select this option if you want to use the Internal Shared Memory
index c3c1f46..605013d 100644 (file)
@@ -12913,8 +12913,10 @@ _mpt3sas_init(void)
        mpt3sas_ctl_init(hbas_to_enumerate);
 
        error = pci_register_driver(&mpt3sas_driver);
-       if (error)
+       if (error) {
+               mpt3sas_ctl_exit(hbas_to_enumerate);
                scsih_exit();
+       }
 
        return error;
 }
index 50db082..dcae09a 100644 (file)
@@ -4953,7 +4953,7 @@ qla2x00_mem_free(struct qla_hw_data *ha)
        ha->gid_list = NULL;
        ha->gid_list_dma = 0;
 
-       if (!list_empty(&ha->base_qpair->dsd_list)) {
+       if (ha->base_qpair && !list_empty(&ha->base_qpair->dsd_list)) {
                struct dsd_dma *dsd_ptr, *tdsd_ptr;
 
                /* clean up allocated prev pool */
index 902655d..44680f6 100644 (file)
@@ -1627,12 +1627,13 @@ int scsi_rescan_device(struct scsi_device *sdev)
        device_lock(dev);
 
        /*
-        * Bail out if the device is not running. Otherwise, the rescan may
-        * block waiting for commands to be executed, with us holding the
-        * device lock. This can result in a potential deadlock in the power
-        * management core code when system resume is on-going.
+        * Bail out if the device or its queue is not running. Otherwise,
+        * the rescan may block waiting for commands to be executed, with us
+        * holding the device lock. This can result in a potential deadlock
+        * in the power management core code when system resume is on-going.
         */
-       if (sdev->sdev_state != SDEV_RUNNING) {
+       if (sdev->sdev_state != SDEV_RUNNING ||
+           blk_queue_pm_only(sdev->request_queue)) {
                ret = -EWOULDBLOCK;
                goto unlock;
        }
index 5a75ab6..12040ce 100644 (file)
@@ -333,6 +333,7 @@ if RISCV
 
 config ARCH_R9A07G043
        bool "RISC-V Platform support for RZ/Five"
+       depends on NONPORTABLE
        select ARCH_RZG2L
        select AX45MP_L2_CACHE if RISCV_DMA_NONCOHERENT
        select DMA_GLOBAL_POOL
index c3d3ab3..657f588 100644 (file)
@@ -15,6 +15,10 @@ ifdef CONFIG_DEBUG_FS
 soundwire-bus-y += debugfs.o
 endif
 
+ifdef CONFIG_IRQ_DOMAIN
+soundwire-bus-y += irq.o
+endif
+
 #AMD driver
 soundwire-amd-y :=     amd_manager.o
 obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
index 1720031..0e7bc3c 100644 (file)
@@ -3,13 +3,13 @@
 
 #include <linux/acpi.h>
 #include <linux/delay.h>
-#include <linux/irq.h>
 #include <linux/mod_devicetable.h>
 #include <linux/pm_runtime.h>
 #include <linux/soundwire/sdw_registers.h>
 #include <linux/soundwire/sdw.h>
 #include <linux/soundwire/sdw_type.h>
 #include "bus.h"
+#include "irq.h"
 #include "sysfs_local.h"
 
 static DEFINE_IDA(sdw_bus_ida);
@@ -25,23 +25,6 @@ static int sdw_get_id(struct sdw_bus *bus)
        return 0;
 }
 
-static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
-                      irq_hw_number_t hw)
-{
-       struct sdw_bus *bus = h->host_data;
-
-       irq_set_chip_data(virq, bus);
-       irq_set_chip(virq, &bus->irq_chip);
-       irq_set_nested_thread(virq, 1);
-       irq_set_noprobe(virq);
-
-       return 0;
-}
-
-static const struct irq_domain_ops sdw_domain_ops = {
-       .map    = sdw_irq_map,
-};
-
 /**
  * sdw_bus_master_add() - add a bus Master instance
  * @bus: bus instance
@@ -168,13 +151,9 @@ int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
        bus->params.curr_bank = SDW_BANK0;
        bus->params.next_bank = SDW_BANK1;
 
-       bus->irq_chip.name = dev_name(bus->dev);
-       bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
-                                              &sdw_domain_ops, bus);
-       if (!bus->domain) {
-               dev_err(bus->dev, "Failed to add IRQ domain\n");
-               return -EINVAL;
-       }
+       ret = sdw_irq_create(bus, fwnode);
+       if (ret)
+               return ret;
 
        return 0;
 }
@@ -213,7 +192,7 @@ void sdw_bus_master_delete(struct sdw_bus *bus)
 {
        device_for_each_child(bus->dev, NULL, sdw_delete_slave);
 
-       irq_domain_remove(bus->domain);
+       sdw_irq_delete(bus);
 
        sdw_master_device_del(bus);
 
index fafbc28..9fa93bb 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/soundwire/sdw.h>
 #include <linux/soundwire/sdw_type.h>
 #include "bus.h"
+#include "irq.h"
 #include "sysfs_local.h"
 
 /**
@@ -122,11 +123,8 @@ static int sdw_drv_probe(struct device *dev)
        if (drv->ops && drv->ops->read_prop)
                drv->ops->read_prop(slave);
 
-       if (slave->prop.use_domain_irq) {
-               slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
-               if (!slave->irq)
-                       dev_warn(dev, "Failed to map IRQ\n");
-       }
+       if (slave->prop.use_domain_irq)
+               sdw_irq_create_mapping(slave);
 
        /* init the sysfs as we have properties now */
        ret = sdw_slave_sysfs_init(slave);
@@ -176,8 +174,7 @@ static int sdw_drv_remove(struct device *dev)
        slave->probed = false;
 
        if (slave->prop.use_domain_irq)
-               irq_dispose_mapping(irq_find_mapping(slave->bus->domain,
-                                                    slave->dev_num));
+               sdw_irq_dispose_mapping(slave);
 
        mutex_unlock(&slave->sdw_dev_lock);
 
diff --git a/drivers/soundwire/irq.c b/drivers/soundwire/irq.c
new file mode 100644 (file)
index 0000000..0c08ceb
--- /dev/null
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2023 Cirrus Logic, Inc. and
+//                    Cirrus Logic International Semiconductor Ltd.
+
+#include <linux/device.h>
+#include <linux/fwnode.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/soundwire/sdw.h>
+#include "irq.h"
+
+static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
+                      irq_hw_number_t hw)
+{
+       struct sdw_bus *bus = h->host_data;
+
+       irq_set_chip_data(virq, bus);
+       irq_set_chip(virq, &bus->irq_chip);
+       irq_set_nested_thread(virq, 1);
+       irq_set_noprobe(virq);
+
+       return 0;
+}
+
+static const struct irq_domain_ops sdw_domain_ops = {
+       .map    = sdw_irq_map,
+};
+
+int sdw_irq_create(struct sdw_bus *bus,
+                  struct fwnode_handle *fwnode)
+{
+       bus->irq_chip.name = dev_name(bus->dev);
+
+       bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
+                                              &sdw_domain_ops, bus);
+       if (!bus->domain) {
+               dev_err(bus->dev, "Failed to add IRQ domain\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void sdw_irq_delete(struct sdw_bus *bus)
+{
+       irq_domain_remove(bus->domain);
+}
+
+void sdw_irq_create_mapping(struct sdw_slave *slave)
+{
+       slave->irq = irq_create_mapping(slave->bus->domain, slave->dev_num);
+       if (!slave->irq)
+               dev_warn(&slave->dev, "Failed to map IRQ\n");
+}
+
+void sdw_irq_dispose_mapping(struct sdw_slave *slave)
+{
+       irq_dispose_mapping(irq_find_mapping(slave->bus->domain, slave->dev_num));
+}
diff --git a/drivers/soundwire/irq.h b/drivers/soundwire/irq.h
new file mode 100644 (file)
index 0000000..58a5804
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Cirrus Logic, Inc. and
+ *                    Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef __SDW_IRQ_H
+#define __SDW_IRQ_H
+
+#include <linux/soundwire/sdw.h>
+#include <linux/fwnode.h>
+
+#if IS_ENABLED(CONFIG_IRQ_DOMAIN)
+
+int sdw_irq_create(struct sdw_bus *bus,
+                  struct fwnode_handle *fwnode);
+void sdw_irq_delete(struct sdw_bus *bus);
+void sdw_irq_create_mapping(struct sdw_slave *slave);
+void sdw_irq_dispose_mapping(struct sdw_slave *slave);
+
+#else /* CONFIG_IRQ_DOMAIN */
+
+static inline int sdw_irq_create(struct sdw_bus *bus,
+                                struct fwnode_handle *fwnode)
+{
+       return 0;
+}
+
+static inline void sdw_irq_delete(struct sdw_bus *bus)
+{
+}
+
+static inline void sdw_irq_create_mapping(struct sdw_slave *slave)
+{
+}
+
+static inline void sdw_irq_dispose_mapping(struct sdw_slave *slave)
+{
+}
+
+#endif /* CONFIG_IRQ_DOMAIN */
+
+#endif /* __SDW_IRQ_H */
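
The new drivers/soundwire/irq.h keeps the irqdomain support optional: real prototypes when CONFIG_IRQ_DOMAIN is set, empty static inline stubs otherwise, so bus.c and bus_type.c call sdw_irq_*() unconditionally and never need #ifdefs. The same shape in miniature for a hypothetical optional feature (CONFIG_DEMO_FEATURE and the demo_* names are invented):

/* demo_feature.h */
#ifndef __DEMO_FEATURE_H
#define __DEMO_FEATURE_H

#include <linux/kconfig.h>

struct demo_dev;

#if IS_ENABLED(CONFIG_DEMO_FEATURE)

int demo_feature_init(struct demo_dev *dd);
void demo_feature_exit(struct demo_dev *dd);

#else /* CONFIG_DEMO_FEATURE */

/* Stubs compile away entirely; callers stay free of #ifdef clutter. */
static inline int demo_feature_init(struct demo_dev *dd)
{
        return 0;
}

static inline void demo_feature_exit(struct demo_dev *dd)
{
}

#endif /* CONFIG_DEMO_FEATURE */

#endif /* __DEMO_FEATURE_H */

The Makefile hunk earlier in this series mirrors the same idea by building irq.o into the bus module only when CONFIG_IRQ_DOMAIN is enabled.
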
index 0ca21ff..e422485 100644 (file)
@@ -353,8 +353,9 @@ static int npcm_fiu_uma_read(struct spi_mem *mem,
                uma_cfg |= ilog2(op->cmd.buswidth);
                uma_cfg |= ilog2(op->addr.buswidth)
                        << NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
-               uma_cfg |= ilog2(op->dummy.buswidth)
-                       << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
+               if (op->dummy.nbytes)
+                       uma_cfg |= ilog2(op->dummy.buswidth)
+                               << NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
                uma_cfg |= ilog2(op->data.buswidth)
                        << NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
                uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
index 372d647..3c15f6a 100644 (file)
@@ -217,12 +217,12 @@ unlock:
        return rc;
 }
 
+/* mutex must be held by caller */
 static void destroy_session(struct kref *ref)
 {
        struct amdtee_session *sess = container_of(ref, struct amdtee_session,
                                                   refcount);
 
-       mutex_lock(&session_list_mutex);
        list_del(&sess->list_node);
        mutex_unlock(&session_list_mutex);
        kfree(sess);
@@ -272,7 +272,8 @@ int amdtee_open_session(struct tee_context *ctx,
        if (arg->ret != TEEC_SUCCESS) {
                pr_err("open_session failed %d\n", arg->ret);
                handle_unload_ta(ta_handle);
-               kref_put(&sess->refcount, destroy_session);
+               kref_put_mutex(&sess->refcount, destroy_session,
+                              &session_list_mutex);
                goto out;
        }
 
@@ -290,7 +291,8 @@ int amdtee_open_session(struct tee_context *ctx,
                pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
                handle_close_session(ta_handle, session_info);
                handle_unload_ta(ta_handle);
-               kref_put(&sess->refcount, destroy_session);
+               kref_put_mutex(&sess->refcount, destroy_session,
+                              &session_list_mutex);
                rc = -ENOMEM;
                goto out;
        }
@@ -331,7 +333,7 @@ int amdtee_close_session(struct tee_context *ctx, u32 session)
        handle_close_session(ta_handle, session_info);
        handle_unload_ta(ta_handle);
 
-       kref_put(&sess->refcount, destroy_session);
+       kref_put_mutex(&sess->refcount, destroy_session, &session_list_mutex);
 
        return 0;
 }
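
The amdtee hunks switch every final put to kref_put_mutex() so that session_list_mutex is already held when destroy_session() unlinks the session: taking the lock inside the release callback, after the refcount has already reached zero, leaves a window in which a concurrent list walker can still find and use the dying session. A minimal sketch of the pattern with hypothetical demo_* objects:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_list_mutex);

struct demo_obj {
        struct list_head node;
        struct kref refcount;
};

/* Called with demo_list_mutex held; must drop it before returning. */
static void demo_release(struct kref *ref)
{
        struct demo_obj *obj = container_of(ref, struct demo_obj, refcount);

        list_del(&obj->node);
        mutex_unlock(&demo_list_mutex);
        kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
        /* Acquires the mutex only when this put drops the last reference. */
        kref_put_mutex(&obj->refcount, demo_release, &demo_list_mutex);
}
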
index dbdcad8..d8b9c73 100644 (file)
@@ -41,6 +41,7 @@
 #define PHY_PORT_CS1_LINK_STATE_SHIFT  26
 
 #define ICM_TIMEOUT                    5000    /* ms */
+#define ICM_RETRIES                    3
 #define ICM_APPROVE_TIMEOUT            10000   /* ms */
 #define ICM_MAX_LINK                   4
 
@@ -296,10 +297,9 @@ static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
 
 static int icm_request(struct tb *tb, const void *request, size_t request_size,
                       void *response, size_t response_size, size_t npackets,
-                      unsigned int timeout_msec)
+                      int retries, unsigned int timeout_msec)
 {
        struct icm *icm = tb_priv(tb);
-       int retries = 3;
 
        do {
                struct tb_cfg_request *req;
@@ -410,7 +410,7 @@ static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
                return -ENOMEM;
 
        ret = icm_request(tb, &request, sizeof(request), switches,
-                         sizeof(*switches), npackets, ICM_TIMEOUT);
+                         sizeof(*switches), npackets, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                goto err_free;
 
@@ -463,7 +463,7 @@ icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -488,7 +488,7 @@ static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
        memset(&reply, 0, sizeof(reply));
        /* Use larger timeout as establishing tunnels can take some time */
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_APPROVE_TIMEOUT);
+                         1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;
 
@@ -515,7 +515,7 @@ static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -543,7 +543,7 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -577,7 +577,7 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1020,7 +1020,7 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, 20000);
+                         1, 10, 2000);
        if (ret)
                return ret;
 
@@ -1053,7 +1053,7 @@ static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_APPROVE_TIMEOUT);
+                         1, ICM_RETRIES, ICM_APPROVE_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1081,7 +1081,7 @@ static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1110,7 +1110,7 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1144,7 +1144,7 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1170,7 +1170,7 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1496,7 +1496,7 @@ icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1522,7 +1522,7 @@ static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1543,7 +1543,7 @@ static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1604,7 +1604,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
@@ -1626,7 +1626,7 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, 20000);
+                         1, ICM_RETRIES, 20000);
        if (ret)
                return ret;
 
@@ -2298,7 +2298,7 @@ static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
 
        memset(&reply, 0, sizeof(reply));
        ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
-                         1, ICM_TIMEOUT);
+                         1, ICM_RETRIES, ICM_TIMEOUT);
        if (ret)
                return ret;
 
index 43171cc..bd5815f 100644 (file)
@@ -2725,6 +2725,13 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw)
            !tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
                return 0;
 
+       /*
+        * Both lanes need to be in CL0. Here we assume lane 0 is already in
+        * CL0 and check just for lane 1.
+        */
+       if (tb_wait_for_port(down->dual_link_port, false) <= 0)
+               return -ENOTCONN;
+
        ret = tb_port_lane_bonding_enable(up);
        if (ret) {
                tb_port_warn(up, "failed to enable lane bonding\n");
index 747f887..11f2aec 100644 (file)
@@ -382,7 +382,7 @@ static int tmu_mode_init(struct tb_switch *sw)
                } else if (ucap && tb_port_tmu_is_unidirectional(up)) {
                        if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
                                sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
-                       else if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
+                       else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
                                sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
                } else if (rate) {
                        sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
index 5b55668..9803f0b 100644 (file)
@@ -703,6 +703,27 @@ out_unlock:
        mutex_unlock(&xdomain_lock);
 }
 
+static void start_handshake(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_INIT;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+/* Can be called from state_work */
+static void __stop_handshake(struct tb_xdomain *xd)
+{
+       cancel_delayed_work_sync(&xd->properties_changed_work);
+       xd->properties_changed_retries = 0;
+       xd->state_retries = 0;
+}
+
+static void stop_handshake(struct tb_xdomain *xd)
+{
+       cancel_delayed_work_sync(&xd->state_work);
+       __stop_handshake(xd);
+}
+
 static void tb_xdp_handle_request(struct work_struct *work)
 {
        struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -765,6 +786,15 @@ static void tb_xdp_handle_request(struct work_struct *work)
        case UUID_REQUEST:
                tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
                ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
+               /*
+                * If we've stopped the discovery with an error such as
+                * timing out, we will restart the handshake now that we
+                * received a UUID request from the remote host.
+                */
+               if (!ret && xd && xd->state == XDOMAIN_STATE_ERROR) {
+                       dev_dbg(&xd->dev, "restarting handshake\n");
+                       start_handshake(xd);
+               }
                break;
 
        case LINK_STATE_STATUS_REQUEST:
@@ -1521,6 +1551,13 @@ static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
                           msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
 }
 
+static void tb_xdomain_failed(struct tb_xdomain *xd)
+{
+       xd->state = XDOMAIN_STATE_ERROR;
+       queue_delayed_work(xd->tb->wq, &xd->state_work,
+                          msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
 static void tb_xdomain_state_work(struct work_struct *work)
 {
        struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
@@ -1547,7 +1584,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
                if (ret) {
                        if (ret == -EAGAIN)
                                goto retry_state;
-                       xd->state = XDOMAIN_STATE_ERROR;
+                       tb_xdomain_failed(xd);
                } else {
                        tb_xdomain_queue_properties_changed(xd);
                        if (xd->bonding_possible)
@@ -1612,7 +1649,7 @@ static void tb_xdomain_state_work(struct work_struct *work)
                if (ret) {
                        if (ret == -EAGAIN)
                                goto retry_state;
-                       xd->state = XDOMAIN_STATE_ERROR;
+                       tb_xdomain_failed(xd);
                } else {
                        xd->state = XDOMAIN_STATE_ENUMERATED;
                }
@@ -1623,6 +1660,8 @@ static void tb_xdomain_state_work(struct work_struct *work)
                break;
 
        case XDOMAIN_STATE_ERROR:
+               dev_dbg(&xd->dev, "discovery failed, stopping handshake\n");
+               __stop_handshake(xd);
                break;
 
        default:
@@ -1833,21 +1872,6 @@ static void tb_xdomain_release(struct device *dev)
        kfree(xd);
 }
 
-static void start_handshake(struct tb_xdomain *xd)
-{
-       xd->state = XDOMAIN_STATE_INIT;
-       queue_delayed_work(xd->tb->wq, &xd->state_work,
-                          msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
-}
-
-static void stop_handshake(struct tb_xdomain *xd)
-{
-       cancel_delayed_work_sync(&xd->properties_changed_work);
-       cancel_delayed_work_sync(&xd->state_work);
-       xd->properties_changed_retries = 0;
-       xd->state_retries = 0;
-}
-
 static int __maybe_unused tb_xdomain_suspend(struct device *dev)
 {
        stop_handshake(tb_to_xdomain(dev));
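
The xdomain hunks split the teardown in two because the new error path runs inside state_work itself: __stop_handshake() only cancels the companion properties_changed_work, while the full stop_handshake(), used from suspend/remove, additionally sync-cancels state_work. A work item must not call cancel_delayed_work_sync() on itself, since that waits for the running callback (here, itself) to finish. The split in generic form, all names hypothetical:

#include <linux/workqueue.h>

struct demo_link {
        struct delayed_work state_work;         /* the state machine      */
        struct delayed_work probe_work;         /* companion/helper work  */
};

/* Safe to call from within demo_state_work(): never touches state_work. */
static void __demo_stop(struct demo_link *dl)
{
        cancel_delayed_work_sync(&dl->probe_work);
}

/* For external callers (suspend, remove): also waits for state_work. */
static void demo_stop(struct demo_link *dl)
{
        cancel_delayed_work_sync(&dl->state_work);
        __demo_stop(dl);
}

static void demo_state_work(struct work_struct *work)
{
        struct demo_link *dl = container_of(work, struct demo_link,
                                            state_work.work);

        /*
         * Error path: tear down only the companion work. Calling
         * demo_stop() here would wait for this very callback to finish.
         */
        __demo_stop(dl);
}
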
index 26dd089..ca972fd 100644 (file)
@@ -1617,7 +1617,7 @@ static int omap8250_suspend(struct device *dev)
 {
        struct omap8250_priv *priv = dev_get_drvdata(dev);
        struct uart_8250_port *up = serial8250_get_port(priv->line);
-       int err;
+       int err = 0;
 
        serial8250_suspend_port(priv->line);
 
@@ -1627,7 +1627,8 @@ static int omap8250_suspend(struct device *dev)
        if (!device_may_wakeup(dev))
                priv->wer = 0;
        serial_out(up, UART_OMAP_WER, priv->wer);
-       err = pm_runtime_force_suspend(dev);
+       if (uart_console(&up->port) && console_suspend_enabled)
+               err = pm_runtime_force_suspend(dev);
        flush_work(&priv->qos_work);
 
        return err;
@@ -1636,11 +1637,15 @@ static int omap8250_suspend(struct device *dev)
 static int omap8250_resume(struct device *dev)
 {
        struct omap8250_priv *priv = dev_get_drvdata(dev);
+       struct uart_8250_port *up = serial8250_get_port(priv->line);
        int err;
 
-       err = pm_runtime_force_resume(dev);
-       if (err)
-               return err;
+       if (uart_console(&up->port) && console_suspend_enabled) {
+               err = pm_runtime_force_resume(dev);
+               if (err)
+                       return err;
+       }
+
        serial8250_resume_port(priv->line);
        /* Paired with pm_runtime_resume_and_get() in omap8250_suspend() */
        pm_runtime_mark_last_busy(dev);
@@ -1717,16 +1722,6 @@ static int omap8250_runtime_suspend(struct device *dev)
 
        if (priv->line >= 0)
                up = serial8250_get_port(priv->line);
-       /*
-        * When using 'no_console_suspend', the console UART must not be
-        * suspended. Since driver suspend is managed by runtime suspend,
-        * preventing runtime suspend (by returning error) will keep device
-        * active during suspend.
-        */
-       if (priv->is_suspending && !console_suspend_enabled) {
-               if (up && uart_console(&up->port))
-                       return -EBUSY;
-       }
 
        if (priv->habit & UART_ERRATA_CLOCK_DISABLE) {
                int ret;
index 7bdc21d..d5ba6e9 100644 (file)
@@ -156,7 +156,7 @@ static void __uart_start(struct uart_state *state)
         * enabled, serial_port_runtime_resume() calls start_tx() again
         * after enabling the device.
         */
-       if (pm_runtime_active(&port_dev->dev))
+       if (!pm_runtime_enabled(port->dev) || pm_runtime_active(port->dev))
                port->ops->start_tx(port);
        pm_runtime_mark_last_busy(&port_dev->dev);
        pm_runtime_put_autosuspend(&port_dev->dev);
@@ -1404,12 +1404,18 @@ static void uart_set_rs485_termination(struct uart_port *port,
 static int uart_rs485_config(struct uart_port *port)
 {
        struct serial_rs485 *rs485 = &port->rs485;
+       unsigned long flags;
        int ret;
 
+       if (!(rs485->flags & SER_RS485_ENABLED))
+               return 0;
+
        uart_sanitize_serial_rs485(port, rs485);
        uart_set_rs485_termination(port, rs485);
 
+       spin_lock_irqsave(&port->lock, flags);
        ret = port->rs485_config(port, NULL, rs485);
+       spin_unlock_irqrestore(&port->lock, flags);
        if (ret)
                memset(rs485, 0, sizeof(*rs485));
 
@@ -2474,11 +2480,10 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
                        if (ret == 0) {
                                if (tty)
                                        uart_change_line_settings(tty, state, NULL);
+                               uart_rs485_config(uport);
                                spin_lock_irq(&uport->lock);
                                if (!(uport->rs485.flags & SER_RS485_ENABLED))
                                        ops->set_mctrl(uport, uport->mctrl);
-                               else
-                                       uart_rs485_config(uport);
                                ops->start_tx(uport);
                                spin_unlock_irq(&uport->lock);
                                tty_port_set_initialized(port, true);
@@ -2587,10 +2592,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
                port->mctrl &= TIOCM_DTR;
                if (!(port->rs485.flags & SER_RS485_ENABLED))
                        port->ops->set_mctrl(port, port->mctrl);
-               else
-                       uart_rs485_config(port);
                spin_unlock_irqrestore(&port->lock, flags);
 
+               uart_rs485_config(port);
+
                /*
                 * If this driver supports console, and it hasn't been
                 * successfully registered yet, try to re-register it.
index c2df075..8382e8c 100644 (file)
@@ -6895,7 +6895,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
                        mask, 0, 1000, 1000);
 
        dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
-               tag, err ? "succeeded" : "failed");
+               tag, err < 0 ? "failed" : "succeeded");
 
 out:
        return err;
index fff9ec9..4b67749 100644 (file)
@@ -1125,6 +1125,9 @@ static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
        unsigned long flags;
        int ret;
 
+       if (request->status != -EINPROGRESS)
+               return 0;
+
        if (!pep->endpoint.desc) {
                dev_err(pdev->dev,
                        "%s: can't dequeue to disabled endpoint\n",
index 4a4dbc2..81a9c9d 100644 (file)
@@ -131,8 +131,7 @@ void cdns_set_active(struct cdns *cdns, u8 set_active);
 #else /* CONFIG_PM_SLEEP */
 static inline int cdns_resume(struct cdns *cdns)
 { return 0; }
-static inline int cdns_set_active(struct cdns *cdns, u8 set_active)
-{ return 0; }
+static inline void cdns_set_active(struct cdns *cdns, u8 set_active) { }
 static inline int cdns_suspend(struct cdns *cdns)
 { return 0; }
 #endif /* CONFIG_PM_SLEEP */
index 3c54b21..0ff47ee 100644 (file)
@@ -151,6 +151,10 @@ int usb_device_supports_lpm(struct usb_device *udev)
        if (udev->quirks & USB_QUIRK_NO_LPM)
                return 0;
 
+       /* Skip if the device BOS descriptor couldn't be read */
+       if (!udev->bos)
+               return 0;
+
        /* USB 2.1 (and greater) devices indicate LPM support through
         * their USB 2.0 Extended Capabilities BOS descriptor.
         */
@@ -327,6 +331,10 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
        if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
                return;
 
+       /* Skip if the device BOS descriptor couldn't be read */
+       if (!udev->bos)
+               return;
+
        hub = usb_hub_to_struct_hub(udev->parent);
        /* It doesn't take time to transition the roothub into U0, since it
         * doesn't have an upstream link.
@@ -2704,13 +2712,17 @@ out_authorized:
 static enum usb_ssp_rate get_port_ssp_rate(struct usb_device *hdev,
                                           u32 ext_portstatus)
 {
-       struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+       struct usb_ssp_cap_descriptor *ssp_cap;
        u32 attr;
        u8 speed_id;
        u8 ssac;
        u8 lanes;
        int i;
 
+       if (!hdev->bos)
+               goto out;
+
+       ssp_cap = hdev->bos->ssp_cap;
        if (!ssp_cap)
                goto out;
 
@@ -4215,8 +4227,15 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
                enum usb3_link_state state)
 {
        int timeout;
-       __u8 u1_mel = udev->bos->ss_cap->bU1devExitLat;
-       __le16 u2_mel = udev->bos->ss_cap->bU2DevExitLat;
+       __u8 u1_mel;
+       __le16 u2_mel;
+
+       /* Skip if the device BOS descriptor couldn't be read */
+       if (!udev->bos)
+               return;
+
+       u1_mel = udev->bos->ss_cap->bU1devExitLat;
+       u2_mel = udev->bos->ss_cap->bU2DevExitLat;
 
        /* If the device says it doesn't have *any* exit latency to come out of
         * U1 or U2, it's probably lying.  Assume it doesn't implement that link
index 37897af..d44dd7f 100644 (file)
@@ -153,7 +153,7 @@ static inline int hub_is_superspeedplus(struct usb_device *hdev)
 {
        return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
                le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
-               hdev->bos->ssp_cap);
+               hdev->bos && hdev->bos->ssp_cap);
 }
 
 static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
index 9c6bf05..343d257 100644 (file)
@@ -279,9 +279,46 @@ int dwc3_core_soft_reset(struct dwc3 *dwc)
         * XHCI driver will reset the host block. If dwc3 was configured for
         * host-only mode or current role is host, then we can return early.
         */
-       if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
+       if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
                return 0;
 
+       /*
+        * If dr_mode is host but dwc->current_dr_role is not the
+        * corresponding DWC3_GCTL_PRTCAP_HOST, dwc3_core_init_mode()
+        * has not been executed yet. Soft-reset the phy so it is ready
+        * before the controller updates GCTL.PRTCAPDIR or other
+        * settings.
+        *
+        * Note: GUSB3PIPECTL[n] and GUSB2PHYCFG[n] are port settings where n
+        * is port index. If this is a multiport host, then we need to reset
+        * all active ports.
+        */
+       if (dwc->dr_mode == USB_DR_MODE_HOST) {
+               u32 usb3_port;
+               u32 usb2_port;
+
+               usb3_port = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+               usb3_port |= DWC3_GUSB3PIPECTL_PHYSOFTRST;
+               dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+
+               usb2_port = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+               usb2_port |= DWC3_GUSB2PHYCFG_PHYSOFTRST;
+               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+
+               /* Small delay for phy reset assertion */
+               usleep_range(1000, 2000);
+
+               usb3_port &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST;
+               dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), usb3_port);
+
+               usb2_port &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST;
+               dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), usb2_port);
+
+               /* Wait for clock synchronization */
+               msleep(50);
+               return 0;
+       }
+
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
        reg |= DWC3_DCTL_CSFTRST;
        reg &= ~DWC3_DCTL_RUN_STOP;
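
The new host-mode branch above is a plain assert / delay / deassert read-modify-write on the two phy control registers. A standalone toy model of that sequence (hypothetical register indices and bit position, not the dwc3 register map):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical register file standing in for dwc3_readl()/dwc3_writel(). */
static uint32_t regs[2];
#define REG_USB3_PIPECTL  0
#define REG_USB2_PHYCFG   1
#define PHY_SOFT_RESET    (1u << 31)

static uint32_t reg_read(int r)          { return regs[r]; }
static void reg_write(int r, uint32_t v) { regs[r] = v; }

/* Assert the phy soft-reset bits, hold them briefly, then release. */
static void phy_soft_reset(void)
{
        reg_write(REG_USB3_PIPECTL, reg_read(REG_USB3_PIPECTL) | PHY_SOFT_RESET);
        reg_write(REG_USB2_PHYCFG,  reg_read(REG_USB2_PHYCFG)  | PHY_SOFT_RESET);

        usleep(1000);                   /* reset assertion time */

        reg_write(REG_USB3_PIPECTL, reg_read(REG_USB3_PIPECTL) & ~PHY_SOFT_RESET);
        reg_write(REG_USB2_PHYCFG,  reg_read(REG_USB2_PHYCFG)  & ~PHY_SOFT_RESET);

        usleep(50000);                  /* let the clocks resynchronize */
}

int main(void)
{
        phy_soft_reset();
        printf("usb3=%#x usb2=%#x\n", regs[REG_USB3_PIPECTL], regs[REG_USB2_PHYCFG]);
        return 0;
}
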
index feccf4c..e6ab8cc 100644 (file)
@@ -1156,7 +1156,8 @@ static int ncm_unwrap_ntb(struct gether *port,
                          struct sk_buff_head *list)
 {
        struct f_ncm    *ncm = func_to_ncm(&port->func);
-       __le16          *tmp = (void *) skb->data;
+       unsigned char   *ntb_ptr = skb->data;
+       __le16          *tmp;
        unsigned        index, index2;
        int             ndp_index;
        unsigned        dg_len, dg_len2;
@@ -1169,6 +1170,10 @@ static int ncm_unwrap_ntb(struct gether *port,
        const struct ndp_parser_opts *opts = ncm->parser_opts;
        unsigned        crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
        int             dgram_counter;
+       int             to_process = skb->len;
+
+parse_ntb:
+       tmp = (__le16 *)ntb_ptr;
 
        /* dwSignature */
        if (get_unaligned_le32(tmp) != opts->nth_sign) {
@@ -1215,7 +1220,7 @@ static int ncm_unwrap_ntb(struct gether *port,
                 * walk through NDP
                 * dwSignature
                 */
-               tmp = (void *)(skb->data + ndp_index);
+               tmp = (__le16 *)(ntb_ptr + ndp_index);
                if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
                        INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
                        goto err;
@@ -1272,11 +1277,11 @@ static int ncm_unwrap_ntb(struct gether *port,
                        if (ncm->is_crc) {
                                uint32_t crc, crc2;
 
-                               crc = get_unaligned_le32(skb->data +
+                               crc = get_unaligned_le32(ntb_ptr +
                                                         index + dg_len -
                                                         crc_len);
                                crc2 = ~crc32_le(~0,
-                                                skb->data + index,
+                                                ntb_ptr + index,
                                                 dg_len - crc_len);
                                if (crc != crc2) {
                                        INFO(port->func.config->cdev,
@@ -1303,7 +1308,7 @@ static int ncm_unwrap_ntb(struct gether *port,
                                                         dg_len - crc_len);
                        if (skb2 == NULL)
                                goto err;
-                       skb_put_data(skb2, skb->data + index,
+                       skb_put_data(skb2, ntb_ptr + index,
                                     dg_len - crc_len);
 
                        skb_queue_tail(list, skb2);
@@ -1316,10 +1321,17 @@ static int ncm_unwrap_ntb(struct gether *port,
                } while (ndp_len > 2 * (opts->dgram_item_len * 2));
        } while (ndp_index);
 
-       dev_consume_skb_any(skb);
-
        VDBG(port->func.config->cdev,
             "Parsed NTB with %d frames\n", dgram_counter);
+
+       to_process -= block_len;
+       if (to_process != 0) {
+               ntb_ptr = (unsigned char *)(ntb_ptr + block_len);
+               goto parse_ntb;
+       }
+
+       dev_consume_skb_any(skb);
+
        return 0;
 err:
        skb_queue_purge(list);
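
The rework above stops assuming a single NTB per skb: after one block is parsed, block_len is subtracted from the bytes still to process and, if anything remains, ntb_ptr advances and parsing restarts at parse_ntb. A standalone sketch of that loop shape over length-prefixed blocks (hypothetical framing, not the actual NTH/NDP layout):

#include <stdint.h>
#include <stdio.h>

/* Each block: u16 little-endian total length (incl. header), then payload. */
static int parse_all_blocks(const uint8_t *buf, size_t len)
{
        const uint8_t *p = buf;
        size_t to_process = len;
        int blocks = 0;

        while (to_process >= 2) {
                uint16_t block_len = (uint16_t)(p[0] | (p[1] << 8));

                if (block_len < 2 || block_len > to_process)
                        return -1;              /* malformed framing */

                /* ... handle p[2..block_len) here ... */
                blocks++;

                p += block_len;                 /* step to the next block */
                to_process -= block_len;
        }
        return to_process ? -1 : blocks;
}

int main(void)
{
        const uint8_t wire[] = { 4, 0, 'a', 'b', 3, 0, 'c' };

        printf("parsed %d blocks\n", parse_all_blocks(wire, sizeof(wire)));
        return 0;
}
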
index 56b8286..74590f9 100644 (file)
@@ -497,11 +497,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
                /* Get the Buffer address and copy the transmit data.*/
                eprambase = (u32 __force *)(udc->addr + ep->rambase);
                if (ep->is_in) {
-                       memcpy(eprambase, bufferptr, bytestosend);
+                       memcpy_toio((void __iomem *)eprambase, bufferptr,
+                                   bytestosend);
                        udc->write_fn(udc->addr, ep->offset +
                                      XUSB_EP_BUF0COUNT_OFFSET, bufferlen);
                } else {
-                       memcpy(bufferptr, eprambase, bytestosend);
+                       memcpy_toio((void __iomem *)bufferptr, eprambase,
+                                   bytestosend);
                }
                /*
                 * Enable the buffer for transmission.
@@ -515,11 +517,13 @@ static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req,
                eprambase = (u32 __force *)(udc->addr + ep->rambase +
                             ep->ep_usb.maxpacket);
                if (ep->is_in) {
-                       memcpy(eprambase, bufferptr, bytestosend);
+                       memcpy_toio((void __iomem *)eprambase, bufferptr,
+                                   bytestosend);
                        udc->write_fn(udc->addr, ep->offset +
                                      XUSB_EP_BUF1COUNT_OFFSET, bufferlen);
                } else {
-                       memcpy(bufferptr, eprambase, bytestosend);
+                       memcpy_toio((void __iomem *)bufferptr, eprambase,
+                                   bytestosend);
                }
                /*
                 * Enable the buffer for transmission.
@@ -1021,7 +1025,7 @@ static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req)
                           udc->addr);
                length = req->usb_req.actual = min_t(u32, length,
                                                     EP0_MAX_PACKET);
-               memcpy(corebuf, req->usb_req.buf, length);
+               memcpy_toio((void __iomem *)corebuf, req->usb_req.buf, length);
                udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, length);
                udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
        } else {
@@ -1752,7 +1756,7 @@ static void xudc_handle_setup(struct xusb_udc *udc)
 
        /* Load up the chapter 9 command buffer.*/
        ep0rambase = (u32 __force *) (udc->addr + XUSB_SETUP_PKT_ADDR_OFFSET);
-       memcpy(&setup, ep0rambase, 8);
+       memcpy_toio((void __iomem *)&setup, ep0rambase, 8);
 
        udc->setup = setup;
        udc->setup.wValue = cpu_to_le16((u16 __force)setup.wValue);
@@ -1839,7 +1843,7 @@ static void xudc_ep0_out(struct xusb_udc *udc)
                             (ep0->rambase << 2));
                buffer = req->usb_req.buf + req->usb_req.actual;
                req->usb_req.actual = req->usb_req.actual + bytes_to_rx;
-               memcpy(buffer, ep0rambase, bytes_to_rx);
+               memcpy_toio((void __iomem *)buffer, ep0rambase, bytes_to_rx);
 
                if (req->usb_req.length == req->usb_req.actual) {
                        /* Data transfer completed get ready for Status stage */
@@ -1915,7 +1919,7 @@ static void xudc_ep0_in(struct xusb_udc *udc)
                                     (ep0->rambase << 2));
                        buffer = req->usb_req.buf + req->usb_req.actual;
                        req->usb_req.actual = req->usb_req.actual + length;
-                       memcpy(ep0rambase, buffer, length);
+                       memcpy_toio((void __iomem *)ep0rambase, buffer, length);
                }
                udc->write_fn(udc->addr, XUSB_EP_BUF0COUNT_OFFSET, count);
                udc->write_fn(udc->addr, XUSB_BUFFREADY_OFFSET, 1);
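
The hunks above switch plain memcpy() on the endpoint buffer RAM to an MMIO-safe accessor. As a hedged aside, the kernel pairs memcpy_toio() for CPU-to-device copies with memcpy_fromio() for device-to-CPU copies, so the receive-direction branches would conventionally use the latter rather than casting the CPU-side buffer to __iomem. A minimal kernel-style sketch of the convention (hypothetical helper names, kernel context assumed):

/* Sketch only: illustrative wrappers, not the driver's actual helpers. */
static void ep_ram_write(void __iomem *ep_ram, const void *buf, size_t len)
{
        memcpy_toio(ep_ram, buf, len);          /* CPU memory -> device RAM */
}

static void ep_ram_read(void *buf, const void __iomem *ep_ram, size_t len)
{
        memcpy_fromio(buf, ep_ram, len);        /* device RAM -> CPU memory */
}
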
index 0054d02..0df5d80 100644 (file)
@@ -1062,19 +1062,19 @@ static void xhci_get_usb3_port_status(struct xhci_port *port, u32 *status,
                *status |= USB_PORT_STAT_C_CONFIG_ERROR << 16;
 
        /* USB3 specific wPortStatus bits */
-       if (portsc & PORT_POWER) {
+       if (portsc & PORT_POWER)
                *status |= USB_SS_PORT_STAT_POWER;
-               /* link state handling */
-               if (link_state == XDEV_U0)
-                       bus_state->suspended_ports &= ~(1 << portnum);
-       }
 
-       /* remote wake resume signaling complete */
-       if (bus_state->port_remote_wakeup & (1 << portnum) &&
+       /* no longer suspended or resuming */
+       if (link_state != XDEV_U3 &&
            link_state != XDEV_RESUME &&
            link_state != XDEV_RECOVERY) {
-               bus_state->port_remote_wakeup &= ~(1 << portnum);
-               usb_hcd_end_port_resume(&hcd->self, portnum);
+               /* remote wake resume signaling complete */
+               if (bus_state->port_remote_wakeup & (1 << portnum)) {
+                       bus_state->port_remote_wakeup &= ~(1 << portnum);
+                       usb_hcd_end_port_resume(&hcd->self, portnum);
+               }
+               bus_state->suspended_ports &= ~(1 << portnum);
        }
 
        xhci_hub_report_usb3_link_state(xhci, status, portsc);
@@ -1131,6 +1131,7 @@ static void xhci_get_usb2_port_status(struct xhci_port *port, u32 *status,
                        usb_hcd_end_port_resume(&port->rhub->hcd->self, portnum);
                }
                port->rexit_active = 0;
+               bus_state->suspended_ports &= ~(1 << portnum);
        }
 }
 
index 8714ab5..0a37f0d 100644 (file)
@@ -2285,8 +2285,8 @@ xhci_add_interrupter(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
        writel(erst_size, &ir->ir_set->erst_size);
 
        erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
-       erst_base &= ERST_PTR_MASK;
-       erst_base |= (ir->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+       erst_base &= ERST_BASE_RSVDP;
+       erst_base |= ir->erst.erst_dma_addr & ~ERST_BASE_RSVDP;
        xhci_write_64(xhci, erst_base, &ir->ir_set->erst_base);
 
        /* Set the event ring dequeue address of this interrupter */
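
The corrected masking keeps the register's RsvdP low bits and installs only the address bits, i.e. new = (old & RSVDP) | (addr & ~RSVDP). A tiny standalone check of that read-modify-write with the same 6-bit mask:

#include <stdint.h>
#include <stdio.h>

#define ERST_BASE_RSVDP  ((uint64_t)0x3f)       /* bits 5:0 are RsvdP */

static uint64_t erst_base_update(uint64_t old, uint64_t dma_addr)
{
        /* Preserve the reserved bits, install the 64-byte aligned address. */
        return (old & ERST_BASE_RSVDP) | (dma_addr & ~ERST_BASE_RSVDP);
}

int main(void)
{
        uint64_t v = erst_base_update(0x2b, 0x12345040);

        printf("%#llx\n", (unsigned long long)v);       /* 0x1234506b */
        return 0;
}
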
index 1dde53f..3e5dc07 100644 (file)
@@ -798,7 +798,7 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
 static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
                struct xhci_ring *ring, struct xhci_td *td)
 {
-       struct device *dev = xhci_to_hcd(xhci)->self.controller;
+       struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        struct xhci_segment *seg = td->bounce_seg;
        struct urb *urb = td->urb;
        size_t len;
@@ -2996,7 +2996,8 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
  */
 static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
                                     struct xhci_interrupter *ir,
-                                    union xhci_trb *event_ring_deq)
+                                    union xhci_trb *event_ring_deq,
+                                    bool clear_ehb)
 {
        u64 temp_64;
        dma_addr_t deq;
@@ -3017,12 +3018,13 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
                        return;
 
                /* Update HC event ring dequeue pointer */
-               temp_64 &= ERST_PTR_MASK;
+               temp_64 &= ERST_DESI_MASK;
                temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
        }
 
        /* Clear the event handler busy flag (RW1C) */
-       temp_64 |= ERST_EHB;
+       if (clear_ehb)
+               temp_64 |= ERST_EHB;
        xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
 }
 
@@ -3103,7 +3105,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
        while (xhci_handle_event(xhci, ir) > 0) {
                if (event_loop++ < TRBS_PER_SEGMENT / 2)
                        continue;
-               xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+               xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
                event_ring_deq = ir->event_ring->dequeue;
 
                /* ring is half-full, force isoc trbs to interrupt more often */
@@ -3113,7 +3115,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
                event_loop = 0;
        }
 
-       xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
+       xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
        ret = IRQ_HANDLED;
 
 out:
@@ -3469,7 +3471,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred,
 static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
                         u32 *trb_buff_len, struct xhci_segment *seg)
 {
-       struct device *dev = xhci_to_hcd(xhci)->self.controller;
+       struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
        unsigned int unalign;
        unsigned int max_pkt;
        u32 new_buff_len;
index 7e282b4..5df3704 100644 (file)
@@ -514,7 +514,7 @@ struct xhci_intr_reg {
 #define        ERST_SIZE_MASK          (0xffff << 16)
 
 /* erst_base bitmasks */
-#define ERST_BASE_RSVDP                (0x3f)
+#define ERST_BASE_RSVDP                (GENMASK_ULL(5, 0))
 
 /* erst_dequeue bitmasks */
 /* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
index 3da1a46..57bbe13 100644 (file)
@@ -434,6 +434,7 @@ static const struct usb_device_id onboard_hub_id_table[] = {
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */
        { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */
+       { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */
        { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */
        { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */
        { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */
index 4026ba6..2a4ab5a 100644 (file)
@@ -47,6 +47,7 @@ static const struct onboard_hub_pdata vialab_vl817_data = {
 };
 
 static const struct of_device_id onboard_hub_match[] = {
+       { .compatible = "usb424,2412", .data = &microchip_usb424_data, },
        { .compatible = "usb424,2514", .data = &microchip_usb424_data, },
        { .compatible = "usb424,2517", .data = &microchip_usb424_data, },
        { .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
index 78c726a..2d62328 100644 (file)
@@ -39,7 +39,7 @@ static const struct musb_register_map musb_regmap[] = {
        { "IntrUsbE",   MUSB_INTRUSBE,  8 },
        { "DevCtl",     MUSB_DEVCTL,    8 },
        { "VControl",   0x68,           32 },
-       { "HWVers",     0x69,           16 },
+       { "HWVers",     MUSB_HWVERS,    16 },
        { "LinkInfo",   MUSB_LINKINFO,  8 },
        { "VPLen",      MUSB_VPLEN,     8 },
        { "HS_EOF1",    MUSB_HS_EOF1,   8 },
index a02c292..bc45077 100644 (file)
@@ -321,10 +321,16 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
        musb_giveback(musb, urb, status);
        qh->is_ready = ready;
 
+       /*
+        * musb->lock was dropped in musb_giveback(), so qh may already
+        * have been freed; look it up again.
+        */
+       qh = musb_ep_get_qh(hw_ep, is_in);
+
        /* reclaim resources (and bandwidth) ASAP; deschedule it, and
         * invalidate qh as soon as list_empty(&hep->urb_list)
         */
-       if (list_empty(&qh->hep->urb_list)) {
+       if (qh && list_empty(&qh->hep->urb_list)) {
                struct list_head        *head;
                struct dma_controller   *dma = musb->dma_controller;
 
@@ -2398,6 +2404,7 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                 * and its URB list has emptied, recycle this qh.
                 */
                if (ready && list_empty(&qh->hep->urb_list)) {
+                       musb_ep_set_qh(qh->hw_ep, is_in, NULL);
                        qh->hep->hcpriv = NULL;
                        list_del(&qh->ring);
                        kfree(qh);
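
musb_giveback() runs callbacks with musb->lock dropped, so a qh pointer cached before the call may point at freed memory; the fix re-reads it from the endpoint afterwards and tolerates NULL. A toy userspace model of the same "re-look-up after dropping the lock" rule (hypothetical types, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct qh { int id; };
struct ep { pthread_mutex_t lock; struct qh *qh; };

/* Callback runs with the lock dropped and may free/replace ep->qh. */
static void giveback(struct ep *ep, void (*cb)(struct ep *))
{
        pthread_mutex_unlock(&ep->lock);
        cb(ep);
        pthread_mutex_lock(&ep->lock);
}

static void advance(struct ep *ep, void (*cb)(struct ep *))
{
        pthread_mutex_lock(&ep->lock);
        giveback(ep, cb);

        /* Do NOT reuse a qh pointer cached before giveback(): re-fetch it. */
        struct qh *qh = ep->qh;
        if (qh)
                printf("still have qh %d\n", qh->id);
        else
                printf("qh was torn down while unlocked\n");
        pthread_mutex_unlock(&ep->lock);
}

static void drop_qh(struct ep *ep)
{
        pthread_mutex_lock(&ep->lock);
        free(ep->qh);
        ep->qh = NULL;
        pthread_mutex_unlock(&ep->lock);
}

int main(void)
{
        struct ep ep = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(struct qh)) };

        ep.qh->id = 1;
        advance(&ep, drop_qh);
        return 0;
}
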
index 426c88a..59e0218 100644 (file)
@@ -304,6 +304,11 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
                        typec_altmode_update_active(alt, false);
                        dp->data.status = 0;
                        dp->data.conf = 0;
+                       if (dp->hpd) {
+                               drm_connector_oob_hotplug_event(dp->connector_fwnode);
+                               dp->hpd = false;
+                               sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
+                       }
                        break;
                case DP_CMD_STATUS_UPDATE:
                        dp->data.status = *vdo;
index bb0b847..52c8137 100644 (file)
@@ -381,10 +381,6 @@ static int qcom_pmic_typec_pdphy_enable(struct pmic_typec_pdphy *pmic_typec_pdph
        struct device *dev = pmic_typec_pdphy->dev;
        int ret;
 
-       ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
-       if (ret)
-               return ret;
-
        /* PD 2.0, DR=TYPEC_DEVICE, PR=TYPEC_SINK */
        ret = regmap_update_bits(pmic_typec_pdphy->regmap,
                                 pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
@@ -422,8 +418,6 @@ static int qcom_pmic_typec_pdphy_disable(struct pmic_typec_pdphy *pmic_typec_pdp
        ret = regmap_write(pmic_typec_pdphy->regmap,
                           pmic_typec_pdphy->base + USB_PDPHY_EN_CONTROL_REG, 0);
 
-       regulator_disable(pmic_typec_pdphy->vdd_pdphy);
-
        return ret;
 }
 
@@ -447,6 +441,10 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
        int i;
        int ret;
 
+       ret = regulator_enable(pmic_typec_pdphy->vdd_pdphy);
+       if (ret)
+               return ret;
+
        pmic_typec_pdphy->tcpm_port = tcpm_port;
 
        ret = pmic_typec_pdphy_reset(pmic_typec_pdphy);
@@ -467,6 +465,8 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
                disable_irq(pmic_typec_pdphy->irq_data[i].irq);
 
        qcom_pmic_typec_pdphy_reset_on(pmic_typec_pdphy);
+
+       regulator_disable(pmic_typec_pdphy->vdd_pdphy);
 }
 
 struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
index 384b422..b35c6e0 100644 (file)
@@ -37,6 +37,15 @@ static int ucsi_psy_get_scope(struct ucsi_connector *con,
        struct device *dev = con->ucsi->dev;
 
        device_property_read_u8(dev, "scope", &scope);
+       if (scope == POWER_SUPPLY_SCOPE_UNKNOWN) {
+               u32 mask = UCSI_CAP_ATTR_POWER_AC_SUPPLY |
+                          UCSI_CAP_ATTR_BATTERY_CHARGING;
+
+               if (con->ucsi->cap.attributes & mask)
+                       scope = POWER_SUPPLY_SCOPE_SYSTEM;
+               else
+                       scope = POWER_SUPPLY_SCOPE_DEVICE;
+       }
        val->intval = scope;
        return 0;
 }
index c6dfe3d..61b6455 100644 (file)
@@ -787,6 +787,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
 
        typec_set_mode(con->port, TYPEC_STATE_SAFE);
 
+       typec_partner_set_usb_power_delivery(con->partner, NULL);
        ucsi_unregister_partner_pdos(con);
        ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
        typec_unregister_partner(con->partner);
@@ -884,6 +885,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
        if (ret < 0) {
                dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
                        __func__, ret);
+               clear_bit(EVENT_PENDING, &con->ucsi->flags);
                goto out_unlock;
        }
 
index 5c87817..3dcf83f 100644 (file)
@@ -3440,11 +3440,15 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
        }
 
        info->fix.mmio_start = raddr;
+#if defined(__i386__) || defined(__ia64__)
        /*
         * By using strong UC we force the MTRR to never have an
         * effect on the MMIO region on both non-PAT and PAT systems.
         */
        par->ati_regbase = ioremap_uc(info->fix.mmio_start, 0x1000);
+#else
+       par->ati_regbase = ioremap(info->fix.mmio_start, 0x1000);
+#endif
        if (par->ati_regbase == NULL)
                return -ENOMEM;
 
index 6d4bfee..5b80bf3 100644 (file)
@@ -382,7 +382,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
 {
        u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
        u32 height = area->height, width = area->width;
-       unsigned long const bits_per_line = p->fix.line_length*8u;
+       unsigned int const bits_per_line = p->fix.line_length * 8u;
        unsigned long __iomem *base = NULL;
        int bits = BITS_PER_LONG, bytes = bits >> 3;
        unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
index c1eda31..7b8bd3a 100644 (file)
@@ -316,7 +316,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
 {
        u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
        u32 height = area->height, width = area->width;
-       unsigned long const bits_per_line = p->fix.line_length*8u;
+       unsigned int const bits_per_line = p->fix.line_length * 8u;
        unsigned long *base = NULL;
        int bits = BITS_PER_LONG, bytes = bits >> 3;
        unsigned dst_idx = 0, src_idx = 0, rev_copy = 0;
index 167585a..719b99a 100644 (file)
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
 
        /*pathes*/
        int path_num;
-       struct mmphw_path_plat path_plats[];
+       struct mmphw_path_plat path_plats[] __counted_by(path_num);
 };
 
 static inline int overlay_is_vid(struct mmp_overlay *overlay)
index f28cb90..42c96f1 100644 (file)
@@ -1645,13 +1645,13 @@ static int omapfb_do_probe(struct platform_device *pdev,
        }
        fbdev->int_irq = platform_get_irq(pdev, 0);
        if (fbdev->int_irq < 0) {
-               r = ENXIO;
+               r = -ENXIO;
                goto cleanup;
        }
 
        fbdev->ext_irq = platform_get_irq(pdev, 1);
        if (fbdev->ext_irq < 0) {
-               r = ENXIO;
+               r = -ENXIO;
                goto cleanup;
        }
 
index 3d76ce1..cf0f706 100644 (file)
@@ -1214,7 +1214,7 @@ static struct platform_driver sa1100fb_driver = {
        },
 };
 
-int __init sa1100fb_init(void)
+static int __init sa1100fb_init(void)
 {
        if (fb_get_options("sa1100fb", NULL))
                return -ENODEV;
index a1a6783..e1f421e 100644 (file)
@@ -1928,10 +1928,10 @@ static void uvesafb_exit(void)
                }
        }
 
-       cn_del_callback(&uvesafb_cn_id);
        driver_remove_file(&uvesafb_driver.driver, &driver_attr_v86d);
        platform_device_unregister(uvesafb_device);
        platform_driver_unregister(&uvesafb_driver);
+       cn_del_callback(&uvesafb_cn_id);
 }
 
 module_exit(uvesafb_exit);
index 0bb86e6..1b2136f 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <linux/irqnr.h>
 #include <linux/pci.h>
+#include <linux/rcupdate.h>
 #include <linux/spinlock.h>
 #include <linux/cpuhotplug.h>
 #include <linux/atomic.h>
@@ -96,6 +97,7 @@ enum xen_irq_type {
 struct irq_info {
        struct list_head list;
        struct list_head eoi_list;
+       struct rcu_work rwork;
        short refcnt;
        u8 spurious_cnt;
        u8 is_accounted;
@@ -147,22 +149,12 @@ const struct evtchn_ops *evtchn_ops;
 static DEFINE_MUTEX(irq_mapping_update_lock);
 
 /*
- * Lock protecting event handling loop against removing event channels.
- * Adding of event channels is no issue as the associated IRQ becomes active
- * only after everything is setup (before request_[threaded_]irq() the handler
- * can't be entered for an event, as the event channel will be unmasked only
- * then).
- */
-static DEFINE_RWLOCK(evtchn_rwlock);
-
-/*
  * Lock hierarchy:
  *
  * irq_mapping_update_lock
- *   evtchn_rwlock
- *     IRQ-desc lock
- *       percpu eoi_list_lock
- *         irq_info->lock
+ *   IRQ-desc lock
+ *     percpu eoi_list_lock
+ *       irq_info->lock
  */
 
 static LIST_HEAD(xen_irq_list_head);
@@ -306,6 +298,22 @@ static void channels_on_cpu_inc(struct irq_info *info)
        info->is_accounted = 1;
 }
 
+static void delayed_free_irq(struct work_struct *work)
+{
+       struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
+                                            rwork);
+       unsigned int irq = info->irq;
+
+       /* Remove the info pointer only now, with no potential users left. */
+       set_info_for_irq(irq, NULL);
+
+       kfree(info);
+
+       /* Legacy IRQ descriptors are managed by the arch. */
+       if (irq >= nr_legacy_irqs())
+               irq_free_desc(irq);
+}
+
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
                                     unsigned irq,
@@ -668,33 +676,36 @@ static void xen_irq_lateeoi_worker(struct work_struct *work)
 
        eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);
 
-       read_lock_irqsave(&evtchn_rwlock, flags);
+       rcu_read_lock();
 
        while (true) {
-               spin_lock(&eoi->eoi_list_lock);
+               spin_lock_irqsave(&eoi->eoi_list_lock, flags);
 
                info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
                                                eoi_list);
 
-               if (info == NULL || now < info->eoi_time) {
-                       spin_unlock(&eoi->eoi_list_lock);
+               if (info == NULL)
+                       break;
+
+               if (now < info->eoi_time) {
+                       mod_delayed_work_on(info->eoi_cpu, system_wq,
+                                           &eoi->delayed,
+                                           info->eoi_time - now);
                        break;
                }
 
                list_del_init(&info->eoi_list);
 
-               spin_unlock(&eoi->eoi_list_lock);
+               spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
                info->eoi_time = 0;
 
                xen_irq_lateeoi_locked(info, false);
        }
 
-       if (info)
-               mod_delayed_work_on(info->eoi_cpu, system_wq,
-                                   &eoi->delayed, info->eoi_time - now);
+       spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
 
-       read_unlock_irqrestore(&evtchn_rwlock, flags);
+       rcu_read_unlock();
 }
 
 static void xen_cpu_init_eoi(unsigned int cpu)
@@ -709,16 +720,15 @@ static void xen_cpu_init_eoi(unsigned int cpu)
 void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
 {
        struct irq_info *info;
-       unsigned long flags;
 
-       read_lock_irqsave(&evtchn_rwlock, flags);
+       rcu_read_lock();
 
        info = info_for_irq(irq);
 
        if (info)
                xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);
 
-       read_unlock_irqrestore(&evtchn_rwlock, flags);
+       rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
 
@@ -732,6 +742,7 @@ static void xen_irq_init(unsigned irq)
 
        info->type = IRQT_UNBOUND;
        info->refcnt = -1;
+       INIT_RCU_WORK(&info->rwork, delayed_free_irq);
 
        set_info_for_irq(irq, info);
        /*
@@ -789,31 +800,18 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
 static void xen_free_irq(unsigned irq)
 {
        struct irq_info *info = info_for_irq(irq);
-       unsigned long flags;
 
        if (WARN_ON(!info))
                return;
 
-       write_lock_irqsave(&evtchn_rwlock, flags);
-
        if (!list_empty(&info->eoi_list))
                lateeoi_list_del(info);
 
        list_del(&info->list);
 
-       set_info_for_irq(irq, NULL);
-
        WARN_ON(info->refcnt > 0);
 
-       write_unlock_irqrestore(&evtchn_rwlock, flags);
-
-       kfree(info);
-
-       /* Legacy IRQ descriptors are managed by the arch. */
-       if (irq < nr_legacy_irqs())
-               return;
-
-       irq_free_desc(irq);
+       queue_rcu_work(system_wq, &info->rwork);
 }
 
 /* Not called for lateeoi events. */
@@ -1711,7 +1709,14 @@ int xen_evtchn_do_upcall(void)
        int cpu = smp_processor_id();
        struct evtchn_loop_ctrl ctrl = { 0 };
 
-       read_lock(&evtchn_rwlock);
+       /*
+        * When closing an event channel the associated IRQ must not be freed
+        * until all cpus have left the event handling loop. This is ensured
+        * by taking the rcu_read_lock() while handling events, as freeing of
+        * the IRQ is handled via queue_rcu_work() _after_ closing the event
+        * channel.
+        */
+       rcu_read_lock();
 
        do {
                vcpu_info->evtchn_upcall_pending = 0;
@@ -1724,7 +1729,7 @@ int xen_evtchn_do_upcall(void)
 
        } while (vcpu_info->evtchn_upcall_pending);
 
-       read_unlock(&evtchn_rwlock);
+       rcu_read_unlock();
 
        /*
         * Increment irq_epoch only now to defer EOIs only for
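
The rework trades the global evtchn_rwlock for RCU: event handling and the lateeoi paths run under rcu_read_lock(), while xen_free_irq() only unlinks the irq_info and queues it, so the kfree()/irq_free_desc() in delayed_free_irq() happens after a grace period. A minimal kernel-style sketch of the queue_rcu_work() deferred-free shape this relies on (hypothetical struct, kernel context assumed):

/* Sketch of the queue_rcu_work() deferred-free pattern used above. */
struct foo {
        struct rcu_work rwork;
        /* ... payload looked up by readers under rcu_read_lock() ... */
};

static void foo_free_fn(struct work_struct *work)
{
        struct foo *f = container_of(to_rcu_work(work), struct foo, rwork);

        kfree(f);                       /* runs after a grace period */
}

static void foo_init(struct foo *f)
{
        INIT_RCU_WORK(&f->rwork, foo_free_fn);
}

static void foo_release(struct foo *f)
{
        /* Unpublish f first, then let RCU + workqueue do the free. */
        queue_rcu_work(system_wq, &f->rwork);
}
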
index caf0bbd..90aaedc 100644 (file)
@@ -313,7 +313,7 @@ static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u16 data_len,
 {
        struct btrfs_delayed_item *item;
 
-       item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
+       item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
        if (item) {
                item->data_len = data_len;
                item->type = type;
index dc1085b..1da2131 100644 (file)
@@ -95,7 +95,7 @@ struct btrfs_delayed_item {
        bool logged;
        /* The maximum leaf size is 64K, so u16 is more than enough. */
        u16 data_len;
-       char data[];
+       char data[] __counted_by(data_len);
 };
 
 static inline void btrfs_init_delayed_root(
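
These two btrfs hunks go together: struct_size() sizes the allocation from the flexible array's element count, and __counted_by(data_len) tells fortified/UBSAN bounds checking which field bounds data[]. A minimal kernel-style sketch of the pairing (hypothetical struct, kernel context assumed):

/* Sketch: flexible array sized by struct_size() and annotated for bounds checks. */
struct blob {
        u16 len;
        u8  data[] __counted_by(len);
};

static struct blob *blob_alloc(u16 len, gfp_t gfp)
{
        struct blob *b = kmalloc(struct_size(b, data, len), gfp);

        if (b)
                b->len = len;   /* set the counter before touching data[] */
        return b;
}
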
index f49e597..1a093ec 100644 (file)
@@ -954,10 +954,6 @@ static int btrfs_parse_subvol_options(const char *options, char **subvol_name,
 
                        *subvol_objectid = subvolid;
                        break;
-               case Opt_err:
-                       btrfs_err(NULL, "unrecognized mount option '%s'", p);
-                       error = -EINVAL;
-                       goto out;
                default:
                        break;
                }
index 5a5a8d4..b9ef6f5 100644 (file)
@@ -5109,7 +5109,7 @@ static void init_alloc_chunk_ctl_policy_regular(
        ASSERT(space_info);
 
        ctl->max_chunk_size = READ_ONCE(space_info->chunk_size);
-       ctl->max_stripe_size = ctl->max_chunk_size;
+       ctl->max_stripe_size = min_t(u64, ctl->max_chunk_size, SZ_1G);
 
        if (ctl->type & BTRFS_BLOCK_GROUP_SYSTEM)
                ctl->devs_max = min_t(int, ctl->devs_max, BTRFS_MAX_DEVS_SYS_CHUNK);
index e1f31b8..5b5112c 100644 (file)
@@ -460,7 +460,7 @@ int ceph_fname_to_usr(const struct ceph_fname *fname, struct fscrypt_str *tname,
 out:
        fscrypt_fname_free_buffer(&_tname);
 out_inode:
-       if ((dir != fname->dir) && !IS_ERR(dir)) {
+       if (dir != fname->dir) {
                if ((dir->i_state & I_NEW))
                        discard_new_inode(dir);
                else
index b1da02f..b5f8038 100644 (file)
@@ -2969,7 +2969,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
                ret = do_splice_direct(src_file, &src_off, dst_file,
                                       &dst_off, src_objlen, flags);
                /* Abort on short copies or on error */
-               if (ret < src_objlen) {
+               if (ret < (long)src_objlen) {
                        dout("Failed partial copy (%zd)\n", ret);
                        goto out;
                }
index 800ab79..b79100f 100644 (file)
@@ -769,9 +769,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
                        ci->i_truncate_seq = truncate_seq;
 
                        /* the MDS should have revoked these caps */
-                       WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
-                                              CEPH_CAP_FILE_RD |
-                                              CEPH_CAP_FILE_WR |
+                       WARN_ON_ONCE(issued & (CEPH_CAP_FILE_RD |
                                               CEPH_CAP_FILE_LAZYIO));
                        /*
                         * If we hold relevant caps, or in the case where we're
index a0ad7a0..98589aa 100644 (file)
@@ -192,17 +192,19 @@ int vfs_parse_fs_string(struct fs_context *fc, const char *key,
 EXPORT_SYMBOL(vfs_parse_fs_string);
 
 /**
- * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * vfs_parse_monolithic_sep - Parse key[=val][,key[=val]]* mount data
  * @fc: The superblock configuration to fill in.
  * @data: The data to parse
+ * @sep: callback for separating next option
  *
- * Parse a blob of data that's in key[=val][,key[=val]]* form.  This can be
- * called from the ->monolithic_mount_data() fs_context operation.
+ * Parse a blob of data that's in key[=val][,key[=val]]* form with a custom
+ * option separator callback.
  *
  * Returns 0 on success or the error returned by the ->parse_option() fs_context
  * operation on failure.
  */
-int generic_parse_monolithic(struct fs_context *fc, void *data)
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+                            char *(*sep)(char **))
 {
        char *options = data, *key;
        int ret = 0;
@@ -214,7 +216,7 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
        if (ret)
                return ret;
 
-       while ((key = strsep(&options, ",")) != NULL) {
+       while ((key = sep(&options)) != NULL) {
                if (*key) {
                        size_t v_len = 0;
                        char *value = strchr(key, '=');
@@ -233,6 +235,28 @@ int generic_parse_monolithic(struct fs_context *fc, void *data)
 
        return ret;
 }
+EXPORT_SYMBOL(vfs_parse_monolithic_sep);
+
+static char *vfs_parse_comma_sep(char **s)
+{
+       return strsep(s, ",");
+}
+
+/**
+ * generic_parse_monolithic - Parse key[=val][,key[=val]]* mount data
+ * @fc: The superblock configuration to fill in.
+ * @data: The data to parse
+ *
+ * Parse a blob of data that's in key[=val][,key[=val]]* form.  This can be
+ * called from the ->monolithic_mount_data() fs_context operation.
+ *
+ * Returns 0 on success or the error returned by the ->parse_option() fs_context
+ * operation on failure.
+ */
+int generic_parse_monolithic(struct fs_context *fc, void *data)
+{
+       return vfs_parse_monolithic_sep(fc, data, vfs_parse_comma_sep);
+}
 EXPORT_SYMBOL(generic_parse_monolithic);
 
 /**
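
generic_parse_monolithic() is now a thin wrapper passing a comma strsep() callback to vfs_parse_monolithic_sep(), so a filesystem can supply any char *(*sep)(char **) of its own. A standalone sketch of one hypothetical separator that refuses to split on a backslash-escaped comma (illustrative only, not the callback any in-tree filesystem uses):

#include <stdio.h>
#include <string.h>

/* Split on ',' but treat "\," as a literal comma inside an option. */
static char *sep_unescaped_comma(char **s)
{
        char *start = *s, *p;

        if (!start)
                return NULL;

        for (p = start; *p; p++) {
                if (*p == '\\' && p[1]) {
                        p++;                    /* skip the escaped char */
                } else if (*p == ',') {
                        *p = '\0';
                        *s = p + 1;
                        return start;
                }
        }
        *s = NULL;
        return start;
}

int main(void)
{
        char opts[] = "lowerdir=/a\\,b,ro";
        char *rest = opts, *key;

        while ((key = sep_unescaped_comma(&rest)) != NULL)
                printf("option: %s\n", key);
        return 0;
}
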
index 567ee54..94565bd 100644 (file)
@@ -188,7 +188,7 @@ getname_flags(const char __user *filename, int flags, int *empty)
                }
        }
 
-       result->refcnt = 1;
+       atomic_set(&result->refcnt, 1);
        /* The empty path is special. */
        if (unlikely(!len)) {
                if (empty)
@@ -249,7 +249,7 @@ getname_kernel(const char * filename)
        memcpy((char *)result->name, filename, len);
        result->uptr = NULL;
        result->aname = NULL;
-       result->refcnt = 1;
+       atomic_set(&result->refcnt, 1);
        audit_getname(result);
 
        return result;
@@ -261,9 +261,10 @@ void putname(struct filename *name)
        if (IS_ERR(name))
                return;
 
-       BUG_ON(name->refcnt <= 0);
+       if (WARN_ON_ONCE(!atomic_read(&name->refcnt)))
+               return;
 
-       if (--name->refcnt > 0)
+       if (!atomic_dec_and_test(&name->refcnt))
                return;
 
        if (name->name != name->iname) {
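
struct filename's refcount becomes an atomic_t, and putname() now warns and bails on an already-zero count instead of BUG(), freeing only when atomic_dec_and_test() drops the last reference. A standalone C11 sketch of the same get/put discipline using stdatomic:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct name {
        atomic_int refcnt;
        char buf[64];
};

static struct name *name_get(struct name *n)
{
        atomic_fetch_add(&n->refcnt, 1);
        return n;
}

static void name_put(struct name *n)
{
        /* Best-effort over-put check, mirroring a warning rather than a crash. */
        if (atomic_load(&n->refcnt) <= 0) {
                fprintf(stderr, "name_put on zero refcount\n");
                return;
        }
        /* Free only when the last reference is dropped. */
        if (atomic_fetch_sub(&n->refcnt, 1) == 1)
                free(n);
}

int main(void)
{
        struct name *n = calloc(1, sizeof(*n));

        atomic_init(&n->refcnt, 1);
        name_get(n);
        name_put(n);
        name_put(n);            /* last put frees n */
        return 0;
}
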
index f69c451..62fe0b6 100644 (file)
@@ -1585,16 +1585,25 @@ static int fanotify_test_fsid(struct dentry *dentry, __kernel_fsid_t *fsid)
 }
 
 /* Check if filesystem can encode a unique fid */
-static int fanotify_test_fid(struct dentry *dentry)
+static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
 {
+       unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS;
+       const struct export_operations *nop = dentry->d_sb->s_export_op;
+
+       /*
+        * We need to make sure that the filesystem supports encoding of
+        * file handles so user can use name_to_handle_at() to compare fids
+        * reported with events to the file handle of watched objects.
+        */
+       if (!nop)
+               return -EOPNOTSUPP;
+
        /*
-        * We need to make sure that the file system supports at least
-        * encoding a file handle so user can use name_to_handle_at() to
-        * compare fid returned with event to the file handle of watched
-        * objects. However, even the relaxed AT_HANDLE_FID flag requires
-        * at least empty export_operations for ecoding unique file ids.
+        * For sb/mount mark, we also need to make sure that the filesystem
+        * supports decoding file handles, so user has a way to map back the
+        * reported fids to filesystem objects.
         */
-       if (!dentry->d_sb->s_export_op)
+       if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
                return -EOPNOTSUPP;
 
        return 0;
@@ -1812,7 +1821,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
                if (ret)
                        goto path_put_and_out;
 
-               ret = fanotify_test_fid(path.dentry);
+               ret = fanotify_test_fid(path.dentry, flags);
                if (ret)
                        goto path_put_and_out;
 
index a9d82bb..63f7025 100644 (file)
@@ -1106,10 +1106,10 @@ repack:
                }
        }
 
-       /* 
+       /*
         * The code below may require additional cluster (to extend attribute list)
-        * and / or one MFT record 
-        * It is too complex to undo operations if -ENOSPC occurs deep inside 
+        * and / or one MFT record
+        * It is too complex to undo operations if -ENOSPC occurs deep inside
         * in 'ni_insert_nonresident'.
         * Return in advance -ENOSPC here if there are no free cluster and no free MFT.
         */
@@ -1736,10 +1736,8 @@ repack:
                        le_b = NULL;
                        attr_b = ni_find_attr(ni, NULL, &le_b, ATTR_DATA, NULL,
                                              0, NULL, &mi_b);
-                       if (!attr_b) {
-                               err = -ENOENT;
-                               goto out;
-                       }
+                       if (!attr_b)
+                               return -ENOENT;
 
                        attr = attr_b;
                        le = le_b;
index 42631b3..7c01735 100644 (file)
@@ -52,7 +52,8 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
 
        if (!attr->non_res) {
                lsize = le32_to_cpu(attr->res.data_size);
-               le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+               /* attr is resident: lsize < record_size (1K or 4K) */
+               le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
                if (!le) {
                        err = -ENOMEM;
                        goto out;
@@ -80,7 +81,17 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr)
                if (err < 0)
                        goto out;
 
-               le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN);
+               /* attr is nonresident.
+                * The worst case:
+                * 1T (2^40) extremely fragmented file.
+                * cluster = 4K (2^12) => 2^28 fragments
+                * 2^9 fragments per one record => 2^19 records
+                * 2^5 bytes of ATTR_LIST_ENTRY per one record => 2^24 bytes.
+                *
+                * the result is 16M bytes per attribute list.
+                * Use kvmalloc to allocate in range [several Kbytes - dozen Mbytes]
+                */
+               le = kvmalloc(al_aligned(lsize), GFP_KERNEL);
                if (!le) {
                        err = -ENOMEM;
                        goto out;
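
A quick check of the worst-case estimate in the comment added above: 2^40 bytes / 2^12 bytes per cluster = 2^28 fragments; 2^28 fragments / 2^9 fragments per record = 2^19 records; 2^19 records * 2^5 bytes per ATTR_LIST_ENTRY = 2^24 bytes = 16 MiB. An allocation that can grow that large is a poor fit for kmalloc(), which is why both paths move to kvmalloc().
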
index 107e808..63f14a0 100644 (file)
@@ -125,6 +125,7 @@ void wnd_close(struct wnd_bitmap *wnd)
        struct rb_node *node, *next;
 
        kfree(wnd->free_bits);
+       wnd->free_bits = NULL;
        run_close(&wnd->run);
 
        node = rb_first(&wnd->start_tree);
@@ -659,7 +660,8 @@ int wnd_init(struct wnd_bitmap *wnd, struct super_block *sb, size_t nbits)
                wnd->bits_last = wbits;
 
        wnd->free_bits =
-               kcalloc(wnd->nwnd, sizeof(u16), GFP_NOFS | __GFP_NOWARN);
+               kvmalloc_array(wnd->nwnd, sizeof(u16), GFP_KERNEL | __GFP_ZERO);
+
        if (!wnd->free_bits)
                return -ENOMEM;
 
index 063a665..ec0566b 100644 (file)
@@ -309,7 +309,11 @@ static inline int ntfs_filldir(struct ntfs_sb_info *sbi, struct ntfs_inode *ni,
                return 0;
        }
 
-       dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
+       /* NTFS: symlinks are "dir + reparse" or "file + reparse" */
+       if (fname->dup.fa & FILE_ATTRIBUTE_REPARSE_POINT)
+               dt_type = DT_LNK;
+       else
+               dt_type = (fname->dup.fa & FILE_ATTRIBUTE_DIRECTORY) ? DT_DIR : DT_REG;
 
        return !dir_emit(ctx, (s8 *)name, name_len, ino, dt_type);
 }
index 962f12c..1f7a194 100644 (file)
@@ -745,8 +745,8 @@ static ssize_t ntfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 }
 
 static ssize_t ntfs_file_splice_read(struct file *in, loff_t *ppos,
-                                    struct pipe_inode_info *pipe,
-                                    size_t len, unsigned int flags)
+                                    struct pipe_inode_info *pipe, size_t len,
+                                    unsigned int flags)
 {
        struct inode *inode = in->f_mapping->host;
        struct ntfs_inode *ni = ntfs_i(inode);
index 2b85cb1..dad976a 100644 (file)
@@ -2148,7 +2148,7 @@ out1:
 
        for (i = 0; i < pages_per_frame; i++) {
                pg = pages[i];
-               if (i == idx)
+               if (i == idx || !pg)
                        continue;
                unlock_page(pg);
                put_page(pg);
@@ -3208,6 +3208,12 @@ static bool ni_update_parent(struct ntfs_inode *ni, struct NTFS_DUP_INFO *dup,
                if (!fname || !memcmp(&fname->dup, dup, sizeof(fname->dup)))
                        continue;
 
+               /* Check simple case when parent inode equals current inode. */
+               if (ino_get(&fname->home) == ni->vfs_inode.i_ino) {
+                       ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+                       continue;
+               }
+
                /* ntfs_iget5 may sleep. */
                dir = ntfs_iget5(sb, &fname->home, NULL);
                if (IS_ERR(dir)) {
index 12f28cd..98ccb66 100644 (file)
@@ -2168,8 +2168,10 @@ file_is_valid:
 
                        if (!page) {
                                page = kmalloc(log->page_size, GFP_NOFS);
-                               if (!page)
-                                       return -ENOMEM;
+                               if (!page) {
+                                       err = -ENOMEM;
+                                       goto out;
+                               }
                        }
 
                        /*
index 33afee0..fbfe21d 100644 (file)
@@ -983,18 +983,11 @@ out:
        if (err)
                return err;
 
-       mark_inode_dirty(&ni->vfs_inode);
+       mark_inode_dirty_sync(&ni->vfs_inode);
        /* verify(!ntfs_update_mftmirr()); */
 
-       /*
-        * If we used wait=1, sync_inode_metadata waits for the io for the
-        * inode to finish. It hangs when media is removed.
-        * So wait=0 is sent down to sync_inode_metadata
-        * and filemap_fdatawrite is used for the data blocks.
-        */
-       err = sync_inode_metadata(&ni->vfs_inode, 0);
-       if (!err)
-               err = filemap_fdatawrite(ni->vfs_inode.i_mapping);
+       /* write mft record on disk. */
+       err = _ni_write_inode(&ni->vfs_inode, 1);
 
        return err;
 }
@@ -2461,10 +2454,12 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
 {
        CLST end, i, zone_len, zlen;
        struct wnd_bitmap *wnd = &sbi->used.bitmap;
+       bool dirty = false;
 
        down_write_nested(&wnd->rw_lock, BITMAP_MUTEX_CLUSTERS);
        if (!wnd_is_used(wnd, lcn, len)) {
-               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
+               /* mark volume as dirty out of wnd->rw_lock */
+               dirty = true;
 
                end = lcn + len;
                len = 0;
@@ -2518,6 +2513,8 @@ void mark_as_free_ex(struct ntfs_sb_info *sbi, CLST lcn, CLST len, bool trim)
 
 out:
        up_write(&wnd->rw_lock);
+       if (dirty)
+               ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
 }
 
 /*
index 124c6e8..cf92b24 100644 (file)
@@ -729,6 +729,9 @@ static struct NTFS_DE *hdr_find_e(const struct ntfs_index *indx,
        u32 total = le32_to_cpu(hdr->total);
        u16 offs[128];
 
+       if (unlikely(!cmp))
+               return NULL;
+
 fill_table:
        if (end > total)
                return NULL;
index eb2ed07..d6d021e 100644 (file)
@@ -170,8 +170,8 @@ next_attr:
                nt2kernel(std5->cr_time, &ni->i_crtime);
 #endif
                nt2kernel(std5->a_time, &inode->i_atime);
-               ctime = inode_get_ctime(inode);
                nt2kernel(std5->c_time, &ctime);
+               inode_set_ctime_to_ts(inode, ctime);
                nt2kernel(std5->m_time, &inode->i_mtime);
 
                ni->std_fa = std5->fa;
@@ -1660,7 +1660,8 @@ struct inode *ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
        d_instantiate(dentry, inode);
 
        /* Set original time. inode times (i_ctime) may be changed in ntfs_init_acl. */
-       inode->i_atime = inode->i_mtime = inode_set_ctime_to_ts(inode, ni->i_crtime);
+       inode->i_atime = inode->i_mtime =
+               inode_set_ctime_to_ts(inode, ni->i_crtime);
        dir->i_mtime = inode_set_ctime_to_ts(dir, ni->i_crtime);
 
        mark_inode_dirty(dir);
index ad430d5..eedacf9 100644 (file)
@@ -156,8 +156,8 @@ static int ntfs_link(struct dentry *ode, struct inode *dir, struct dentry *de)
        err = ntfs_link_inode(inode, de);
 
        if (!err) {
-               dir->i_mtime = inode_set_ctime_to_ts(inode,
-                                                    inode_set_ctime_current(dir));
+               dir->i_mtime = inode_set_ctime_to_ts(
+                       inode, inode_set_ctime_current(dir));
                mark_inode_dirty(inode);
                mark_inode_dirty(dir);
                d_instantiate(de, inode);
@@ -373,7 +373,7 @@ static int ntfs_atomic_open(struct inode *dir, struct dentry *dentry,
 
 #ifdef CONFIG_NTFS3_FS_POSIX_ACL
        if (IS_POSIXACL(dir)) {
-               /* 
+               /*
                 * Load in cache current acl to avoid ni_lock(dir):
                 * ntfs_create_inode -> ntfs_init_acl -> posix_acl_create ->
                 * ntfs_get_acl -> ntfs_get_acl_ex -> ni_lock
index 98b76d1..86aecbb 100644 (file)
@@ -847,7 +847,7 @@ struct OBJECT_ID {
        // Birth Volume Id is the Object Id of the Volume on.
        // which the Object Id was allocated. It never changes.
        struct GUID BirthVolumeId; //0x10:
-       
+
        // Birth Object Id is the first Object Id that was
        // ever assigned to this MFT Record. I.e. If the Object Id
        // is changed for some reason, this field will reflect the
index 629403e..0e6a277 100644 (file)
@@ -42,9 +42,11 @@ enum utf16_endian;
 #define MINUS_ONE_T                    ((size_t)(-1))
 /* Biggest MFT / smallest cluster */
 #define MAXIMUM_BYTES_PER_MFT          4096
+#define MAXIMUM_SHIFT_BYTES_PER_MFT    12
 #define NTFS_BLOCKS_PER_MFT_RECORD     (MAXIMUM_BYTES_PER_MFT / 512)
 
 #define MAXIMUM_BYTES_PER_INDEX                4096
+#define MAXIMUM_SHIFT_BYTES_PER_INDEX  12
 #define NTFS_BLOCKS_PER_INODE          (MAXIMUM_BYTES_PER_INDEX / 512)
 
 /* NTFS specific error code when fixup failed. */
@@ -495,8 +497,6 @@ int ntfs_getattr(struct mnt_idmap *idmap, const struct path *path,
                 struct kstat *stat, u32 request_mask, u32 flags);
 int ntfs3_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                  struct iattr *attr);
-void ntfs_sparse_cluster(struct inode *inode, struct page *page0, CLST vcn,
-                        CLST len);
 int ntfs_file_open(struct inode *inode, struct file *file);
 int ntfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                __u64 start, __u64 len);
index c12ebff..53629b1 100644 (file)
@@ -189,12 +189,19 @@ out:
        return err;
 }
 
+/*
+ * mi_enum_attr - start/continue attributes enumeration in record.
+ *
+ * NOTE: mi->mrec is a buffer of sbi->record_size bytes;
+ * here we rely on mi->mrec->total == sbi->record_size (see mi_read).
+ */
 struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
 {
        const struct MFT_REC *rec = mi->mrec;
        u32 used = le32_to_cpu(rec->used);
-       u32 t32, off, asize;
+       u32 t32, off, asize, prev_type;
        u16 t16;
+       u64 data_size, alloc_size, tot_size;
 
        if (!attr) {
                u32 total = le32_to_cpu(rec->total);
@@ -213,6 +220,7 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
                if (!is_rec_inuse(rec))
                        return NULL;
 
+               prev_type = 0;
                attr = Add2Ptr(rec, off);
        } else {
                /* Check if input attr inside record. */
@@ -226,11 +234,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
                        return NULL;
                }
 
-               if (off + asize < off) {
-                       /* Overflow check. */
+               /* Overflow check. */
+               if (off + asize < off)
                        return NULL;
-               }
 
+               prev_type = le32_to_cpu(attr->type);
                attr = Add2Ptr(attr, asize);
                off += asize;
        }
@@ -250,7 +258,11 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
 
        /* 0x100 is last known attribute for now. */
        t32 = le32_to_cpu(attr->type);
-       if ((t32 & 0xf) || (t32 > 0x100))
+       if (!t32 || (t32 & 0xf) || (t32 > 0x100))
+               return NULL;
+
+       /* attributes in record must be ordered by type */
+       if (t32 < prev_type)
                return NULL;
 
        /* Check overflow and boundary. */
@@ -259,16 +271,15 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
 
        /* Check size of attribute. */
        if (!attr->non_res) {
+               /* Check resident fields. */
                if (asize < SIZEOF_RESIDENT)
                        return NULL;
 
                t16 = le16_to_cpu(attr->res.data_off);
-
                if (t16 > asize)
                        return NULL;
 
-               t32 = le32_to_cpu(attr->res.data_size);
-               if (t16 + t32 > asize)
+               if (t16 + le32_to_cpu(attr->res.data_size) > asize)
                        return NULL;
 
                t32 = sizeof(short) * attr->name_len;
@@ -278,21 +289,52 @@ struct ATTRIB *mi_enum_attr(struct mft_inode *mi, struct ATTRIB *attr)
                return attr;
        }
 
-       /* Check some nonresident fields. */
-       if (attr->name_len &&
-           le16_to_cpu(attr->name_off) + sizeof(short) * attr->name_len >
-                   le16_to_cpu(attr->nres.run_off)) {
+       /* Check nonresident fields. */
+       if (attr->non_res != 1)
+               return NULL;
+
+       t16 = le16_to_cpu(attr->nres.run_off);
+       if (t16 > asize)
+               return NULL;
+
+       t32 = sizeof(short) * attr->name_len;
+       if (t32 && le16_to_cpu(attr->name_off) + t32 > t16)
+               return NULL;
+
+       /* Check start/end vcn. */
+       if (le64_to_cpu(attr->nres.svcn) > le64_to_cpu(attr->nres.evcn) + 1)
                return NULL;
-       }
 
-       if (attr->nres.svcn || !is_attr_ext(attr)) {
+       data_size = le64_to_cpu(attr->nres.data_size);
+       if (le64_to_cpu(attr->nres.valid_size) > data_size)
+               return NULL;
+
+       alloc_size = le64_to_cpu(attr->nres.alloc_size);
+       if (data_size > alloc_size)
+               return NULL;
+
+       t32 = mi->sbi->cluster_mask;
+       if (alloc_size & t32)
+               return NULL;
+
+       if (!attr->nres.svcn && is_attr_ext(attr)) {
+               /* First segment of sparse/compressed attribute */
+               if (asize + 8 < SIZEOF_NONRESIDENT_EX)
+                       return NULL;
+
+               tot_size = le64_to_cpu(attr->nres.total_size);
+               if (tot_size & t32)
+                       return NULL;
+
+               if (tot_size > alloc_size)
+                       return NULL;
+       } else {
                if (asize + 8 < SIZEOF_NONRESIDENT)
                        return NULL;
 
                if (attr->nres.c_unit)
                        return NULL;
-       } else if (asize + 8 < SIZEOF_NONRESIDENT_EX)
-               return NULL;
+       }
 
        return attr;
 }
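
The enlarged mi_enum_attr() checks treat the MFT record as untrusted input: the attribute type must be non-zero, 16-byte aligned and non-decreasing, every offset must stay inside the record, and the nonresident size fields must satisfy valid_size <= data_size <= alloc_size with a cluster-aligned allocation. A standalone sketch of the same "validate bounds before use" idiom for a simpler, hypothetical length-prefixed record:

#include <stdint.h>
#include <stdio.h>

struct entry {                  /* hypothetical on-disk entry header */
        uint32_t type;
        uint32_t size;          /* total size of this entry, 8-byte aligned */
};

/* Return the next valid entry after 'cur' (NULL to start), or NULL. */
static const struct entry *next_entry(const uint8_t *rec, size_t rec_len,
                                      const struct entry *cur)
{
        size_t off = cur ? (size_t)((const uint8_t *)cur - rec) + cur->size : 0;
        const struct entry *e;

        if (off > rec_len || rec_len - off < sizeof(*e))
                return NULL;                    /* header would overflow record */

        e = (const struct entry *)(rec + off);
        if (!e->type || (e->size & 7) || e->size < sizeof(*e))
                return NULL;                    /* malformed entry */
        if (e->size > rec_len - off)
                return NULL;                    /* body would overflow record */
        if (cur && e->type < cur->type)
                return NULL;                    /* entries must stay ordered */
        return e;
}

int main(void)
{
        struct entry rec[2] = { { 1, sizeof(struct entry) },
                                { 2, sizeof(struct entry) } };
        const struct entry *e = NULL;
        int n = 0;

        while ((e = next_entry((const uint8_t *)rec, sizeof(rec), e)) != NULL)
                n++;
        printf("%d valid entries\n", n);        /* 2 */
        return 0;
}
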
index 5661a36..f763e32 100644 (file)
@@ -453,15 +453,23 @@ static struct proc_dir_entry *proc_info_root;
  * ntfs3.1
  * cluster size
  * number of clusters
+ * total number of mft records
+ * number of used mft records ~= number of files + folders
+ * real state of ntfs "dirty"/"clean"
+ * current state of ntfs "dirty"/"clean"
 */
 static int ntfs3_volinfo(struct seq_file *m, void *o)
 {
        struct super_block *sb = m->private;
        struct ntfs_sb_info *sbi = sb->s_fs_info;
 
-       seq_printf(m, "ntfs%d.%d\n%u\n%zu\n", sbi->volume.major_ver,
-                  sbi->volume.minor_ver, sbi->cluster_size,
-                  sbi->used.bitmap.nbits);
+       seq_printf(m, "ntfs%d.%d\n%u\n%zu\n%zu\n%zu\n%s\n%s\n",
+                  sbi->volume.major_ver, sbi->volume.minor_ver,
+                  sbi->cluster_size, sbi->used.bitmap.nbits,
+                  sbi->mft.bitmap.nbits,
+                  sbi->mft.bitmap.nbits - wnd_zeroes(&sbi->mft.bitmap),
+                  sbi->volume.real_dirty ? "dirty" : "clean",
+                  (sbi->volume.flags & VOLUME_FLAG_DIRTY) ? "dirty" : "clean");
 
        return 0;
 }
@@ -488,9 +496,13 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
 {
        int err;
        struct super_block *sb = pde_data(file_inode(file));
-       struct ntfs_sb_info *sbi = sb->s_fs_info;
        ssize_t ret = count;
-       u8 *label = kmalloc(count, GFP_NOFS);
+       u8 *label;
+
+       if (sb_rdonly(sb))
+               return -EROFS;
+
+       label = kmalloc(count, GFP_NOFS);
 
        if (!label)
                return -ENOMEM;
@@ -502,7 +514,7 @@ static ssize_t ntfs3_label_write(struct file *file, const char __user *buffer,
        while (ret > 0 && label[ret - 1] == '\n')
                ret -= 1;
 
-       err = ntfs_set_label(sbi, label, ret);
+       err = ntfs_set_label(sb->s_fs_info, label, ret);
 
        if (err < 0) {
                ntfs_err(sb, "failed (%d) to write label", err);
@@ -576,20 +588,30 @@ static noinline void ntfs3_put_sbi(struct ntfs_sb_info *sbi)
        wnd_close(&sbi->mft.bitmap);
        wnd_close(&sbi->used.bitmap);
 
-       if (sbi->mft.ni)
+       if (sbi->mft.ni) {
                iput(&sbi->mft.ni->vfs_inode);
+               sbi->mft.ni = NULL;
+       }
 
-       if (sbi->security.ni)
+       if (sbi->security.ni) {
                iput(&sbi->security.ni->vfs_inode);
+               sbi->security.ni = NULL;
+       }
 
-       if (sbi->reparse.ni)
+       if (sbi->reparse.ni) {
                iput(&sbi->reparse.ni->vfs_inode);
+               sbi->reparse.ni = NULL;
+       }
 
-       if (sbi->objid.ni)
+       if (sbi->objid.ni) {
                iput(&sbi->objid.ni->vfs_inode);
+               sbi->objid.ni = NULL;
+       }
 
-       if (sbi->volume.ni)
+       if (sbi->volume.ni) {
                iput(&sbi->volume.ni->vfs_inode);
+               sbi->volume.ni = NULL;
+       }
 
        ntfs_update_mftmirr(sbi, 0);
 
@@ -836,7 +858,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        struct ntfs_sb_info *sbi = sb->s_fs_info;
        int err;
        u32 mb, gb, boot_sector_size, sct_per_clst, record_size;
-       u64 sectors, clusters, mlcn, mlcn2;
+       u64 sectors, clusters, mlcn, mlcn2, dev_size0;
        struct NTFS_BOOT *boot;
        struct buffer_head *bh;
        struct MFT_REC *rec;
@@ -845,6 +867,9 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
        u32 boot_off = 0;
        const char *hint = "Primary boot";
 
+       /* Save original dev_size. Used with alternative boot. */
+       dev_size0 = dev_size;
+
        sbi->volume.blocks = dev_size >> PAGE_SHIFT;
 
        bh = ntfs_bread(sb, 0);
@@ -853,6 +878,11 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
 
 check_boot:
        err = -EINVAL;
+
+       /* Corrupted image; do not read OOB */
+       if (bh->b_size - sizeof(*boot) < boot_off)
+               goto out;
+
        boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
 
        if (memcmp(boot->system_id, "NTFS    ", sizeof("NTFS    ") - 1)) {
@@ -899,9 +929,17 @@ check_boot:
                goto out;
        }
 
-       sbi->record_size = record_size =
-               boot->record_size < 0 ? 1 << (-boot->record_size) :
-                                       (u32)boot->record_size << cluster_bits;
+       if (boot->record_size >= 0) {
+               record_size = (u32)boot->record_size << cluster_bits;
+       } else if (-boot->record_size <= MAXIMUM_SHIFT_BYTES_PER_MFT) {
+               record_size = 1u << (-boot->record_size);
+       } else {
+               ntfs_err(sb, "%s: invalid record size %d.", hint,
+                        boot->record_size);
+               goto out;
+       }
+
+       sbi->record_size = record_size;
        sbi->record_bits = blksize_bits(record_size);
        sbi->attr_size_tr = (5 * record_size >> 4); // ~320 bytes
 
@@ -918,9 +956,15 @@ check_boot:
                goto out;
        }
 
-       sbi->index_size = boot->index_size < 0 ?
-                                 1u << (-boot->index_size) :
-                                 (u32)boot->index_size << cluster_bits;
+       if (boot->index_size >= 0) {
+               sbi->index_size = (u32)boot->index_size << cluster_bits;
+       } else if (-boot->index_size <= MAXIMUM_SHIFT_BYTES_PER_INDEX) {
+               sbi->index_size = 1u << (-boot->index_size);
+       } else {
+               ntfs_err(sb, "%s: invalid index size %d.", hint,
+                        boot->index_size);
+               goto out;
+       }
 
        /* Check index record size. */
        if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
@@ -1055,17 +1099,17 @@ check_boot:
 
        if (bh->b_blocknr && !sb_rdonly(sb)) {
                /*
-            * Alternative boot is ok but primary is not ok.
-            * Do not update primary boot here 'cause it may be faked boot.
-            * Let ntfs to be mounted and update boot later.
-            */
+                * Alternative boot is ok but primary is not ok.
+                * Do not update primary boot here 'cause it may be a faked boot.
+                * Let ntfs be mounted and update boot later.
+                */
                *boot2 = kmemdup(boot, sizeof(*boot), GFP_NOFS | __GFP_NOWARN);
        }
 
 out:
-       if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
+       if (err == -EINVAL && !bh->b_blocknr && dev_size0 > PAGE_SHIFT) {
                u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
-               u64 lbo = dev_size - sizeof(*boot);
+               u64 lbo = dev_size0 - sizeof(*boot);
 
                /*
                 * Try alternative boot (last sector)
@@ -1079,6 +1123,7 @@ out:
 
                boot_off = lbo & (block_size - 1);
                hint = "Alternative boot";
+               dev_size = dev_size0; /* restore original size. */
                goto check_boot;
        }
        brelse(bh);
@@ -1367,7 +1412,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
        }
 
        bytes = inode->i_size;
-       sbi->def_table = t = kmalloc(bytes, GFP_NOFS | __GFP_NOWARN);
+       sbi->def_table = t = kvmalloc(bytes, GFP_KERNEL);
        if (!t) {
                err = -ENOMEM;
                goto put_inode_out;
@@ -1521,9 +1566,9 @@ load_root:
 
        if (boot2) {
                /*
-            * Alternative boot is ok but primary is not ok.
-            * Volume is recognized as NTFS. Update primary boot.
-            */
+                * Alternative boot is ok but primary is not ok.
+                * Volume is recognized as NTFS. Update primary boot.
+                */
                struct buffer_head *bh0 = sb_getblk(sb, 0);
                if (bh0) {
                        if (buffer_locked(bh0))
@@ -1564,6 +1609,7 @@ put_inode_out:
 out:
        ntfs3_put_sbi(sbi);
        kfree(boot2);
+       ntfs3_put_sbi(sbi);
        return err;
 }
 
@@ -1757,7 +1803,6 @@ static int __init init_ntfs_fs(void)
        if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
                pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
 
-
 #ifdef CONFIG_PROC_FS
        /* Create "/proc/fs/ntfs3" */
        proc_info_root = proc_mkdir("fs/ntfs3", NULL);
@@ -1799,7 +1844,6 @@ static void __exit exit_ntfs_fs(void)
        if (proc_info_root)
                remove_proc_entry("fs/ntfs3", NULL);
 #endif
-
 }
 
 MODULE_LICENSE("GPL");
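
The record_size and index_size hunks above replace the unchecked ternary decoding of the signed boot-sector fields with range-checked branches: a non-negative value is a cluster count shifted by cluster_bits, a negative value encodes a power of two in bytes, and anything beyond MAXIMUM_SHIFT_BYTES_PER_MFT / MAXIMUM_SHIFT_BYTES_PER_INDEX is rejected. A minimal user-space sketch of that decoding rule follows; MAX_SHIFT, the cluster_bits value and the sample byte are illustrative assumptions, not the kernel's constants.

#include <stdint.h>
#include <stdio.h>

#define MAX_SHIFT 30    /* stand-in cap, not the kernel's MAXIMUM_SHIFT_* limit */

static int decode_size(int8_t field, unsigned int cluster_bits, uint32_t *out)
{
        if (field >= 0) {
                *out = (uint32_t)field << cluster_bits;
                return 0;
        }
        if (-field <= MAX_SHIFT) {
                *out = 1u << -field;
                return 0;
        }
        return -1;      /* corrupted boot sector: refuse instead of shifting out of range */
}

int main(void)
{
        uint32_t size;

        /* 0xF6 is -10 as int8_t: 2^10 = 1024-byte MFT records. */
        if (!decode_size((int8_t)0xF6, 12, &size))
                printf("record size: %u bytes\n", size);
        return 0;
}
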
index 29fd391..4920548 100644 (file)
@@ -211,7 +211,8 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
        size = le32_to_cpu(info->size);
 
        /* Enumerate all xattrs. */
-       for (ret = 0, off = 0; off < size; off += ea_size) {
+       ret = 0;
+       for (off = 0; off + sizeof(struct EA_FULL) < size; off += ea_size) {
                ea = Add2Ptr(ea_all, off);
                ea_size = unpacked_ea_size(ea);
 
@@ -219,6 +220,10 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer,
                        break;
 
                if (buffer) {
+                       /* Check if we can use field ea->name */
+                       if (off + ea_size > size)
+                               break;
+
                        if (ret + ea->name_len + 1 > bytes_per_buffer) {
                                err = -ERANGE;
                                goto out;
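
The ntfs_list_ea() hunk above tightens the walk over packed, variable-length EA records: the loop condition now guarantees a full header fits in the buffer before it is dereferenced, and the added check makes sure the complete record (including the name) lies inside the buffer before ea->name is read. The sketch below shows the same two-step bound check for a generic variable-length record buffer; struct rec_hdr and its fields are invented for illustration and are not NTFS structures.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec_hdr {
        uint16_t size;          /* total record length: header + name bytes */
        uint8_t  name_len;      /* name is not NUL terminated */
};

static void walk(const uint8_t *buf, size_t buf_size)
{
        size_t off = 0;

        while (off + sizeof(struct rec_hdr) <= buf_size) {
                struct rec_hdr hdr;

                memcpy(&hdr, buf + off, sizeof(hdr));
                /* Reject records that overflow the buffer or their own size. */
                if (hdr.size < sizeof(hdr) + hdr.name_len ||
                    off + hdr.size > buf_size)
                        break;
                printf("%.*s\n", hdr.name_len,
                       (const char *)buf + off + sizeof(hdr));
                off += hdr.size;
        }
}

int main(void)
{
        uint8_t buf[32] = { 0 };
        struct rec_hdr hdr = { .size = sizeof(hdr) + 3, .name_len = 3 };

        memcpy(buf, &hdr, sizeof(hdr));
        memcpy(buf + sizeof(hdr), "foo", 3);
        walk(buf, sizeof(hdr) + 3);
        return 0;
}
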
index 95b7515..f6ff23f 100644 (file)
@@ -157,6 +157,34 @@ const struct fs_parameter_spec ovl_parameter_spec[] = {
        {}
 };
 
+static char *ovl_next_opt(char **s)
+{
+       char *sbegin = *s;
+       char *p;
+
+       if (sbegin == NULL)
+               return NULL;
+
+       for (p = sbegin; *p; p++) {
+               if (*p == '\\') {
+                       p++;
+                       if (!*p)
+                               break;
+               } else if (*p == ',') {
+                       *p = '\0';
+                       *s = p + 1;
+                       return sbegin;
+               }
+       }
+       *s = NULL;
+       return sbegin;
+}
+
+static int ovl_parse_monolithic(struct fs_context *fc, void *data)
+{
+       return vfs_parse_monolithic_sep(fc, data, ovl_next_opt);
+}
+
 static ssize_t ovl_parse_param_split_lowerdirs(char *str)
 {
        ssize_t nr_layers = 1, nr_colons = 0;
@@ -164,7 +192,8 @@ static ssize_t ovl_parse_param_split_lowerdirs(char *str)
 
        for (s = d = str;; s++, d++) {
                if (*s == '\\') {
-                       s++;
+                       /* keep esc chars in split lowerdir */
+                       *d++ = *s++;
                } else if (*s == ':') {
                        bool next_colon = (*(s + 1) == ':');
 
@@ -239,7 +268,7 @@ static void ovl_unescape(char *s)
        }
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
+static int ovl_mount_dir(const char *name, struct path *path, bool upper)
 {
        int err = -ENOMEM;
        char *tmp = kstrdup(name, GFP_KERNEL);
@@ -248,7 +277,7 @@ static int ovl_mount_dir(const char *name, struct path *path)
                ovl_unescape(tmp);
                err = ovl_mount_dir_noesc(tmp, path);
 
-               if (!err && path->dentry->d_flags & DCACHE_OP_REAL) {
+               if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
                        pr_err("filesystem on '%s' not supported as upperdir\n",
                               tmp);
                        path_put_init(path);
@@ -269,7 +298,7 @@ static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
        struct path path;
        char *dup;
 
-       err = ovl_mount_dir(name, &path);
+       err = ovl_mount_dir(name, &path, true);
        if (err)
                return err;
 
@@ -321,12 +350,6 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
  *     Set "/lower1", "/lower2", and "/lower3" as lower layers and
  *     "/data1" and "/data2" as data lower layers. Any existing lower
  *     layers are replaced.
- * (2) lowerdir=:/lower4
- *     Append "/lower4" to current stack of lower layers. This requires
- *     that there already is at least one lower layer configured.
- * (3) lowerdir=::/lower5
- *     Append data "/lower5" as data lower layer. This requires that
- *     there's at least one regular lower layer present.
  */
 static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
 {
@@ -348,49 +371,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
                return 0;
        }
 
-       if (strncmp(name, "::", 2) == 0) {
-               /*
-                * This is a data layer.
-                * There must be at least one regular lower layer
-                * specified.
-                */
-               if (ctx->nr == 0) {
-                       pr_err("data lower layers without regular lower layers not allowed");
-                       return -EINVAL;
-               }
-
-               /* Skip the leading "::". */
-               name += 2;
-               data_layer = true;
-               /*
-                * A data layer is automatically an append as there
-                * must've been at least one regular lower layer.
-                */
-               append = true;
-       } else if (*name == ':') {
-               /*
-                * This is a regular lower layer.
-                * If users want to append a layer enforce that they
-                * have already specified a first layer before. It's
-                * better to be strict.
-                */
-               if (ctx->nr == 0) {
-                       pr_err("cannot append layer if no previous layer has been specified");
-                       return -EINVAL;
-               }
-
-               /*
-                * Once a sequence of data layers has started regular
-                * lower layers are forbidden.
-                */
-               if (ctx->nr_data > 0) {
-                       pr_err("regular lower layers cannot follow data lower layers");
-                       return -EINVAL;
-               }
-
-               /* Skip the leading ":". */
-               name++;
-               append = true;
+       if (*name == ':') {
+               pr_err("cannot append lower layer");
+               return -EINVAL;
        }
 
        dup = kstrdup(name, GFP_KERNEL);
@@ -472,7 +455,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
                l = &ctx->lower[nr];
                memset(l, 0, sizeof(*l));
 
-               err = ovl_mount_dir_noesc(dup_iter, &l->path);
+               err = ovl_mount_dir(dup_iter, &l->path, false);
                if (err)
                        goto out_put;
 
@@ -682,6 +665,7 @@ static int ovl_reconfigure(struct fs_context *fc)
 }
 
 static const struct fs_context_operations ovl_context_ops = {
+       .parse_monolithic = ovl_parse_monolithic,
        .parse_param = ovl_parse_param,
        .get_tree    = ovl_get_tree,
        .reconfigure = ovl_reconfigure,
@@ -950,16 +934,23 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        struct super_block *sb = dentry->d_sb;
        struct ovl_fs *ofs = OVL_FS(sb);
        size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
-       char **lowerdatadirs = &ofs->config.lowerdirs[nr_merged_lower];
-
-       /* lowerdirs[] starts from offset 1 */
-       seq_printf(m, ",lowerdir=%s", ofs->config.lowerdirs[1]);
-       /* dump regular lower layers */
-       for (nr = 2; nr < nr_merged_lower; nr++)
-               seq_printf(m, ":%s", ofs->config.lowerdirs[nr]);
-       /* dump data lower layers */
-       for (nr = 0; nr < ofs->numdatalayer; nr++)
-               seq_printf(m, "::%s", lowerdatadirs[nr]);
+
+       /*
+        * lowerdirs[] starts from offset 1, then
+        * >= 0 regular lower layers prefixed with : and
+        * >= 0 data-only lower layers prefixed with ::
+        *
+        * we need to escase comma and space like seq_show_option() does and
+        * we need to escape comma and space like seq_show_option() does and
+        */
+       seq_puts(m, ",lowerdir=");
+       for (nr = 1; nr < ofs->numlayer; nr++) {
+               if (nr > 1)
+                       seq_putc(m, ':');
+               if (nr >= nr_merged_lower)
+                       seq_putc(m, ':');
+               seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
+       }
        if (ofs->config.upperdir) {
                seq_show_option(m, "upperdir", ofs->config.upperdir);
                seq_show_option(m, "workdir", ofs->config.workdir);
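
The overlayfs hunks above drop the old lowerdir append forms (a lowerdir value starting with ':' is now rejected), route monolithic option parsing through ovl_next_opt() so a backslash-escaped comma survives splitting, and escape colon, comma and whitespace when the options are shown again. Below is a hedged user-space example of mounting with the remaining syntax, where every layer, including a "::" data-only layer, is passed in a single lowerdir= value; the paths, mountpoint and escaping shown are illustrative assumptions, not values taken from this series.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* One lowerdir= value carrying all layers; "\," keeps a literal comma
         * inside a path, and "::" introduces a data-only lower layer. */
        const char *opts =
                "lowerdir=/lower1:/lower2:/dir\\,with\\,commas::/data1,"
                "upperdir=/upper,workdir=/work";

        if (mount("overlay", "/mnt/merged", "overlay", 0, opts)) {
                perror("mount");
                return 1;
        }
        return 0;
}
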
index 9e72bfe..31e897a 100644 (file)
@@ -233,19 +233,18 @@ static void put_quota_format(struct quota_format_type *fmt)
  * All dquots are placed to the end of inuse_list when first created, and this
  * list is used for invalidate operation, which must look at every dquot.
  *
- * When the last reference of a dquot will be dropped, the dquot will be
- * added to releasing_dquots. We'd then queue work item which would call
+ * When the last reference of a dquot is dropped, the dquot is added to
+ * releasing_dquots. We'll then queue work item which will call
  * synchronize_srcu() and after that perform the final cleanup of all the
- * dquots on the list. Both releasing_dquots and free_dquots use the
- * dq_free list_head in the dquot struct. When a dquot is removed from
- * releasing_dquots, a reference count is always subtracted, and if
- * dq_count == 0 at that point, the dquot will be added to the free_dquots.
+ * dquots on the list. Each cleaned up dquot is moved to free_dquots list.
+ * Both releasing_dquots and free_dquots use the dq_free list_head in the dquot
+ * struct.
  *
- * Unused dquots (dq_count == 0) are added to the free_dquots list when freed,
- * and this list is searched whenever we need an available dquot.  Dquots are
- * removed from the list as soon as they are used again, and
- * dqstats.free_dquots gives the number of dquots on the list. When
- * dquot is invalidated it's completely released from memory.
+ * Unused and cleaned up dquots are in the free_dquots list and this list is
+ * searched whenever we need an available dquot. Dquots are removed from the
+ * list as soon as they are used again and dqstats.free_dquots gives the number
+ * of dquots on the list. When dquot is invalidated it's completely released
+ * from memory.
  *
  * Dirty dquots are added to the dqi_dirty_list of quota_info when mark
  * dirtied, and this list is searched when writing dirty dquots back to
@@ -321,6 +320,7 @@ static inline void put_dquot_last(struct dquot *dquot)
 static inline void put_releasing_dquots(struct dquot *dquot)
 {
        list_add_tail(&dquot->dq_free, &releasing_dquots);
+       set_bit(DQ_RELEASING_B, &dquot->dq_flags);
 }
 
 static inline void remove_free_dquot(struct dquot *dquot)
@@ -328,8 +328,10 @@ static inline void remove_free_dquot(struct dquot *dquot)
        if (list_empty(&dquot->dq_free))
                return;
        list_del_init(&dquot->dq_free);
-       if (!atomic_read(&dquot->dq_count))
+       if (!test_bit(DQ_RELEASING_B, &dquot->dq_flags))
                dqstats_dec(DQST_FREE_DQUOTS);
+       else
+               clear_bit(DQ_RELEASING_B, &dquot->dq_flags);
 }
 
 static inline void put_inuse(struct dquot *dquot)
@@ -581,12 +583,6 @@ restart:
                        continue;
                /* Wait for dquot users */
                if (atomic_read(&dquot->dq_count)) {
-                       /* dquot in releasing_dquots, flush and retry */
-                       if (!list_empty(&dquot->dq_free)) {
-                               spin_unlock(&dq_list_lock);
-                               goto restart;
-                       }
-
                        atomic_inc(&dquot->dq_count);
                        spin_unlock(&dq_list_lock);
                        /*
@@ -606,6 +602,15 @@ restart:
                        goto restart;
                }
                /*
+                * The last user already dropped its reference but dquot didn't
+                * get fully cleaned up yet. Restart the scan which flushes the
+                * work cleaning up released dquots.
+                */
+               if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+                       spin_unlock(&dq_list_lock);
+                       goto restart;
+               }
+               /*
                 * Quota now has no users and it has been written on last
                 * dqput()
                 */
@@ -696,6 +701,13 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
                                                 dq_dirty);
 
                        WARN_ON(!dquot_active(dquot));
+                       /* If the dquot is releasing we should not touch it */
+                       if (test_bit(DQ_RELEASING_B, &dquot->dq_flags)) {
+                               spin_unlock(&dq_list_lock);
+                               flush_delayed_work(&quota_release_work);
+                               spin_lock(&dq_list_lock);
+                               continue;
+                       }
 
                        /* Now we have active dquot from which someone is
                         * holding reference so we can safely just increase
@@ -809,18 +821,18 @@ static void quota_release_workfn(struct work_struct *work)
        /* Exchange the list head to avoid livelock. */
        list_replace_init(&releasing_dquots, &rls_head);
        spin_unlock(&dq_list_lock);
+       synchronize_srcu(&dquot_srcu);
 
 restart:
-       synchronize_srcu(&dquot_srcu);
        spin_lock(&dq_list_lock);
        while (!list_empty(&rls_head)) {
                dquot = list_first_entry(&rls_head, struct dquot, dq_free);
-               /* Dquot got used again? */
-               if (atomic_read(&dquot->dq_count) > 1) {
-                       remove_free_dquot(dquot);
-                       atomic_dec(&dquot->dq_count);
-                       continue;
-               }
+               WARN_ON_ONCE(atomic_read(&dquot->dq_count));
+               /*
+                * Note that DQ_RELEASING_B protects us from racing with
+                * invalidate_dquots() calls so we are safe to work with the
+                * dquot even after we drop dq_list_lock.
+                */
                if (dquot_dirty(dquot)) {
                        spin_unlock(&dq_list_lock);
                        /* Commit dquot before releasing */
@@ -834,7 +846,6 @@ restart:
                }
                /* Dquot is inactive and clean, now move it to free list */
                remove_free_dquot(dquot);
-               atomic_dec(&dquot->dq_count);
                put_dquot_last(dquot);
        }
        spin_unlock(&dq_list_lock);
@@ -875,6 +886,7 @@ void dqput(struct dquot *dquot)
        BUG_ON(!list_empty(&dquot->dq_free));
 #endif
        put_releasing_dquots(dquot);
+       atomic_dec(&dquot->dq_count);
        spin_unlock(&dq_list_lock);
        queue_delayed_work(system_unbound_wq, &quota_release_work, 1);
 }
@@ -963,7 +975,7 @@ we_slept:
                dqstats_inc(DQST_LOOKUPS);
        }
        /* Wait for dq_lock - after this we know that either dquot_release() is
-        * already finished or it will be canceled due to dq_count > 1 test */
+        * already finished or it will be canceled due to dq_count > 0 test */
        wait_on_dquot(dquot);
        /* Read the dquot / allocate space in quota file */
        if (!dquot_active(dquot)) {
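
The rewritten comment and hunks above describe the new lifecycle: dqput() drops dq_count to zero, parks the dquot on releasing_dquots with DQ_RELEASING_B set, and the delayed work later writes it back and moves it to free_dquots; any scanner that meets the flag has to flush that work and retry instead of treating the dquot as reusable. The fragment below is a simplified, single-threaded user-space model of those transitions (no SRCU, no locking, a one-entry "list"), with invented names, meant only to show the ordering of the flag and list moves.

#include <stdbool.h>
#include <stdio.h>

struct dq {
        int  count;
        bool releasing;         /* stands in for DQ_RELEASING_B */
        bool on_free_list;
};

static struct dq *releasing_list;       /* one-entry "list" for the model */

static void dqput(struct dq *dq)
{
        if (--dq->count)
                return;
        dq->releasing = true;           /* parked, but not reusable yet */
        releasing_list = dq;
}

static void release_work(void)
{
        struct dq *dq = releasing_list;

        if (!dq)
                return;
        releasing_list = NULL;
        /* ...write back and release the on-disk structure here... */
        dq->releasing = false;
        dq->on_free_list = true;        /* now safe to reuse */
}

static bool can_reuse(const struct dq *dq)
{
        return dq->count == 0 && !dq->releasing;
}

int main(void)
{
        struct dq dq = { .count = 1 };

        dqput(&dq);
        printf("reusable before work: %d\n", can_reuse(&dq));   /* 0 */
        release_work();
        printf("reusable after work:  %d\n", can_reuse(&dq));   /* 1 */
        return 0;
}
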
index e2be8ae..fe1bf5b 100644 (file)
@@ -15,6 +15,7 @@
 static struct cached_fid *init_cached_dir(const char *path);
 static void free_cached_dir(struct cached_fid *cfid);
 static void smb2_close_cached_fid(struct kref *ref);
+static void cfids_laundromat_worker(struct work_struct *work);
 
 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
                                                    const char *path,
@@ -169,15 +170,18 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                return -ENOENT;
        }
        /*
-        * At this point we either have a lease already and we can just
-        * return it. If not we are guaranteed to be the only thread accessing
-        * this cfid.
+        * Return cached fid if it has a lease.  Otherwise, it is either a new
+        * entry or the laundromat worker removed it from @cfids->entries.  In
+        * the latter case, the caller will put the last reference.
         */
+       spin_lock(&cfids->cfid_list_lock);
        if (cfid->has_lease) {
+               spin_unlock(&cfids->cfid_list_lock);
                *ret_cfid = cfid;
                kfree(utf16_path);
                return 0;
        }
+       spin_unlock(&cfids->cfid_list_lock);
 
        /*
         * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
@@ -294,9 +298,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
                        goto oshr_free;
                }
        }
+       spin_lock(&cfids->cfid_list_lock);
        cfid->dentry = dentry;
        cfid->time = jiffies;
        cfid->has_lease = true;
+       spin_unlock(&cfids->cfid_list_lock);
 
 oshr_free:
        kfree(utf16_path);
@@ -305,24 +311,28 @@ oshr_free:
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
        spin_lock(&cfids->cfid_list_lock);
-       if (rc && !cfid->has_lease) {
-               if (cfid->on_list) {
-                       list_del(&cfid->entry);
-                       cfid->on_list = false;
-                       cfids->num_entries--;
+       if (!cfid->has_lease) {
+               if (rc) {
+                       if (cfid->on_list) {
+                               list_del(&cfid->entry);
+                               cfid->on_list = false;
+                               cfids->num_entries--;
+                       }
+                       rc = -ENOENT;
+               } else {
+                       /*
+                        * We are guaranteed to have two references at this
+                        * point. One for the caller and one for a potential
+                        * lease. Release the Lease-ref so that the directory
+                        * will be closed when the caller closes the cached
+                        * handle.
+                        */
+                       spin_unlock(&cfids->cfid_list_lock);
+                       kref_put(&cfid->refcount, smb2_close_cached_fid);
+                       goto out;
                }
-               rc = -ENOENT;
        }
        spin_unlock(&cfids->cfid_list_lock);
-       if (!rc && !cfid->has_lease) {
-               /*
-                * We are guaranteed to have two references at this point.
-                * One for the caller and one for a potential lease.
-                * Release the Lease-ref so that the directory will be closed
-                * when the caller closes the cached handle.
-                */
-               kref_put(&cfid->refcount, smb2_close_cached_fid);
-       }
        if (rc) {
                if (cfid->is_open)
                        SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
@@ -330,7 +340,7 @@ oshr_free:
                free_cached_dir(cfid);
                cfid = NULL;
        }
-
+out:
        if (rc == 0) {
                *ret_cfid = cfid;
                atomic_inc(&tcon->num_remote_opens);
@@ -572,53 +582,51 @@ static void free_cached_dir(struct cached_fid *cfid)
        kfree(cfid);
 }
 
-static int
-cifs_cfids_laundromat_thread(void *p)
+static void cfids_laundromat_worker(struct work_struct *work)
 {
-       struct cached_fids *cfids = p;
+       struct cached_fids *cfids;
        struct cached_fid *cfid, *q;
-       struct list_head entry;
+       LIST_HEAD(entry);
 
-       while (!kthread_should_stop()) {
-               ssleep(1);
-               INIT_LIST_HEAD(&entry);
-               if (kthread_should_stop())
-                       return 0;
-               spin_lock(&cfids->cfid_list_lock);
-               list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-                       if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
-                               list_del(&cfid->entry);
-                               list_add(&cfid->entry, &entry);
-                               cfids->num_entries--;
-                       }
-               }
-               spin_unlock(&cfids->cfid_list_lock);
+       cfids = container_of(work, struct cached_fids, laundromat_work.work);
 
-               list_for_each_entry_safe(cfid, q, &entry, entry) {
+       spin_lock(&cfids->cfid_list_lock);
+       list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+               if (cfid->time &&
+                   time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
                        cfid->on_list = false;
-                       list_del(&cfid->entry);
+                       list_move(&cfid->entry, &entry);
+                       cfids->num_entries--;
+                       /* To prevent race with smb2_cached_lease_break() */
+                       kref_get(&cfid->refcount);
+               }
+       }
+       spin_unlock(&cfids->cfid_list_lock);
+
+       list_for_each_entry_safe(cfid, q, &entry, entry) {
+               list_del(&cfid->entry);
+               /*
+                * Cancel and wait for the work to finish in case we are racing
+                * with it.
+                */
+               cancel_work_sync(&cfid->lease_break);
+               if (cfid->has_lease) {
                        /*
-                        * Cancel, and wait for the work to finish in
-                        * case we are racing with it.
+                        * Our lease has not yet been cancelled from the server
+                        * so we need to drop the reference.
                         */
-                       cancel_work_sync(&cfid->lease_break);
-                       if (cfid->has_lease) {
-                               /*
-                                * We lease has not yet been cancelled from
-                                * the server so we need to drop the reference.
-                                */
-                               spin_lock(&cfids->cfid_list_lock);
-                               cfid->has_lease = false;
-                               spin_unlock(&cfids->cfid_list_lock);
-                               kref_put(&cfid->refcount, smb2_close_cached_fid);
-                       }
+                       spin_lock(&cfids->cfid_list_lock);
+                       cfid->has_lease = false;
+                       spin_unlock(&cfids->cfid_list_lock);
+                       kref_put(&cfid->refcount, smb2_close_cached_fid);
                }
+               /* Drop the extra reference opened above */
+               kref_put(&cfid->refcount, smb2_close_cached_fid);
        }
-
-       return 0;
+       queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+                          dir_cache_timeout * HZ);
 }
 
-
 struct cached_fids *init_cached_dirs(void)
 {
        struct cached_fids *cfids;
@@ -629,19 +637,10 @@ struct cached_fids *init_cached_dirs(void)
        spin_lock_init(&cfids->cfid_list_lock);
        INIT_LIST_HEAD(&cfids->entries);
 
-       /*
-        * since we're in a cifs function already, we know that
-        * this will succeed. No need for try_module_get().
-        */
-       __module_get(THIS_MODULE);
-       cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread,
-                                 cfids, "cifsd-cfid-laundromat");
-       if (IS_ERR(cfids->laundromat)) {
-               cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n");
-               kfree(cfids);
-               module_put(THIS_MODULE);
-               return NULL;
-       }
+       INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
+       queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
+                          dir_cache_timeout * HZ);
+
        return cfids;
 }
 
@@ -657,11 +656,7 @@ void free_cached_dirs(struct cached_fids *cfids)
        if (cfids == NULL)
                return;
 
-       if (cfids->laundromat) {
-               kthread_stop(cfids->laundromat);
-               cfids->laundromat = NULL;
-               module_put(THIS_MODULE);
-       }
+       cancel_delayed_work_sync(&cfids->laundromat_work);
 
        spin_lock(&cfids->cfid_list_lock);
        list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
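
The cached_dir.c changes above replace the dedicated "cifsd-cfid-laundromat" kthread with a delayed work item on cifsiod_wq that re-queues itself every dir_cache_timeout seconds and is stopped with cancel_delayed_work_sync() at teardown. A minimal sketch of that self-rearming pattern as a standalone module follows; it is not cifs code and uses the generic system workqueue with an arbitrary 30-second interval.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work laundromat_work;

static void laundromat_fn(struct work_struct *work)
{
        /* ...scan and expire stale entries here... */

        /* Re-arm: the work keeps scheduling itself until it is cancelled. */
        queue_delayed_work(system_wq, &laundromat_work, 30 * HZ);
}

static int __init laundromat_init(void)
{
        INIT_DELAYED_WORK(&laundromat_work, laundromat_fn);
        queue_delayed_work(system_wq, &laundromat_work, 30 * HZ);
        return 0;
}

static void __exit laundromat_exit(void)
{
        /* Waits for a running instance and prevents it from re-arming. */
        cancel_delayed_work_sync(&laundromat_work);
}

module_init(laundromat_init);
module_exit(laundromat_exit);
MODULE_LICENSE("GPL");
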
index a82ff2c..81ba0fd 100644 (file)
@@ -57,7 +57,7 @@ struct cached_fids {
        spinlock_t cfid_list_lock;
        int num_entries;
        struct list_head entries;
-       struct task_struct *laundromat;
+       struct delayed_work laundromat_work;
 };
 
 extern struct cached_fids *init_cached_dirs(void);
index 898860a..93262ca 100644 (file)
@@ -231,11 +231,12 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
 {
        struct smb2_hdr *rsp_hdr;
 
-       if (work->next_smb2_rcv_hdr_off)
-               rsp_hdr = ksmbd_resp_buf_next(work);
-       else
-               rsp_hdr = smb2_get_msg(work->response_buf);
+       rsp_hdr = smb2_get_msg(work->response_buf);
        rsp_hdr->Status = err;
+
+       work->iov_idx = 0;
+       work->iov_cnt = 0;
+       work->next_smb2_rcv_hdr_off = 0;
        smb2_set_err_rsp(work);
 }
 
@@ -6151,12 +6152,12 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
                memcpy(aux_payload_buf, rpc_resp->payload, rpc_resp->payload_sz);
 
                nbytes = rpc_resp->payload_sz;
-               kvfree(rpc_resp);
                err = ksmbd_iov_pin_rsp_read(work, (void *)rsp,
                                             offsetof(struct smb2_read_rsp, Buffer),
                                             aux_payload_buf, nbytes);
                if (err)
                        goto out;
+               kvfree(rpc_resp);
        } else {
                err = ksmbd_iov_pin_rsp(work, (void *)rsp,
                                        offsetof(struct smb2_read_rsp, Buffer));
index c4b80ab..c91eac6 100644 (file)
@@ -106,7 +106,7 @@ int ksmbd_query_inode_status(struct inode *inode)
        ci = __ksmbd_inode_lookup(inode);
        if (ci) {
                ret = KSMBD_INODE_STATUS_OK;
-               if (ci->m_flags & S_DEL_PENDING)
+               if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
                        ret = KSMBD_INODE_STATUS_PENDING_DELETE;
                atomic_dec(&ci->m_count);
        }
@@ -116,7 +116,7 @@ int ksmbd_query_inode_status(struct inode *inode)
 
 bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
 {
-       return (fp->f_ci->m_flags & S_DEL_PENDING);
+       return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
 }
 
 void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
@@ -603,6 +603,9 @@ err_out:
 void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
                         unsigned int state)
 {
+       if (!fp)
+               return;
+
        write_lock(&ft->lock);
        fp->f_state = state;
        write_unlock(&ft->lock);
index e9cc481..f9f4d69 100644 (file)
@@ -1001,6 +1001,12 @@ xfs_ag_shrink_space(
                error = -ENOSPC;
                goto resv_init_out;
        }
+
+       /* Update perag geometry */
+       pag->block_count -= delta;
+       __xfs_agino_range(pag->pag_mount, pag->block_count, &pag->agino_min,
+                               &pag->agino_max);
+
        xfs_ialloc_log_agi(*tpp, agibp, XFS_AGI_LENGTH);
        xfs_alloc_log_agf(*tpp, agfbp, XFS_AGF_LENGTH);
        return 0;
index d98e8e7..090c3ea 100644 (file)
@@ -10,7 +10,6 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
-#include "xfs_format.h"
 #include "scrub/xfile.h"
 #include "scrub/xfarray.h"
 #include "scrub/scrub.h"
index 7468148..9ecfdcd 100644 (file)
@@ -62,7 +62,8 @@ xfs_extent_busy_insert_list(
        rb_link_node(&new->rb_node, parent, rbp);
        rb_insert_color(&new->rb_node, &pag->pagb_tree);
 
-       list_add(&new->list, busy_list);
+       /* always process discard lists in fifo order */
+       list_add_tail(&new->list, busy_list);
        spin_unlock(&pag->pagb_lock);
 }
 
index 1c1e617..2b3b05c 100644 (file)
@@ -584,6 +584,11 @@ xfs_vn_getattr(
                }
        }
 
+       if ((request_mask & STATX_CHANGE_COOKIE) && IS_I_VERSION(inode)) {
+               stat->change_cookie = inode_query_iversion(inode);
+               stat->result_mask |= STATX_CHANGE_COOKIE;
+       }
+
        /*
         * Note: If you add another clause to set an attribute flag, please
         * update attributes_mask below.
index 4a9bbd3..a7daa52 100644 (file)
@@ -126,8 +126,8 @@ xfs_dax_notify_ddev_failure(
                struct xfs_rmap_irec    ri_low = { };
                struct xfs_rmap_irec    ri_high;
                struct xfs_agf          *agf;
-               xfs_agblock_t           agend;
                struct xfs_perag        *pag;
+               xfs_agblock_t           range_agend;
 
                pag = xfs_perag_get(mp, agno);
                error = xfs_alloc_read_agf(pag, tp, 0, &agf_bp);
@@ -148,10 +148,10 @@ xfs_dax_notify_ddev_failure(
                        ri_high.rm_startblock = XFS_FSB_TO_AGBNO(mp, end_fsbno);
 
                agf = agf_bp->b_addr;
-               agend = min(be32_to_cpu(agf->agf_length),
+               range_agend = min(be32_to_cpu(agf->agf_length) - 1,
                                ri_high.rm_startblock);
                notify.startblock = ri_low.rm_startblock;
-               notify.blockcount = agend - ri_low.rm_startblock;
+               notify.blockcount = range_agend + 1 - ri_low.rm_startblock;
 
                error = xfs_rmap_query_range(cur, &ri_low, &ri_high,
                                xfs_dax_failure_fn, &notify);
index 94181fe..3f34ebb 100644 (file)
@@ -465,9 +465,4 @@ extern int acpi_processor_ffh_lpi_probe(unsigned int cpu);
 extern int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi);
 #endif
 
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-extern int arch_register_cpu(int cpu);
-extern void arch_unregister_cpu(int cpu);
-#endif
-
 #endif
index cecd2b7..430f0ae 100644 (file)
@@ -36,6 +36,7 @@ struct ms_hyperv_info {
        u32 nested_features;
        u32 max_vp_index;
        u32 max_lp_index;
+       u8 vtl;
        union {
                u32 isolation_config_a;
                struct {
@@ -54,7 +55,6 @@ struct ms_hyperv_info {
                };
        };
        u64 shared_gpa_boundary;
-       u8 vtl;
 };
 extern struct ms_hyperv_info ms_hyperv;
 extern bool hv_nested;
index f9544d9..ac65f06 100644 (file)
@@ -68,8 +68,7 @@ enum drm_sched_priority {
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,
 
-       DRM_SCHED_PRIORITY_COUNT,
-       DRM_SCHED_PRIORITY_UNSET = -2
+       DRM_SCHED_PRIORITY_COUNT
 };
 
 /* Used to chose between FIFO and RR jobs scheduling */
index bb3cb00..e748bc9 100644 (file)
@@ -82,6 +82,8 @@ struct timer_map {
        struct arch_timer_context *emul_ptimer;
 };
 
+void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map);
+
 struct arch_timer_cpu {
        struct arch_timer_context timers[NR_KVM_TIMERS];
 
@@ -145,4 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt);
 void kvm_timer_cpu_up(void);
 void kvm_timer_cpu_down(void);
 
+static inline bool has_cntpoff(void)
+{
+       return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF));
+}
+
 #endif
index f1b3151..265da00 100644 (file)
@@ -238,7 +238,7 @@ struct css_set {
         * Lists running through all tasks using this cgroup group.
         * mg_tasks lists tasks which belong to this cset but are in the
         * process of being migrated out or in.  Protected by
-        * css_set_rwsem, but, during migration, once tasks are moved to
+        * css_set_lock, but, during migration, once tasks are moved to
         * mg_tasks, it can be read safely while holding cgroup_mutex.
         */
        struct list_head tasks;
index 0abd60a..eb768a8 100644 (file)
@@ -80,6 +80,8 @@ extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
 #ifdef CONFIG_HOTPLUG_CPU
 extern void unregister_cpu(struct cpu *cpu);
 extern ssize_t arch_cpu_probe(const char *, size_t);
index 0d678e9..ebe78bd 100644 (file)
@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
        fence->error = error;
 }
 
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+       if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+               return ktime_get();
+
+       while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+               cpu_relax();
+
+       return fence->timestamp;
+}
+
 signed long dma_fence_wait_timeout(struct dma_fence *,
                                   bool intr, signed long timeout);
 signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
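
dma_fence_timestamp() exists because the signaled bit and the timestamp are published separately: a waiter can observe DMA_FENCE_FLAG_SIGNALED_BIT before the signaling side has stored the timestamp and set DMA_FENCE_FLAG_TIMESTAMP_BIT, so the helper spins until the second bit appears. The user-space model below reproduces that publication order with C11 atomics; the names and flag values are invented, and the real code uses the kernel's bit operations rather than stdatomic.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

#define F_SIGNALED  (1u << 0)
#define F_TIMESTAMP (1u << 1)

static atomic_uint flags;
static struct timespec stamp;

static void signal_fence(void)
{
        atomic_fetch_or_explicit(&flags, F_SIGNALED, memory_order_release);
        clock_gettime(CLOCK_MONOTONIC, &stamp);
        atomic_fetch_or_explicit(&flags, F_TIMESTAMP, memory_order_release);
}

static struct timespec fence_timestamp(void)
{
        struct timespec now;

        if (!(atomic_load_explicit(&flags, memory_order_acquire) & F_SIGNALED)) {
                clock_gettime(CLOCK_MONOTONIC, &now);
                return now;     /* not signaled yet: mirrors the WARN_ON path */
        }
        while (!(atomic_load_explicit(&flags, memory_order_acquire) & F_TIMESTAMP))
                ;               /* busy-wait for the timestamp to be published */
        return stamp;
}

int main(void)
{
        signal_fence();
        struct timespec ts = fence_timestamp();
        printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}
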
index b528f06..4a40823 100644 (file)
@@ -2403,7 +2403,7 @@ struct audit_names;
 struct filename {
        const char              *name;  /* pointer to actual string */
        const __user char       *uptr;  /* original userland pointer */
-       int                     refcnt;
+       atomic_t                refcnt;
        struct audit_names      *aname;
        const char              iname[];
 };
index 96332db..c13e99c 100644 (file)
@@ -136,6 +136,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
 extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
 extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
                               const char *value, size_t v_size);
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+                            char *(*sep)(char **));
 extern int generic_parse_monolithic(struct fs_context *fc, void *data);
 extern int vfs_get_tree(struct fs_context *fc);
 extern void put_fs_context(struct fs_context *fc);
index 1e58931..0b971b2 100644 (file)
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
 struct mcb_device {
        struct device dev;
        struct mcb_bus *bus;
-       bool is_added;
        struct mcb_driver *driver;
        u16 id;
        int inst;
index 0b6b59f..56047a4 100644 (file)
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
 /* JEDEC features */
 #define JEDEC_FEATURE_16_BIT_BUS       (1 << 0)
 
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE       BIT(1)
+
 struct nand_jedec_params {
        /* rev info and features block */
        /* 'J' 'E' 'S' 'D'  */
index a7376f9..55ab2e4 100644 (file)
@@ -55,6 +55,7 @@
 #define ONFI_SUBFEATURE_PARAM_LEN      4
 
 /* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE                BIT(1)
 #define ONFI_OPT_CMD_SET_GET_FEATURES  BIT(2)
 
 struct nand_onfi_params {
index 90a141b..c29ace1 100644 (file)
@@ -225,6 +225,7 @@ struct gpio_desc;
  * struct nand_parameters - NAND generic parameters from the parameter page
  * @model: Model name
  * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
  * @set_feature_list: Bitmap of features that can be set
  * @get_feature_list: Bitmap of features that can be get
  * @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
        /* Generic parameters */
        const char *model;
        bool supports_set_get_features;
+       bool supports_read_cache;
        DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
        DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
 
index fd692b4..07071e6 100644 (file)
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
 #define DQ_FAKE_B      3       /* no limits only usage */
 #define DQ_READ_B      4       /* dquot was read into memory */
 #define DQ_ACTIVE_B    5       /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B   6       /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6       /* dquot is in releasing_dquots list waiting
+                                * to be cleaned up */
+#define DQ_LASTSET_B   7       /* Following 6 bits (see QIF_) are reserved\
                                 * for the mask of entries set via SETQUOTA\
                                 * quotactl. They are set under dq_data_lock\
                                 * and the quota format handling dquot can\
index 11a4bec..4fa4ef0 100644 (file)
@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
 {
        if (test_bit(DQ_MOD_B, &dquot->dq_flags))
                return true;
-       if (atomic_read(&dquot->dq_count) > 1)
+       if (atomic_read(&dquot->dq_count) > 0)
                return true;
        return false;
 }
index 4174c4b..97bfef0 100644 (file)
@@ -1309,7 +1309,7 @@ struct sk_buff_fclones {
  *
  * Returns true if skb is a fast clone, and its clone is not freed.
  * Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that didn't happen.
  */
 static inline bool skb_fclone_busy(const struct sock *sk,
                                   const struct sk_buff *skb)
@@ -2016,7 +2016,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
  *     Copy shared buffers into a new sk_buff. We effectively do COW on
  *     packets to handle cases where we have a local reader and forward
  *     and a couple of other messy ones. The normal one is tcpdumping
- *     a packet thats being forwarded.
+ *     a packet that's being forwarded.
  */
 
 /**
index 7b4dd69..27cc1d4 100644 (file)
@@ -3,8 +3,8 @@
 #define _LINUX_VIRTIO_NET_H
 
 #include <linux/if_vlan.h>
+#include <linux/udp.h>
 #include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
 #include <uapi/linux/virtio_net.h>
 
 static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -151,9 +151,22 @@ retry:
                unsigned int nh_off = p_off;
                struct skb_shared_info *shinfo = skb_shinfo(skb);
 
-               /* UFO may not include transport header in gso_size. */
-               if (gso_type & SKB_GSO_UDP)
+               switch (gso_type & ~SKB_GSO_TCP_ECN) {
+               case SKB_GSO_UDP:
+                       /* UFO may not include transport header in gso_size. */
                        nh_off -= thlen;
+                       break;
+               case SKB_GSO_UDP_L4:
+                       if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+                               return -EINVAL;
+                       if (skb->csum_offset != offsetof(struct udphdr, check))
+                               return -EINVAL;
+                       if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+                               return -EINVAL;
+                       if (gso_type != SKB_GSO_UDP_L4)
+                               return -EINVAL;
+                       break;
+               }
 
                /* Kernel has a special handling for GSO_BY_FRAGS. */
                if (gso_size == GSO_BY_FRAGS)
index 2d5fcda..082f895 100644 (file)
@@ -56,7 +56,7 @@ struct hci_mon_new_index {
        __u8            type;
        __u8            bus;
        bdaddr_t        bdaddr;
-       char            name[8];
+       char            name[8] __nonstring;
 } __packed;
 #define HCI_MON_NEW_INDEX_SIZE 16
 
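The hci_mon_new_index change marks name[8] with __nonstring because the field is a fixed-width identifier that need not be NUL terminated, which tells the compiler that strncpy()-style truncation into it is intentional. The kernel's __nonstring macro wraps the GCC 8+ attribute shown below; the struct and names here are invented for illustration.

#include <stdio.h>
#include <string.h>

struct probe {
        char id[8] __attribute__((nonstring));  /* fixed-width, not a C string */
};

int main(void)
{
        struct probe p;

        /* May legitimately fill all 8 bytes with no terminating NUL. */
        strncpy(p.id, "monitor0", sizeof(p.id));
        printf("%.8s\n", p.id);                 /* length-bounded, no NUL needed */
        return 0;
}
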
index 75a6f48..ebf9bc5 100644 (file)
@@ -258,6 +258,7 @@ struct macsec_context {
        struct macsec_secy *secy;
        struct macsec_rx_sc *rx_sc;
        struct {
+               bool update_pn;
                unsigned char assoc_num;
                u8 key[MACSEC_MAX_KEY_LEN];
                union {
index bd7c3be..423b52e 100644 (file)
@@ -50,6 +50,7 @@ struct netns_xfrm {
        struct list_head        policy_all;
        struct hlist_head       *policy_byidx;
        unsigned int            policy_idx_hmask;
+       unsigned int            idx_generator;
        struct hlist_head       policy_inexact[XFRM_POLICY_MAX];
        struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX];
        unsigned int            policy_count[XFRM_POLICY_MAX * 2];
index b770261..92f7ea6 100644 (file)
@@ -336,7 +336,7 @@ struct sk_filter;
   *    @sk_cgrp_data: cgroup data for this cgroup
   *    @sk_memcg: this socket's memory cgroup association
   *    @sk_write_pending: a write to stream socket waits to start
-  *    @sk_wait_pending: number of threads blocked on this socket
+  *    @sk_disconnects: number of disconnect operations performed on this sock
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
   *    @sk_write_space: callback to indicate there is bf sending space available
@@ -429,7 +429,7 @@ struct sock {
        unsigned int            sk_napi_id;
 #endif
        int                     sk_rcvbuf;
-       int                     sk_wait_pending;
+       int                     sk_disconnects;
 
        struct sk_filter __rcu  *sk_filter;
        union {
@@ -1189,8 +1189,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
 }
 
 #define sk_wait_event(__sk, __timeo, __condition, __wait)              \
-       ({      int __rc;                                               \
-               __sk->sk_wait_pending++;                                \
+       ({      int __rc, __dis = __sk->sk_disconnects;                 \
                release_sock(__sk);                                     \
                __rc = __condition;                                     \
                if (!__rc) {                                            \
@@ -1200,8 +1199,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
                }                                                       \
                sched_annotate_sleep();                                 \
                lock_sock(__sk);                                        \
-               __sk->sk_wait_pending--;                                \
-               __rc = __condition;                                     \
+               __rc = __dis == __sk->sk_disconnects ? __condition : -EPIPE; \
                __rc;                                                   \
        })
 
index 7b1a720..4b03ca7 100644 (file)
@@ -141,6 +141,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define TCP_RTO_MAX    ((unsigned)(120*HZ))
 #define TCP_RTO_MIN    ((unsigned)(HZ/5))
 #define TCP_TIMEOUT_MIN        (2U) /* Min timeout for TCP timers in jiffies */
+
+#define TCP_TIMEOUT_MIN_US (2*USEC_PER_MSEC) /* Min TCP timeout in microsecs */
+
 #define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))    /* RFC6298 2.1 initial RTO value        */
 #define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))        /* RFC 1122 initial RTO value, now
                                                 * used as a fallback RTO for the
index d2faec9..433543e 100644 (file)
@@ -469,6 +469,7 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
 
 int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
                            struct snd_pcm_hw_params *params, struct snd_soc_dai *dai);
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s);
 
 /* dapm path setup */
 int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
index fa2337a..37f9d3f 100644 (file)
@@ -1126,6 +1126,8 @@ struct snd_soc_pcm_runtime {
        unsigned int pop_wait:1;
        unsigned int fe_compr:1; /* for Dynamic PCM */
 
+       bool initialized;
+
        int num_components;
        struct snd_soc_component *components[]; /* CPU/Codec/Platform */
 };
index 5eaa1fa..833143d 100644 (file)
@@ -39,7 +39,6 @@ TRACE_EVENT(neigh_create,
        ),
 
        TP_fast_assign(
-               struct in6_addr *pin6;
                __be32 *p32;
 
                __entry->family = tbl->family;
@@ -47,7 +46,6 @@ TRACE_EVENT(neigh_create,
                __entry->entries = atomic_read(&tbl->gc_entries);
                __entry->created = n != NULL;
                __entry->gc_exempt = exempt_from_gc;
-               pin6 = (struct in6_addr *)__entry->primary_key6;
                p32 = (__be32 *)__entry->primary_key4;
 
                if (tbl->family == AF_INET)
@@ -57,6 +55,8 @@ TRACE_EVENT(neigh_create,
 
 #if IS_ENABLED(CONFIG_IPV6)
                if (tbl->family == AF_INET6) {
+                       struct in6_addr *pin6;
+
                        pin6 = (struct in6_addr *)__entry->primary_key6;
                        *pin6 = *(struct in6_addr *)pkey;
                }
index eaf9f24..0bade15 100644 (file)
@@ -45,8 +45,8 @@ extern "C" {
 #define NOUVEAU_GETPARAM_HAS_BO_USAGE    15
 #define NOUVEAU_GETPARAM_HAS_PAGEFLIP    16
 
-/**
- * @NOUVEAU_GETPARAM_EXEC_PUSH_MAX
+/*
+ * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
  *
  * Query the maximum amount of IBs that can be pushed through a single
  * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
index 4d0ad22..9efc423 100644 (file)
@@ -18,11 +18,7 @@ struct sockaddr_ll {
        unsigned short  sll_hatype;
        unsigned char   sll_pkttype;
        unsigned char   sll_halen;
-       union {
-               unsigned char   sll_addr[8];
-               /* Actual length is in sll_halen. */
-               __DECLARE_FLEX_ARRAY(unsigned char, sll_addr_flex);
-       };
+       unsigned char   sll_addr[8];
 };
 
 /* Packet types */
index 77252cb..a722dcb 100644 (file)
@@ -231,7 +231,7 @@ struct mmp_path {
 
        /* layers */
        int overlay_num;
-       struct mmp_overlay overlays[];
+       struct mmp_overlay overlays[] __counted_by(overlay_num);
 };
 
 extern struct mmp_path *mmp_get_path(const char *name);
index 8d2a3bf..47d96e7 100644 (file)
@@ -109,8 +109,6 @@ struct uvesafb_ktask {
        u32 ack;
 };
 
-static int uvesafb_exec(struct uvesafb_ktask *tsk);
-
 #define UVESAFB_EXACT_RES      1
 #define UVESAFB_EXACT_DEPTH    2
 
index d839a80..8d1bc6c 100644 (file)
@@ -2674,7 +2674,11 @@ static void io_pages_free(struct page ***pages, int npages)
 
        if (!pages)
                return;
+
        page_array = *pages;
+       if (!page_array)
+               return;
+
        for (i = 0; i < npages; i++)
                unpin_user_page(page_array[i]);
        kvfree(page_array);
@@ -2758,7 +2762,9 @@ static void io_rings_free(struct io_ring_ctx *ctx)
                ctx->sq_sqes = NULL;
        } else {
                io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
+               ctx->n_ring_pages = 0;
                io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
+               ctx->n_sqe_pages = 0;
        }
 }
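
io_pages_free() above now tolerates a NULL page array, and io_rings_free() clears n_ring_pages/n_sqe_pages after freeing, so a repeated teardown cannot unpin the same pages twice. The fragment below shows the same defensive idiom for an ordinary heap-allocated array; struct buf_set and its fields are invented.

#include <stdlib.h>

struct buf_set {
        void **pages;
        int npages;
};

/* Clear the pointer and the count together so a second call is a no-op. */
static void buf_set_free(struct buf_set *b)
{
        if (!b->pages)
                return;
        for (int i = 0; i < b->npages; i++)
                free(b->pages[i]);
        free(b->pages);
        b->pages = NULL;
        b->npages = 0;
}

int main(void)
{
        struct buf_set b = { .pages = calloc(2, sizeof(void *)), .npages = 2 };

        b.pages[0] = malloc(16);
        b.pages[1] = malloc(16);
        buf_set_free(&b);
        buf_set_free(&b);       /* second call sees the cleared state */
        return 0;
}
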
 
index 21d2fa8..6f0d6fb 100644 (file)
@@ -2212,7 +2212,7 @@ __audit_reusename(const __user char *uptr)
                if (!n->name)
                        continue;
                if (n->name->uptr == uptr) {
-                       n->name->refcnt++;
+                       atomic_inc(&n->name->refcnt);
                        return n->name;
                }
        }
@@ -2241,7 +2241,7 @@ void __audit_getname(struct filename *name)
        n->name = name;
        n->name_len = AUDIT_NAME_FULL;
        name->aname = n;
-       name->refcnt++;
+       atomic_inc(&name->refcnt);
 }
 
 static inline int audit_copy_fcaps(struct audit_names *name,
@@ -2373,7 +2373,7 @@ out_alloc:
                return;
        if (name) {
                n->name = name;
-               name->refcnt++;
+               atomic_inc(&name->refcnt);
        }
 
 out:
@@ -2500,7 +2500,7 @@ void __audit_inode_child(struct inode *parent,
                if (found_parent) {
                        found_child->name = found_parent->name;
                        found_child->name_len = AUDIT_NAME_FULL;
-                       found_child->name->refcnt++;
+                       atomic_inc(&found_child->name->refcnt);
                }
        }
 
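These audit hunks go with the struct filename change earlier in this diff that turns refcnt from a plain int into an atomic_t: every bare refcnt++ becomes atomic_inc(). The user-space analogue below (build with -pthread) only illustrates why a plain increment on a counter shared between contexts can lose updates while an atomic one cannot; the thread count and loop size are arbitrary.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int plain_refcnt;
static atomic_int atomic_refcnt;

static void *bump(void *arg)
{
        (void)arg;
        for (int i = 0; i < 100000; i++) {
                plain_refcnt++;                         /* data race */
                atomic_fetch_add(&atomic_refcnt, 1);    /* well defined */
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, bump, NULL);
        pthread_create(&b, NULL, bump, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* plain_refcnt may be below 200000; atomic_refcnt is exactly 200000 */
        printf("plain=%d atomic=%d\n", plain_refcnt,
               atomic_load(&atomic_refcnt));
        return 0;
}
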
index 007d98c..1394168 100644 (file)
@@ -401,14 +401,16 @@ int bpf_mprog_query(const union bpf_attr *attr, union bpf_attr __user *uattr,
        struct bpf_mprog_cp *cp;
        struct bpf_prog *prog;
        const u32 flags = 0;
+       u32 id, count = 0;
+       u64 revision = 1;
        int i, ret = 0;
-       u32 id, count;
-       u64 revision;
 
        if (attr->query.query_flags || attr->query.attach_flags)
                return -EINVAL;
-       revision = bpf_mprog_revision(entry);
-       count = bpf_mprog_total(entry);
+       if (entry) {
+               revision = bpf_mprog_revision(entry);
+               count = bpf_mprog_total(entry);
+       }
        if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
                return -EFAULT;
        if (copy_to_user(&uattr->query.revision, &revision, sizeof(revision)))
index eb01c31..d77b2f8 100644 (file)
@@ -3796,7 +3796,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       u32 mask;
        int ret;
 
        if (CHECK_ATTR(BPF_PROG_ATTACH))
@@ -3805,10 +3804,16 @@ static int bpf_prog_attach(const union bpf_attr *attr)
        ptype = attach_type_to_prog_type(attr->attach_type);
        if (ptype == BPF_PROG_TYPE_UNSPEC)
                return -EINVAL;
-       mask = bpf_mprog_supported(ptype) ?
-              BPF_F_ATTACH_MASK_MPROG : BPF_F_ATTACH_MASK_BASE;
-       if (attr->attach_flags & ~mask)
-               return -EINVAL;
+       if (bpf_mprog_supported(ptype)) {
+               if (attr->attach_flags & ~BPF_F_ATTACH_MASK_MPROG)
+                       return -EINVAL;
+       } else {
+               if (attr->attach_flags & ~BPF_F_ATTACH_MASK_BASE)
+                       return -EINVAL;
+               if (attr->relative_fd ||
+                   attr->expected_revision)
+                       return -EINVAL;
+       }
 
        prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
        if (IS_ERR(prog))
@@ -3878,6 +3883,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                        if (IS_ERR(prog))
                                return PTR_ERR(prog);
                }
+       } else if (attr->attach_flags ||
+                  attr->relative_fd ||
+                  attr->expected_revision) {
+               return -EINVAL;
        }
 
        switch (ptype) {
@@ -3913,7 +3922,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
        return ret;
 }
 
-#define BPF_PROG_QUERY_LAST_FIELD query.link_attach_flags
+#define BPF_PROG_QUERY_LAST_FIELD query.revision
 
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
index 13f0b5d..1338a13 100644 (file)
@@ -123,7 +123,6 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
 {
        bool ingress = attr->query.attach_type == BPF_TCX_INGRESS;
        struct net *net = current->nsproxy->net_ns;
-       struct bpf_mprog_entry *entry;
        struct net_device *dev;
        int ret;
 
@@ -133,12 +132,7 @@ int tcx_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr)
                ret = -ENODEV;
                goto out;
        }
-       entry = tcx_entry_fetch(dev, ingress);
-       if (!entry) {
-               ret = -ENOENT;
-               goto out;
-       }
-       ret = bpf_mprog_query(attr, uattr, entry);
+       ret = bpf_mprog_query(attr, uattr, tcx_entry_fetch(dev, ingress));
 out:
        rtnl_unlock();
        return ret;
index c0c7d13..873ade1 100644 (file)
@@ -14479,7 +14479,7 @@ static int check_return_code(struct bpf_verifier_env *env)
        struct tnum enforce_attach_type_range = tnum_unknown;
        const struct bpf_prog *prog = env->prog;
        struct bpf_reg_state *reg;
-       struct tnum range = tnum_range(0, 1);
+       struct tnum range = tnum_range(0, 1), const_0 = tnum_const(0);
        enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
        int err;
        struct bpf_func_state *frame = env->cur_state->frame[0];
@@ -14527,8 +14527,8 @@ static int check_return_code(struct bpf_verifier_env *env)
                        return -EINVAL;
                }
 
-               if (!tnum_in(tnum_const(0), reg->var_off)) {
-                       verbose_invalid_scalar(env, reg, &range, "async callback", "R0");
+               if (!tnum_in(const_0, reg->var_off)) {
+                       verbose_invalid_scalar(env, reg, &const_0, "async callback", "R0");
                        return -EINVAL;
                }
                return 0;
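
Aside: the check above uses the verifier's tnum ("tracked number") helpers; tnum_in(const_0, reg->var_off) asks whether R0 is provably the constant 0. A minimal standalone sketch of the two helpers involved (the (value, mask) layout mirrors the verifier's, but this is illustrative C, not the kernel source):

#include <stdbool.h>

struct tnum { unsigned long long value, mask; };   /* mask bits are "unknown" */

static struct tnum tnum_const(unsigned long long v)
{
        return (struct tnum){ .value = v, .mask = 0 };  /* every bit known */
}

/* true iff every concrete value 'b' may take is also admitted by 'a' */
static bool tnum_in(struct tnum a, struct tnum b)
{
        if (b.mask & ~a.mask)
                return false;   /* b leaves bits unknown that a pins down */
        return a.value == (b.value & ~a.mask);
}

For tnum_const(0) this reduces to "reg->var_off has no unknown bits and its value is 0", i.e. the async callback's R0 must be exactly zero, which is what the sharper error message now reports.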
index c487ffe..76db6c6 100644 (file)
@@ -360,10 +360,9 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        }
        css_task_iter_end(&it);
        length = n;
-       /* now sort & (if procs) strip out duplicates */
+       /* now sort & strip out duplicates (tgids or recycled thread PIDs) */
        sort(array, length, sizeof(pid_t), cmppid, NULL);
-       if (type == CGROUP_FILE_PROCS)
-               length = pidlist_uniq(array, length);
+       length = pidlist_uniq(array, length);
 
        l = cgroup_pidlist_find_create(cgrp, type);
        if (!l) {
index 7e0b4dd..0b3af15 100644 (file)
@@ -3740,12 +3740,18 @@ static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progre
 
        seq = prb_next_seq(prb);
 
+       /* Flush the consoles so that records up to @seq are printed. */
+       console_lock();
+       console_unlock();
+
        for (;;) {
                diff = 0;
 
                /*
                 * Hold the console_lock to guarantee safe access to
-                * console->seq.
+                * console->seq. Releasing console_lock flushes more
+                * records in case @seq is still not printed on all
+                * usable consoles.
                 */
                console_lock();
 
index ef7490c..061a30a 100644 (file)
@@ -872,14 +872,16 @@ struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
  *
  * Which allows an EDF like search on (sub)trees.
  */
-static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+static struct sched_entity *__pick_eevdf(struct cfs_rq *cfs_rq)
 {
        struct rb_node *node = cfs_rq->tasks_timeline.rb_root.rb_node;
        struct sched_entity *curr = cfs_rq->curr;
        struct sched_entity *best = NULL;
+       struct sched_entity *best_left = NULL;
 
        if (curr && (!curr->on_rq || !entity_eligible(cfs_rq, curr)))
                curr = NULL;
+       best = curr;
 
        /*
         * Once selected, run a task until it either becomes non-eligible or
@@ -900,33 +902,75 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
                }
 
                /*
-                * If this entity has an earlier deadline than the previous
-                * best, take this one. If it also has the earliest deadline
-                * of its subtree, we're done.
+                * Now we heap search eligible trees for the best (min_)deadline
                 */
-               if (!best || deadline_gt(deadline, best, se)) {
+               if (!best || deadline_gt(deadline, best, se))
                        best = se;
-                       if (best->deadline == best->min_deadline)
-                               break;
-               }
 
                /*
-                * If the earlest deadline in this subtree is in the fully
-                * eligible left half of our space, go there.
+                * Every se in a left branch is eligible, keep track of the
+                * branch with the best min_deadline
                 */
+               if (node->rb_left) {
+                       struct sched_entity *left = __node_2_se(node->rb_left);
+
+                       if (!best_left || deadline_gt(min_deadline, best_left, left))
+                               best_left = left;
+
+                       /*
+                        * min_deadline is in the left branch. rb_left and all
+                        * descendants are eligible, so immediately switch to the second
+                        * loop.
+                        */
+                       if (left->min_deadline == se->min_deadline)
+                               break;
+               }
+
+               /* min_deadline is at this node, no need to look right */
+               if (se->deadline == se->min_deadline)
+                       break;
+
+               /* else min_deadline is in the right branch. */
+               node = node->rb_right;
+       }
+
+       /*
+        * We ran into an eligible node which is itself the best.
+        * (Or nr_running == 0 and both are NULL)
+        */
+       if (!best_left || (s64)(best_left->min_deadline - best->deadline) > 0)
+               return best;
+
+       /*
+        * Now best_left and all of its children are eligible, and we are just
+        * looking for deadline == min_deadline
+        */
+       node = &best_left->run_node;
+       while (node) {
+               struct sched_entity *se = __node_2_se(node);
+
+               /* min_deadline is the current node */
+               if (se->deadline == se->min_deadline)
+                       return se;
+
+               /* min_deadline is in the left branch */
                if (node->rb_left &&
                    __node_2_se(node->rb_left)->min_deadline == se->min_deadline) {
                        node = node->rb_left;
                        continue;
                }
 
+               /* else min_deadline is in the right branch */
                node = node->rb_right;
        }
+       return NULL;
+}
 
-       if (!best || (curr && deadline_gt(deadline, best, curr)))
-               best = curr;
+static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
+{
+       struct sched_entity *se = __pick_eevdf(cfs_rq);
 
-       if (unlikely(!best)) {
+       if (!se) {
                struct sched_entity *left = __pick_first_entity(cfs_rq);
                if (left) {
                        pr_err("EEVDF scheduling fail, picking leftmost\n");
@@ -934,7 +978,7 @@ static struct sched_entity *pick_eevdf(struct cfs_rq *cfs_rq)
                }
        }
 
-       return best;
+       return se;
 }
 
 #ifdef CONFIG_SCHED_DEBUG
@@ -3613,6 +3657,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                 */
                deadline = div_s64(deadline * old_weight, weight);
                se->deadline = se->vruntime + deadline;
+               min_deadline_cb_propagate(&se->run_node, NULL);
        }
 
 #ifdef CONFIG_SMP
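
Aside: the second loop added to __pick_eevdf() above is a standard augmented-tree descent. Every node caches the minimum deadline of its subtree, so once an all-eligible subtree (best_left) has been chosen, the entity owning that minimum is found by following the cached values. A minimal sketch of that descent, with illustrative types rather than the kernel's rbtree/sched_entity:

struct node {
        unsigned long long deadline;        /* this entity's own deadline        */
        unsigned long long min_deadline;    /* cached min over the whole subtree */
        struct node *left, *right;
};

/* Return the node whose own deadline equals the subtree's cached minimum. */
static struct node *find_min_deadline(struct node *n)
{
        while (n) {
                if (n->deadline == n->min_deadline)
                        return n;               /* the minimum is this node      */
                if (n->left && n->left->min_deadline == n->min_deadline)
                        n = n->left;            /* the minimum lies to the left  */
                else
                        n = n->right;           /* otherwise it is to the right  */
        }
        return NULL;    /* unreachable for a well-formed augmented tree */
}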
index 3b21f40..881f90f 100644 (file)
@@ -189,7 +189,7 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 {
        int i, size;
 
-       if (num < 0)
+       if (num <= 0)
                return -EINVAL;
 
        if (!fp->exit_handler) {
@@ -202,8 +202,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
                size = fp->nr_maxactive;
        else
                size = num * num_possible_cpus() * 2;
-       if (size < 0)
-               return -E2BIG;
+       if (size <= 0)
+               return -EINVAL;
 
        fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
        if (!fp->rethook)
index b9f053a..a3522b7 100644 (file)
@@ -2166,7 +2166,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 {
        struct worker *worker;
        int id;
-       char id_buf[16];
+       char id_buf[23];
 
        /* ID is needed to determine kthread name */
        id = ida_alloc(&pool->worker_ida, GFP_KERNEL);
@@ -4600,12 +4600,22 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
        }
        cpus_read_unlock();
 
+       /* For unbound pwqs, flushing pwq_release_worker ensures that
+        * pwq_release_workfn() completes before kfree(wq) is called.
+        */
+       if (ret)
+               kthread_flush_worker(pwq_release_worker);
+
        return ret;
 
 enomem:
        if (wq->cpu_pwq) {
-               for_each_possible_cpu(cpu)
-                       kfree(*per_cpu_ptr(wq->cpu_pwq, cpu));
+               for_each_possible_cpu(cpu) {
+                       struct pool_workqueue *pwq = *per_cpu_ptr(wq->cpu_pwq, cpu);
+
+                       if (pwq)
+                               kmem_cache_free(pwq_cache, pwq);
+               }
                free_percpu(wq->cpu_pwq);
                wq->cpu_pwq = NULL;
        }
@@ -5782,9 +5792,13 @@ static int workqueue_apply_unbound_cpumask(const cpumask_var_t unbound_cpumask)
        list_for_each_entry(wq, &workqueues, list) {
                if (!(wq->flags & WQ_UNBOUND))
                        continue;
+
                /* creating multiple pwqs breaks ordering guarantee */
-               if (wq->flags & __WQ_ORDERED)
-                       continue;
+               if (!list_empty(&wq->pwqs)) {
+                       if (wq->flags & __WQ_ORDERED_EXPLICIT)
+                               continue;
+                       wq->flags &= ~__WQ_ORDERED;
+               }
 
                ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs, unbound_cpumask);
                if (IS_ERR(ctx)) {
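
Aside: the id_buf resize in create_worker() above is sized for the worst-case formatting of two 32-bit ids. Assuming the usual "u%d:%d"-style worker-id format (the actual format string lives in create_worker() and is not shown in this hunk), a quick userspace check of the arithmetic:

#include <stdio.h>

int main(void)
{
        char id_buf[23];
        /* worst case: "u" + 10-digit id + ":" + 10-digit id + NUL = 23 bytes */
        int n = snprintf(id_buf, sizeof(id_buf), "u%d:%d", 2147483647, 2147483647);

        printf("needs %d chars + NUL, buffer holds %zu\n", n, sizeof(id_buf));
        return 0;
}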
index 8fda308..9bbffe8 100644 (file)
@@ -895,10 +895,13 @@ void __init setup_kmalloc_cache_index_table(void)
 
 static unsigned int __kmalloc_minalign(void)
 {
+       unsigned int minalign = dma_get_cache_alignment();
+
        if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
            is_swiotlb_allocated())
-               return ARCH_KMALLOC_MINALIGN;
-       return dma_get_cache_alignment();
+               minalign = ARCH_KMALLOC_MINALIGN;
+
+       return max(minalign, arch_slab_minalign());
 }
 
 void __init
index 7a6f203..73470cc 100644 (file)
@@ -1627,6 +1627,15 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                return ERR_PTR(-EOPNOTSUPP);
        }
 
+       /* Reject outgoing connection to device with same BD ADDR against
+        * CVE-2020-26555
+        */
+       if (!bacmp(&hdev->bdaddr, dst)) {
+               bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+                          dst);
+               return ERR_PTR(-ECONNREFUSED);
+       }
+
        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
index 31d02b5..1e1c914 100644 (file)
@@ -26,6 +26,8 @@
 /* Bluetooth HCI event handling. */
 
 #include <asm/unaligned.h>
+#include <linux/crypto.h>
+#include <crypto/algapi.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -3268,6 +3270,16 @@ static void hci_conn_request_evt(struct hci_dev *hdev, void *data,
 
        bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type);
 
+       /* Reject incoming connection from device with same BD ADDR against
+        * CVE-2020-26555
+        */
+       if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) {
+               bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n",
+                          &ev->bdaddr);
+               hci_reject_conn(hdev, &ev->bdaddr);
+               return;
+       }
+
        mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
                                      &flags);
 
@@ -4742,6 +4754,15 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data,
        if (!conn)
                goto unlock;
 
+       /* Ignore NULL link key against CVE-2020-26555 */
+       if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) {
+               bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR",
+                          &ev->bdaddr);
+               hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
+               hci_conn_drop(conn);
+               goto unlock;
+       }
+
        hci_conn_hold(conn);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
        hci_conn_drop(conn);
@@ -5274,8 +5295,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
                 * available, then do not declare that OOB data is
                 * present.
                 */
-               if (!memcmp(data->rand256, ZERO_KEY, 16) ||
-                   !memcmp(data->hash256, ZERO_KEY, 16))
+               if (!crypto_memneq(data->rand256, ZERO_KEY, 16) ||
+                   !crypto_memneq(data->hash256, ZERO_KEY, 16))
                        return 0x00;
 
                return 0x02;
@@ -5285,8 +5306,8 @@ static u8 bredr_oob_data_present(struct hci_conn *conn)
         * not supported by the hardware, then check that if
         * P-192 data values are present.
         */
-       if (!memcmp(data->rand192, ZERO_KEY, 16) ||
-           !memcmp(data->hash192, ZERO_KEY, 16))
+       if (!crypto_memneq(data->rand192, ZERO_KEY, 16) ||
+           !crypto_memneq(data->hash192, ZERO_KEY, 16))
                return 0x00;
 
        return 0x01;
@@ -5303,7 +5324,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (!conn)
+       if (!conn || !hci_conn_ssp_enabled(conn))
                goto unlock;
 
        hci_conn_hold(conn);
@@ -5550,7 +5571,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data,
        hci_dev_lock(hdev);
 
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-       if (!conn)
+       if (!conn || !hci_conn_ssp_enabled(conn))
                goto unlock;
 
        /* Reset the authentication requirement to unknown */
@@ -7021,6 +7042,14 @@ unlock:
        hci_dev_unlock(hdev);
 }
 
+static int hci_iso_term_big_sync(struct hci_dev *hdev, void *data)
+{
+       u8 handle = PTR_UINT(data);
+
+       return hci_le_terminate_big_sync(hdev, handle,
+                                        HCI_ERROR_LOCAL_HOST_TERM);
+}
+
 static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
                                           struct sk_buff *skb)
 {
@@ -7065,16 +7094,17 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,
                rcu_read_lock();
        }
 
+       rcu_read_unlock();
+
        if (!ev->status && !i)
                /* If no BISes have been connected for the BIG,
                 * terminate. This is in case all bound connections
                 * have been closed before the BIG creation
                 * has completed.
                 */
-               hci_le_terminate_big_sync(hdev, ev->handle,
-                                         HCI_ERROR_LOCAL_HOST_TERM);
+               hci_cmd_sync_queue(hdev, hci_iso_term_big_sync,
+                                  UINT_PTR(ev->handle), NULL);
 
-       rcu_read_unlock();
        hci_dev_unlock(hdev);
 }
 
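
Aside: the switch from memcmp() to crypto_memneq() in the ZERO_KEY checks above is about constant-time comparison: the result must not leak, through an early exit, how many leading bytes matched. A rough userspace sketch of the idea (not the kernel's crypto_memneq() implementation):

#include <stddef.h>

static int ct_memneq(const void *a, const void *b, size_t n)
{
        const unsigned char *pa = a, *pb = b;
        unsigned char diff = 0;
        size_t i;

        for (i = 0; i < n; i++)
                diff |= pa[i] ^ pb[i];  /* accumulate differences, no early exit */

        return diff != 0;               /* non-zero iff the buffers differ */
}

With this, !ct_memneq(key, zero, 16) is true only when all 16 bytes are zero, which is how the hunks above detect an all-zero link key or OOB value without data-dependent timing.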
index 5e4f718..3e7cd33 100644 (file)
@@ -488,7 +488,8 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
                ni->type = hdev->dev_type;
                ni->bus = hdev->bus;
                bacpy(&ni->bdaddr, &hdev->bdaddr);
-               memcpy(ni->name, hdev->name, 8);
+               memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name,
+                              strnlen(hdev->name, sizeof(ni->name)), '\0');
 
                opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
                break;
index d06e07a..a15ab0b 100644 (file)
@@ -5369,6 +5369,7 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
 {
        int err = 0;
        u16 handle = conn->handle;
+       bool disconnect = false;
        struct hci_conn *c;
 
        switch (conn->state) {
@@ -5399,24 +5400,15 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
                hci_dev_unlock(hdev);
                return 0;
        case BT_BOUND:
-               hci_dev_lock(hdev);
-               hci_conn_failed(conn, reason);
-               hci_dev_unlock(hdev);
-               return 0;
+               break;
        default:
-               hci_dev_lock(hdev);
-               conn->state = BT_CLOSED;
-               hci_disconn_cfm(conn, reason);
-               hci_conn_del(conn);
-               hci_dev_unlock(hdev);
-               return 0;
+               disconnect = true;
+               break;
        }
 
        hci_dev_lock(hdev);
 
-       /* Check if the connection hasn't been cleanup while waiting
-        * commands to complete.
-        */
+       /* Check if the connection has been cleaned up concurrently */
        c = hci_conn_hash_lookup_handle(hdev, handle);
        if (!c || c != conn) {
                err = 0;
@@ -5428,7 +5420,13 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
         * or in case of LE it was still scanning so it can be cleanup
         * safely.
         */
-       hci_conn_failed(conn, reason);
+       if (disconnect) {
+               conn->state = BT_CLOSED;
+               hci_disconn_cfm(conn, reason);
+               hci_conn_del(conn);
+       } else {
+               hci_conn_failed(conn, reason);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
index f02b5d3..d1c6f20 100644 (file)
@@ -948,21 +948,18 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
        if (!so->bound || so->tx.state == ISOTP_SHUTDOWN)
                return -EADDRNOTAVAIL;
 
-wait_free_buffer:
-       /* we do not support multiple buffers - for now */
-       if (wq_has_sleeper(&so->wait) && (msg->msg_flags & MSG_DONTWAIT))
-               return -EAGAIN;
+       while (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
+               /* we do not support multiple buffers - for now */
+               if (msg->msg_flags & MSG_DONTWAIT)
+                       return -EAGAIN;
 
-       /* wait for complete transmission of current pdu */
-       err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
-       if (err)
-               goto err_event_drop;
-
-       if (cmpxchg(&so->tx.state, ISOTP_IDLE, ISOTP_SENDING) != ISOTP_IDLE) {
                if (so->tx.state == ISOTP_SHUTDOWN)
                        return -EADDRNOTAVAIL;
 
-               goto wait_free_buffer;
+               /* wait for complete transmission of current pdu */
+               err = wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+               if (err)
+                       goto err_event_drop;
        }
 
        /* PDU size > default => try max_pdu_size */
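
Aside: the isotp_sendmsg() rework above folds the "claim the single TX buffer or wait" logic into one loop around cmpxchg(). A standalone C11 sketch of the same claim-or-wait pattern (illustrative only; the kernel path uses cmpxchg() plus wait_event_interruptible(), not C11 atomics):

#include <stdatomic.h>
#include <stdbool.h>

enum tx_state { TX_IDLE, TX_SENDING };

static bool claim_tx(_Atomic int *state, bool nonblock)
{
        int expected = TX_IDLE;

        /* Try to move IDLE -> SENDING; only one sender can win. */
        while (!atomic_compare_exchange_strong(state, &expected, TX_SENDING)) {
                if (nonblock)
                        return false;   /* caller reports -EAGAIN */
                expected = TX_IDLE;     /* retry from IDLE on the next pass */
                /* a real implementation sleeps here until the state drops back to IDLE */
        }
        return true;
}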
index 10a41cd..3c8b78d 100644 (file)
@@ -459,8 +459,8 @@ int ceph_tcp_connect(struct ceph_connection *con)
        set_sock_callbacks(sock, con);
 
        con_sock_state_connecting(con);
-       ret = sock->ops->connect(sock, (struct sockaddr *)&ss, sizeof(ss),
-                                O_NONBLOCK);
+       ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
+                            O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     ceph_pr_addr(&con->peer_addr),
index 85df22f..9f3f893 100644 (file)
@@ -345,7 +345,6 @@ int netdev_name_node_alt_create(struct net_device *dev, const char *name)
 static void __netdev_name_node_alt_destroy(struct netdev_name_node *name_node)
 {
        list_del(&name_node->list);
-       netdev_name_node_del(name_node);
        kfree(name_node->name);
        netdev_name_node_free(name_node);
 }
@@ -364,6 +363,8 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
        if (name_node == dev->name_node || name_node->dev != dev)
                return -EINVAL;
 
+       netdev_name_node_del(name_node);
+       synchronize_rcu();
        __netdev_name_node_alt_destroy(name_node);
 
        return 0;
@@ -380,6 +381,7 @@ static void netdev_name_node_alt_flush(struct net_device *dev)
 /* Device list insertion */
 static void list_netdevice(struct net_device *dev)
 {
+       struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);
 
        ASSERT_RTNL();
@@ -390,6 +392,10 @@ static void list_netdevice(struct net_device *dev)
        hlist_add_head_rcu(&dev->index_hlist,
                           dev_index_hash(net, dev->ifindex));
        write_unlock(&dev_base_lock);
+
+       netdev_for_each_altname(dev, name_node)
+               netdev_name_node_add(net, name_node);
+
        /* We reserved the ifindex, this can't fail */
        WARN_ON(xa_store(&net->dev_by_index, dev->ifindex, dev, GFP_KERNEL));
 
@@ -401,12 +407,16 @@ static void list_netdevice(struct net_device *dev)
  */
 static void unlist_netdevice(struct net_device *dev, bool lock)
 {
+       struct netdev_name_node *name_node;
        struct net *net = dev_net(dev);
 
        ASSERT_RTNL();
 
        xa_erase(&net->dev_by_index, dev->ifindex);
 
+       netdev_for_each_altname(dev, name_node)
+               netdev_name_node_del(name_node);
+
        /* Unlink dev from the device chain */
        if (lock)
                write_lock(&dev_base_lock);
@@ -1086,7 +1096,8 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
 
                for_each_netdev(net, d) {
                        struct netdev_name_node *name_node;
-                       list_for_each_entry(name_node, &d->name_node->list, list) {
+
+                       netdev_for_each_altname(d, name_node) {
                                if (!sscanf(name_node->name, name, &i))
                                        continue;
                                if (i < 0 || i >= max_netdevices)
@@ -1123,6 +1134,26 @@ static int __dev_alloc_name(struct net *net, const char *name, char *buf)
        return -ENFILE;
 }
 
+static int dev_prep_valid_name(struct net *net, struct net_device *dev,
+                              const char *want_name, char *out_name)
+{
+       int ret;
+
+       if (!dev_valid_name(want_name))
+               return -EINVAL;
+
+       if (strchr(want_name, '%')) {
+               ret = __dev_alloc_name(net, want_name, out_name);
+               return ret < 0 ? ret : 0;
+       } else if (netdev_name_in_use(net, want_name)) {
+               return -EEXIST;
+       } else if (out_name != want_name) {
+               strscpy(out_name, want_name, IFNAMSIZ);
+       }
+
+       return 0;
+}
+
 static int dev_alloc_name_ns(struct net *net,
                             struct net_device *dev,
                             const char *name)
@@ -1160,19 +1191,13 @@ EXPORT_SYMBOL(dev_alloc_name);
 static int dev_get_valid_name(struct net *net, struct net_device *dev,
                              const char *name)
 {
-       BUG_ON(!net);
-
-       if (!dev_valid_name(name))
-               return -EINVAL;
-
-       if (strchr(name, '%'))
-               return dev_alloc_name_ns(net, dev, name);
-       else if (netdev_name_in_use(net, name))
-               return -EEXIST;
-       else if (dev->name != name)
-               strscpy(dev->name, name, IFNAMSIZ);
+       char buf[IFNAMSIZ];
+       int ret;
 
-       return 0;
+       ret = dev_prep_valid_name(net, dev, name, buf);
+       if (ret >= 0)
+               strscpy(dev->name, buf, IFNAMSIZ);
+       return ret;
 }
 
 /**
@@ -3292,15 +3317,19 @@ int skb_checksum_help(struct sk_buff *skb)
 
        offset = skb_checksum_start_offset(skb);
        ret = -EINVAL;
-       if (WARN_ON_ONCE(offset >= skb_headlen(skb))) {
+       if (unlikely(offset >= skb_headlen(skb))) {
                DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+               WARN_ONCE(true, "offset (%d) >= skb_headlen() (%u)\n",
+                         offset, skb_headlen(skb));
                goto out;
        }
        csum = skb_checksum(skb, offset, skb->len - offset, 0);
 
        offset += skb->csum_offset;
-       if (WARN_ON_ONCE(offset + sizeof(__sum16) > skb_headlen(skb))) {
+       if (unlikely(offset + sizeof(__sum16) > skb_headlen(skb))) {
                DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
+               WARN_ONCE(true, "offset+2 (%zu) > skb_headlen() (%u)\n",
+                         offset + sizeof(__sum16), skb_headlen(skb));
                goto out;
        }
        ret = skb_ensure_writable(skb, offset + sizeof(__sum16));
@@ -11033,7 +11062,9 @@ EXPORT_SYMBOL(unregister_netdev);
 int __dev_change_net_namespace(struct net_device *dev, struct net *net,
                               const char *pat, int new_ifindex)
 {
+       struct netdev_name_node *name_node;
        struct net *net_old = dev_net(dev);
+       char new_name[IFNAMSIZ] = {};
        int err, new_nsid;
 
        ASSERT_RTNL();
@@ -11060,10 +11091,15 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
                /* We get here if we can't use the current device name */
                if (!pat)
                        goto out;
-               err = dev_get_valid_name(net, dev, pat);
+               err = dev_prep_valid_name(net, dev, pat, new_name);
                if (err < 0)
                        goto out;
        }
+       /* Check that none of the altnames conflicts. */
+       err = -EEXIST;
+       netdev_for_each_altname(dev, name_node)
+               if (netdev_name_in_use(net, name_node->name))
+                       goto out;
 
        /* Check that new_ifindex isn't used yet. */
        if (new_ifindex) {
@@ -11131,6 +11167,9 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net,
        kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
        netdev_adjacent_add_links(dev);
 
+       if (new_name[0]) /* Rename the netdev to prepared name */
+               strscpy(dev->name, new_name, IFNAMSIZ);
+
        /* Fixup kobjects */
        err = device_rename(&dev->dev, dev->name);
        WARN_ON(err);
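
Aside: dev_prep_valid_name() above keeps the long-standing semantics of interface name templates: a name containing '%' (e.g. "eth%d") is given the lowest free index, while a literal name must not already be in use. A rough userspace model of the template scan (a hypothetical helper, not the kernel's __dev_alloc_name()):

#include <stdbool.h>
#include <stdio.h>

/* Return the lowest index not used by any existing name matching 'tmpl'. */
static int first_free_index(const char *tmpl, const char *used[], int n_used)
{
        bool taken[256] = { false };
        int i, idx;

        for (i = 0; i < n_used; i++)
                if (sscanf(used[i], tmpl, &idx) == 1 && idx >= 0 && idx < 256)
                        taken[idx] = true;      /* index already claimed */

        for (idx = 0; idx < 256; idx++)
                if (!taken[idx])
                        return idx;
        return -1;                              /* namespace exhausted (ENFILE) */
}

For example, scanning { "eth0", "eth2" } with the template "eth%d" returns 1.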
index e075e19..fa2e9c5 100644 (file)
@@ -62,6 +62,9 @@ struct netdev_name_node {
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_change_name(struct net_device *dev, const char *newname);
 
+#define netdev_for_each_altname(dev, namenode)                         \
+       list_for_each_entry((namenode), &(dev)->name_node->list, list)
+
 int netdev_name_node_alt_create(struct net_device *dev, const char *name);
 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
 
index f56b8d6..4d16966 100644 (file)
@@ -669,19 +669,19 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        seq_puts(seq, "     Flags: ");
 
        for (i = 0; i < NR_PKT_FLAGS; i++) {
-               if (i == F_FLOW_SEQ)
+               if (i == FLOW_SEQ_SHIFT)
                        if (!pkt_dev->cflows)
                                continue;
 
-               if (pkt_dev->flags & (1 << i))
+               if (pkt_dev->flags & (1 << i)) {
                        seq_printf(seq, "%s  ", pkt_flag_names[i]);
-               else if (i == F_FLOW_SEQ)
-                       seq_puts(seq, "FLOW_RND  ");
-
 #ifdef CONFIG_XFRM
-               if (i == F_IPSEC && pkt_dev->spi)
-                       seq_printf(seq, "spi:%u", pkt_dev->spi);
+                       if (i == IPSEC_SHIFT && pkt_dev->spi)
+                               seq_printf(seq, "spi:%u  ", pkt_dev->spi);
 #endif
+               } else if (i == FLOW_SEQ_SHIFT) {
+                       seq_puts(seq, "FLOW_RND  ");
+               }
        }
 
        seq_puts(seq, "\n");
index 4a2ec33..53c377d 100644 (file)
@@ -5503,13 +5503,11 @@ static unsigned int
 rtnl_offload_xstats_get_size_hw_s_info_one(const struct net_device *dev,
                                           enum netdev_offload_xstats_type type)
 {
-       bool enabled = netdev_offload_xstats_enabled(dev, type);
-
        return nla_total_size(0) +
                /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST */
                nla_total_size(sizeof(u8)) +
                /* IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED */
-               (enabled ? nla_total_size(sizeof(u8)) : 0) +
+               nla_total_size(sizeof(u8)) +
                0;
 }
 
index f5c4e47..96fbcb9 100644 (file)
@@ -117,7 +117,7 @@ EXPORT_SYMBOL(sk_stream_wait_close);
  */
 int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 {
-       int err = 0;
+       int ret, err = 0;
        long vm_wait = 0;
        long current_timeo = *timeo_p;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -142,11 +142,13 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
 
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_pending++;
-               sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
-                                                 (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
-                                                 (sk_stream_memory_free(sk) &&
-                                                 !vm_wait), &wait);
+               ret = sk_wait_event(sk, &current_timeo, READ_ONCE(sk->sk_err) ||
+                                   (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN) ||
+                                   (sk_stream_memory_free(sk) && !vm_wait),
+                                   &wait);
                sk->sk_write_pending--;
+               if (ret < 0)
+                       goto do_error;
 
                if (vm_wait) {
                        vm_wait -= current_timeo;
index 638cad8..51e6e81 100644 (file)
@@ -58,7 +58,6 @@ struct devlink_health_reporter {
        struct devlink *devlink;
        struct devlink_port *devlink_port;
        struct devlink_fmsg *dump_fmsg;
-       struct mutex dump_lock; /* lock parallel read/write from dump buffers */
        u64 graceful_period;
        bool auto_recover;
        bool auto_dump;
@@ -125,7 +124,6 @@ __devlink_health_reporter_create(struct devlink *devlink,
        reporter->graceful_period = graceful_period;
        reporter->auto_recover = !!ops->recover;
        reporter->auto_dump = !!ops->dump;
-       mutex_init(&reporter->dump_lock);
        return reporter;
 }
 
@@ -226,7 +224,6 @@ EXPORT_SYMBOL_GPL(devlink_health_reporter_create);
 static void
 devlink_health_reporter_free(struct devlink_health_reporter *reporter)
 {
-       mutex_destroy(&reporter->dump_lock);
        if (reporter->dump_fmsg)
                devlink_fmsg_free(reporter->dump_fmsg);
        kfree(reporter);
@@ -625,10 +622,10 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
        }
 
        if (reporter->auto_dump) {
-               mutex_lock(&reporter->dump_lock);
+               devl_lock(devlink);
                /* store current dump of current error, for later analysis */
                devlink_health_do_dump(reporter, priv_ctx, NULL);
-               mutex_unlock(&reporter->dump_lock);
+               devl_unlock(devlink);
        }
 
        if (!reporter->auto_recover)
@@ -1262,7 +1259,7 @@ out:
 }
 
 static struct devlink_health_reporter *
-devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
+devlink_health_reporter_get_from_cb_lock(struct netlink_callback *cb)
 {
        const struct genl_info *info = genl_info_dump(cb);
        struct devlink_health_reporter *reporter;
@@ -1272,10 +1269,12 @@ devlink_health_reporter_get_from_cb(struct netlink_callback *cb)
        devlink = devlink_get_from_attrs_lock(sock_net(cb->skb->sk), attrs);
        if (IS_ERR(devlink))
                return NULL;
-       devl_unlock(devlink);
 
        reporter = devlink_health_reporter_get_from_attrs(devlink, attrs);
-       devlink_put(devlink);
+       if (!reporter) {
+               devl_unlock(devlink);
+               devlink_put(devlink);
+       }
        return reporter;
 }
 
@@ -1284,16 +1283,20 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
 {
        struct devlink_nl_dump_state *state = devlink_dump_state(cb);
        struct devlink_health_reporter *reporter;
+       struct devlink *devlink;
        int err;
 
-       reporter = devlink_health_reporter_get_from_cb(cb);
+       reporter = devlink_health_reporter_get_from_cb_lock(cb);
        if (!reporter)
                return -EINVAL;
 
-       if (!reporter->ops->dump)
+       devlink = reporter->devlink;
+       if (!reporter->ops->dump) {
+               devl_unlock(devlink);
+               devlink_put(devlink);
                return -EOPNOTSUPP;
+       }
 
-       mutex_lock(&reporter->dump_lock);
        if (!state->idx) {
                err = devlink_health_do_dump(reporter, NULL, cb->extack);
                if (err)
@@ -1309,7 +1312,8 @@ int devlink_nl_cmd_health_reporter_dump_get_dumpit(struct sk_buff *skb,
        err = devlink_fmsg_dumpit(reporter->dump_fmsg, skb, cb,
                                  DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET);
 unlock:
-       mutex_unlock(&reporter->dump_lock);
+       devl_unlock(devlink);
+       devlink_put(devlink);
        return err;
 }
 
@@ -1326,9 +1330,7 @@ int devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
        if (!reporter->ops->dump)
                return -EOPNOTSUPP;
 
-       mutex_lock(&reporter->dump_lock);
        devlink_health_dump_clear(reporter);
-       mutex_unlock(&reporter->dump_lock);
        return 0;
 }
 
index 3d2e30e..2713c9b 100644 (file)
@@ -597,7 +597,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 
        add_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending += writebias;
-       sk->sk_wait_pending++;
 
        /* Basic assumption: if someone sets sk->sk_err, he _must_
         * change state of the socket from TCP_SYN_*.
@@ -613,7 +612,6 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending -= writebias;
-       sk->sk_wait_pending--;
        return timeo;
 }
 
@@ -642,6 +640,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                        return -EINVAL;
 
                if (uaddr->sa_family == AF_UNSPEC) {
+                       sk->sk_disconnects++;
                        err = sk->sk_prot->disconnect(sk, flags);
                        sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
                        goto out;
@@ -696,6 +695,7 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                int writebias = (sk->sk_protocol == IPPROTO_TCP) &&
                                tcp_sk(sk)->fastopen_req &&
                                tcp_sk(sk)->fastopen_req->data ? 1 : 0;
+               int dis = sk->sk_disconnects;
 
                /* Error code is set above */
                if (!timeo || !inet_wait_for_connect(sk, timeo, writebias))
@@ -704,6 +704,11 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        goto out;
+
+               if (dis != sk->sk_disconnects) {
+                       err = -EPIPE;
+                       goto out;
+               }
        }
 
        /* Connection was closed by RST, timeout, ICMP error
@@ -725,6 +730,7 @@ out:
 sock_error:
        err = sock_error(sk) ? : -ECONNABORTED;
        sock->state = SS_UNCONNECTED;
+       sk->sk_disconnects++;
        if (sk->sk_prot->disconnect(sk, flags))
                sock->state = SS_DISCONNECTING;
        goto out;
index 2be2d49..d18f0f0 100644 (file)
@@ -732,7 +732,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
-       pskb_trim(skb, skb->len - trimlen);
+       ret = pskb_trim(skb, skb->len - trimlen);
+       if (unlikely(ret))
+               return ret;
 
        ret = nexthdr[1];
 
index 1ea82bc..5eb1b8d 100644 (file)
@@ -1325,15 +1325,18 @@ __be32 fib_info_update_nhc_saddr(struct net *net, struct fib_nh_common *nhc,
                                 unsigned char scope)
 {
        struct fib_nh *nh;
+       __be32 saddr;
 
        if (nhc->nhc_family != AF_INET)
                return inet_select_addr(nhc->nhc_dev, 0, scope);
 
        nh = container_of(nhc, struct fib_nh, nh_common);
-       nh->nh_saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
-       nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
+       saddr = inet_select_addr(nh->fib_nh_dev, nh->fib_nh_gw4, scope);
 
-       return nh->nh_saddr;
+       WRITE_ONCE(nh->nh_saddr, saddr);
+       WRITE_ONCE(nh->nh_saddr_genid, atomic_read(&net->ipv4.dev_addr_genid));
+
+       return saddr;
 }
 
 __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
@@ -1347,8 +1350,9 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res)
                struct fib_nh *nh;
 
                nh = container_of(nhc, struct fib_nh, nh_common);
-               if (nh->nh_saddr_genid == atomic_read(&net->ipv4.dev_addr_genid))
-                       return nh->nh_saddr;
+               if (READ_ONCE(nh->nh_saddr_genid) ==
+                   atomic_read(&net->ipv4.dev_addr_genid))
+                       return READ_ONCE(nh->nh_saddr);
        }
 
        return fib_info_update_nhc_saddr(net, nhc, res->fi->fib_scope);
index aeebe88..394a498 100644 (file)
@@ -1145,7 +1145,6 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
-               newsk->sk_wait_pending = 0;
                inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
                newicsk->icsk_bind2_hash = NULL;
index c32f5e2..598c1b1 100644 (file)
@@ -149,8 +149,14 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
                                         const struct sock *sk)
 {
 #if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family != tb2->family)
-               return false;
+       if (sk->sk_family != tb2->family) {
+               if (sk->sk_family == AF_INET)
+                       return ipv6_addr_v4mapped(&tb2->v6_rcv_saddr) &&
+                               tb2->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
+
+               return ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr) &&
+                       sk->sk_v6_rcv_saddr.s6_addr32[3] == tb2->rcv_saddr;
+       }
 
        if (sk->sk_family == AF_INET6)
                return ipv6_addr_equal(&tb2->v6_rcv_saddr,
@@ -819,19 +825,7 @@ static bool inet_bind2_bucket_match(const struct inet_bind2_bucket *tb,
            tb->l3mdev != l3mdev)
                return false;
 
-#if IS_ENABLED(CONFIG_IPV6)
-       if (sk->sk_family != tb->family) {
-               if (sk->sk_family == AF_INET)
-                       return ipv6_addr_v4mapped(&tb->v6_rcv_saddr) &&
-                               tb->v6_rcv_saddr.s6_addr32[3] == sk->sk_rcv_saddr;
-
-               return false;
-       }
-
-       if (sk->sk_family == AF_INET6)
-               return ipv6_addr_equal(&tb->v6_rcv_saddr, &sk->sk_v6_rcv_saddr);
-#endif
-       return tb->rcv_saddr == sk->sk_rcv_saddr;
+       return inet_bind2_bucket_addr_match(tb, sk);
 }
 
 bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const struct net *net,
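
Aside: the bind-bucket matching above relies on the IPv4-mapped IPv6 form ::ffff:a.b.c.d, in which the IPv4 address occupies the last 32-bit word. A small userspace sketch of the same comparison (illustrative, using the libc in6 helpers rather than the kernel's):

#include <netinet/in.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Does the IPv6 address carry exactly this IPv4 address in mapped form? */
static bool v4_mapped_matches(const struct in6_addr *a6, uint32_t a4_net_order)
{
        return IN6_IS_ADDR_V4MAPPED(a6) &&
               memcmp(&a6->s6_addr[12], &a4_net_order, sizeof(a4_net_order)) == 0;
}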
index 3f66cde..d3456cf 100644 (file)
@@ -831,7 +831,9 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                         */
                        if (!skb_queue_empty(&sk->sk_receive_queue))
                                break;
-                       sk_wait_data(sk, &timeo, NULL);
+                       ret = sk_wait_data(sk, &timeo, NULL);
+                       if (ret < 0)
+                               break;
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
@@ -2442,7 +2444,11 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
                        __sk_flush_backlog(sk);
                } else {
                        tcp_cleanup_rbuf(sk, copied);
-                       sk_wait_data(sk, &timeo, last);
+                       err = sk_wait_data(sk, &timeo, last);
+                       if (err < 0) {
+                               err = copied ? : err;
+                               goto out;
+                       }
                }
 
                if ((flags & MSG_PEEK) &&
@@ -2966,12 +2972,6 @@ int tcp_disconnect(struct sock *sk, int flags)
        int old_state = sk->sk_state;
        u32 seq;
 
-       /* Deny disconnect if other threads are blocked in sk_wait_event()
-        * or inet_wait_for_connect().
-        */
-       if (sk->sk_wait_pending)
-               return -EBUSY;
-
        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);
 
index 3272682..53b0d62 100644 (file)
@@ -307,6 +307,10 @@ msg_bytes_ready:
                }
 
                data = tcp_msg_wait_data(sk, psock, timeo);
+               if (data < 0) {
+                       copied = data;
+                       goto unlock;
+               }
                if (data && !sk_psock_queue_empty(psock))
                        goto msg_bytes_ready;
                copied = -EAGAIN;
@@ -317,6 +321,8 @@ out:
        tcp_rcv_space_adjust(sk);
        if (copied > 0)
                __tcp_cleanup_rbuf(sk, copied);
+
+unlock:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied;
@@ -351,6 +357,10 @@ msg_bytes_ready:
 
                timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
                data = tcp_msg_wait_data(sk, psock, timeo);
+               if (data < 0) {
+                       ret = data;
+                       goto unlock;
+               }
                if (data) {
                        if (!sk_psock_queue_empty(psock))
                                goto msg_bytes_ready;
@@ -361,6 +371,8 @@ msg_bytes_ready:
                copied = -EAGAIN;
        }
        ret = copied;
+
+unlock:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return ret;
index 27140e5..4167e8a 100644 (file)
@@ -1869,6 +1869,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
 #ifdef CONFIG_TLS_DEVICE
            tail->decrypted != skb->decrypted ||
 #endif
+           !mptcp_skb_can_collapse(tail, skb) ||
            thtail->doff != th->doff ||
            memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
                goto no_coalesce;
index aa0fc8c..f072346 100644 (file)
@@ -2456,6 +2456,7 @@ static int tcp_mtu_probe(struct sock *sk)
 
        /* build the payload, and be prepared to abort if this fails. */
        if (tcp_clone_payload(sk, nskb, probe_size)) {
+               tcp_skb_tsorted_anchor_cleanup(nskb);
                consume_skb(nskb);
                return -1;
        }
@@ -2541,6 +2542,18 @@ static bool tcp_pacing_check(struct sock *sk)
        return true;
 }
 
+static bool tcp_rtx_queue_empty_or_single_skb(const struct sock *sk)
+{
+       const struct rb_node *node = sk->tcp_rtx_queue.rb_node;
+
+       /* No skb in the rtx queue. */
+       if (!node)
+               return true;
+
+       /* Only one skb in rtx queue. */
+       return !node->rb_left && !node->rb_right;
+}
+
 /* TCP Small Queues :
  * Control number of packets in qdisc/devices to two packets / or ~1 ms.
  * (These limits are doubled for retransmits)
@@ -2578,12 +2591,12 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
                limit += extra_bytes;
        }
        if (refcount_read(&sk->sk_wmem_alloc) > limit) {
-               /* Always send skb if rtx queue is empty.
+               /* Always send skb if rtx queue is empty or has one skb.
                 * No need to wait for TX completion to call us back,
                 * after softirq/tasklet schedule.
                 * This helps when TX completions are delayed too much.
                 */
-               if (tcp_rtx_queue_empty(sk))
+               if (tcp_rtx_queue_empty_or_single_skb(sk))
                        return false;
 
                set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
@@ -2787,7 +2800,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       u32 timeout, rto_delta_us;
+       u32 timeout, timeout_us, rto_delta_us;
        int early_retrans;
 
        /* Don't do any loss probe on a Fast Open connection before 3WHS
@@ -2811,11 +2824,12 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
         * sample is available then probe after TCP_TIMEOUT_INIT.
         */
        if (tp->srtt_us) {
-               timeout = usecs_to_jiffies(tp->srtt_us >> 2);
+               timeout_us = tp->srtt_us >> 2;
                if (tp->packets_out == 1)
-                       timeout += TCP_RTO_MIN;
+                       timeout_us += tcp_rto_min_us(sk);
                else
-                       timeout += TCP_TIMEOUT_MIN;
+                       timeout_us += TCP_TIMEOUT_MIN_US;
+               timeout = usecs_to_jiffies(timeout_us);
        } else {
                timeout = TCP_TIMEOUT_INIT;
        }
index acf4869..bba1011 100644 (file)
@@ -104,7 +104,7 @@ bool tcp_rack_mark_lost(struct sock *sk)
        tp->rack.advanced = 0;
        tcp_rack_detect_loss(sk, &timeout);
        if (timeout) {
-               timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
+               timeout = usecs_to_jiffies(timeout + TCP_TIMEOUT_MIN_US);
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
                                          timeout, inet_csk(sk)->icsk_rto);
        }
index fddd0cb..e023d29 100644 (file)
@@ -770,7 +770,9 @@ static inline int esp_remove_trailer(struct sk_buff *skb)
                skb->csum = csum_block_sub(skb->csum, csumdiff,
                                           skb->len - trimlen);
        }
-       pskb_trim(skb, skb->len - trimlen);
+       ret = pskb_trim(skb, skb->len - trimlen);
+       if (unlikely(ret))
+               return ret;
 
        ret = nexthdr[1];
 
index 41a680c..42fb699 100644 (file)
@@ -117,10 +117,10 @@ static void xfrm6_dst_destroy(struct dst_entry *dst)
 {
        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-       if (likely(xdst->u.rt6.rt6i_idev))
-               in6_dev_put(xdst->u.rt6.rt6i_idev);
        dst_destroy_metrics_generic(dst);
        rt6_uncached_list_del(&xdst->u.rt6);
+       if (likely(xdst->u.rt6.rt6i_idev))
+               in6_dev_put(xdst->u.rt6.rt6i_idev);
        xfrm_dst_destroy(xdst);
 }
 
index 0665ff5..a2db058 100644 (file)
@@ -912,7 +912,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
         */
        if (ieee80211_key_identical(sdata, old_key, key)) {
                ret = -EALREADY;
-               goto unlock;
+               goto out;
        }
 
        key->local = sdata->local;
@@ -940,7 +940,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
 
  out:
        ieee80211_key_free_unused(key);
- unlock:
        mutex_unlock(&sdata->local->key_mtx);
 
        return ret;
index ab62fe4..7a47a58 100644 (file)
@@ -737,6 +737,8 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
 {
        struct mctp_route *tmp, *rt = NULL;
 
+       rcu_read_lock();
+
        list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
                /* TODO: add metrics */
                if (mctp_rt_match_eid(tmp, dnet, daddr)) {
@@ -747,21 +749,29 @@ struct mctp_route *mctp_route_lookup(struct net *net, unsigned int dnet,
                }
        }
 
+       rcu_read_unlock();
+
        return rt;
 }
 
 static struct mctp_route *mctp_route_lookup_null(struct net *net,
                                                 struct net_device *dev)
 {
-       struct mctp_route *rt;
+       struct mctp_route *tmp, *rt = NULL;
 
-       list_for_each_entry_rcu(rt, &net->mctp.routes, list) {
-               if (rt->dev->dev == dev && rt->type == RTN_LOCAL &&
-                   refcount_inc_not_zero(&rt->refs))
-                       return rt;
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(tmp, &net->mctp.routes, list) {
+               if (tmp->dev->dev == dev && tmp->type == RTN_LOCAL &&
+                   refcount_inc_not_zero(&tmp->refs)) {
+                       rt = tmp;
+                       break;
+               }
        }
 
-       return NULL;
+       rcu_read_unlock();
+
+       return rt;
 }
 
 static int mctp_do_fragment_route(struct mctp_route *rt, struct sk_buff *skb,
index c3b83cb..886ab68 100644 (file)
@@ -1298,7 +1298,7 @@ alloc_skb:
        if (copy == 0) {
                u64 snd_una = READ_ONCE(msk->snd_una);
 
-               if (snd_una != msk->snd_nxt) {
+               if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
                        tcp_remove_empty_skb(ssk);
                        return 0;
                }
@@ -1306,11 +1306,6 @@ alloc_skb:
                zero_window_probe = true;
                data_seq = snd_una - 1;
                copy = 1;
-
-               /* all mptcp-level data is acked, no skbs should be present into the
-                * ssk write queue
-                */
-               WARN_ON_ONCE(reuse_skb);
        }
 
        copy = min_t(size_t, copy, info->limit - info->sent);
@@ -1339,7 +1334,6 @@ alloc_skb:
        if (reuse_skb) {
                TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
                mpext->data_len += copy;
-               WARN_ON_ONCE(zero_window_probe);
                goto out;
        }
 
@@ -2354,6 +2348,26 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
 #define MPTCP_CF_PUSH          BIT(1)
 #define MPTCP_CF_FASTCLOSE     BIT(2)
 
+/* Be sure to send a reset only if the caller asked for it, and also
+ * completely clean the subflow status when the subflow reaches
+ * TCP_CLOSE state.
+ */
+static void __mptcp_subflow_disconnect(struct sock *ssk,
+                                      struct mptcp_subflow_context *subflow,
+                                      unsigned int flags)
+{
+       if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
+           (flags & MPTCP_CF_FASTCLOSE)) {
+               /* The MPTCP code never wait on the subflow sockets, TCP-level
+                * disconnect should never fail
+                */
+               WARN_ON_ONCE(tcp_disconnect(ssk, 0));
+               mptcp_subflow_ctx_reset(subflow);
+       } else {
+               tcp_shutdown(ssk, SEND_SHUTDOWN);
+       }
+}
+
 /* subflow sockets can be either outgoing (connect) or incoming
  * (accept).
  *
@@ -2391,7 +2405,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
        lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
 
        if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
-               /* be sure to force the tcp_disconnect() path,
+               /* be sure to force the tcp_close path
                 * to generate the egress reset
                 */
                ssk->sk_lingertime = 0;
@@ -2401,11 +2415,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
 
        need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk);
        if (!dispose_it) {
-               /* The MPTCP code never wait on the subflow sockets, TCP-level
-                * disconnect should never fail
-                */
-               WARN_ON_ONCE(tcp_disconnect(ssk, 0));
-               mptcp_subflow_ctx_reset(subflow);
+               __mptcp_subflow_disconnect(ssk, subflow, flags);
                release_sock(ssk);
 
                goto out;
@@ -3098,12 +3108,6 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
-       /* Deny disconnect if other threads are blocked in sk_wait_event()
-        * or inet_wait_for_connect().
-        */
-       if (sk->sk_wait_pending)
-               return -EBUSY;
-
        /* We are on the fastopen error path. We can't call straight into the
         * subflows cleanup code due to lock nesting (we are already under
         * msk->firstsocket lock).
@@ -3173,7 +3177,6 @@ struct sock *mptcp_sk_clone_init(const struct sock *sk,
                inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
 #endif
 
-       nsk->sk_wait_pending = 0;
        __mptcp_init_sock(nsk);
 
        msk = mptcp_sk(nsk);
index a72b6ae..29c6518 100644 (file)
@@ -3166,7 +3166,7 @@ int nft_expr_inner_parse(const struct nft_ctx *ctx, const struct nlattr *nla,
        if (err < 0)
                return err;
 
-       if (!tb[NFTA_EXPR_DATA])
+       if (!tb[NFTA_EXPR_DATA] || !tb[NFTA_EXPR_NAME])
                return -EINVAL;
 
        type = __nft_expr_type_get(ctx->family, tb[NFTA_EXPR_NAME]);
@@ -5556,7 +5556,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
        const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
        unsigned char *b = skb_tail_pointer(skb);
        struct nlattr *nest;
-       u64 timeout = 0;
 
        nest = nla_nest_start_noflag(skb, NFTA_LIST_ELEM);
        if (nest == NULL)
@@ -5592,15 +5591,11 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
                         htonl(*nft_set_ext_flags(ext))))
                goto nla_put_failure;
 
-       if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT)) {
-               timeout = *nft_set_ext_timeout(ext);
-               if (nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
-                                nf_jiffies64_to_msecs(timeout),
-                                NFTA_SET_ELEM_PAD))
-                       goto nla_put_failure;
-       } else if (set->flags & NFT_SET_TIMEOUT) {
-               timeout = READ_ONCE(set->timeout);
-       }
+       if (nft_set_ext_exists(ext, NFT_SET_EXT_TIMEOUT) &&
+           nla_put_be64(skb, NFTA_SET_ELEM_TIMEOUT,
+                        nf_jiffies64_to_msecs(*nft_set_ext_timeout(ext)),
+                        NFTA_SET_ELEM_PAD))
+               goto nla_put_failure;
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPIRATION)) {
                u64 expires, now = get_jiffies_64();
@@ -5615,9 +5610,6 @@ static int nf_tables_fill_setelem(struct sk_buff *skb,
                                 nf_jiffies64_to_msecs(expires),
                                 NFTA_SET_ELEM_PAD))
                        goto nla_put_failure;
-
-               if (reset)
-                       *nft_set_ext_expiration(ext) = now + timeout;
        }
 
        if (nft_set_ext_exists(ext, NFT_SET_EXT_USERDATA)) {
@@ -7615,6 +7607,16 @@ nla_put_failure:
        return -1;
 }
 
+static void audit_log_obj_reset(const struct nft_table *table,
+                               unsigned int base_seq, unsigned int nentries)
+{
+       char *buf = kasprintf(GFP_ATOMIC, "%s:%u", table->name, base_seq);
+
+       audit_log_nfcfg(buf, table->family, nentries,
+                       AUDIT_NFT_OP_OBJ_RESET, GFP_ATOMIC);
+       kfree(buf);
+}
+
 struct nft_obj_filter {
        char            *table;
        u32             type;
@@ -7629,8 +7631,10 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
        struct nftables_pernet *nft_net;
+       unsigned int entries = 0;
        struct nft_object *obj;
        bool reset = false;
+       int rc = 0;
 
        if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) == NFT_MSG_GETOBJ_RESET)
                reset = true;
@@ -7643,6 +7647,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
                if (family != NFPROTO_UNSPEC && family != table->family)
                        continue;
 
+               entries = 0;
                list_for_each_entry_rcu(obj, &table->objects, list) {
                        if (!nft_is_active(net, obj))
                                goto cont;
@@ -7658,34 +7663,27 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
                            filter->type != NFT_OBJECT_UNSPEC &&
                            obj->ops->type->type != filter->type)
                                goto cont;
-                       if (reset) {
-                               char *buf = kasprintf(GFP_ATOMIC,
-                                                     "%s:%u",
-                                                     table->name,
-                                                     nft_net->base_seq);
-
-                               audit_log_nfcfg(buf,
-                                               family,
-                                               obj->handle,
-                                               AUDIT_NFT_OP_OBJ_RESET,
-                                               GFP_ATOMIC);
-                               kfree(buf);
-                       }
 
-                       if (nf_tables_fill_obj_info(skb, net, NETLINK_CB(cb->skb).portid,
-                                                   cb->nlh->nlmsg_seq,
-                                                   NFT_MSG_NEWOBJ,
-                                                   NLM_F_MULTI | NLM_F_APPEND,
-                                                   table->family, table,
-                                                   obj, reset) < 0)
-                               goto done;
+                       rc = nf_tables_fill_obj_info(skb, net,
+                                                    NETLINK_CB(cb->skb).portid,
+                                                    cb->nlh->nlmsg_seq,
+                                                    NFT_MSG_NEWOBJ,
+                                                    NLM_F_MULTI | NLM_F_APPEND,
+                                                    table->family, table,
+                                                    obj, reset);
+                       if (rc < 0)
+                               break;
 
+                       entries++;
                        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
+               if (reset && entries)
+                       audit_log_obj_reset(table, nft_net->base_seq, entries);
+               if (rc < 0)
+                       break;
        }
-done:
        rcu_read_unlock();
 
        cb->args[0] = idx;
@@ -7790,7 +7788,7 @@ static int nf_tables_getobj(struct sk_buff *skb, const struct nfnl_info *info,
 
                audit_log_nfcfg(buf,
                                family,
-                               obj->handle,
+                               1,
                                AUDIT_NFT_OP_OBJ_RESET,
                                GFP_ATOMIC);
                kfree(buf);
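
The hunks above batch the audit logging for object-reset dumps: nf_tables_dump_obj() now counts the entries emitted per table and writes one AUDIT_NFT_OP_OBJ_RESET record per table with that count, and nf_tables_getobj() reports a count of 1 instead of passing the object handle. A small userspace sketch of the batching idea, with invented names standing in for the kernel helpers:

#include <stdio.h>

struct table { const char *name; unsigned int base_seq; };

/* One record per table per dump cycle, carrying the entry count, instead of
 * one record per object (the kernel builds "name:base_seq" with kasprintf()
 * and hands it to audit_log_nfcfg()). */
static void audit_log_obj_reset(const struct table *t, unsigned int nentries)
{
	printf("audit: op=obj-reset table=%s:%u entries=%u\n",
	       t->name, t->base_seq, nentries);
}

int main(void)
{
	struct table t = { "filter", 42 };
	unsigned int entries = 0;

	for (int i = 0; i < 3; i++)	/* pretend three objects were dumped */
		entries++;
	if (entries)			/* only log when something was reset */
		audit_log_obj_reset(&t, entries);
	return 0;
}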
index 53c9e76..f03f4d4 100644 (file)
@@ -698,8 +698,8 @@ nfulnl_log_packet(struct net *net,
        unsigned int plen = 0;
        struct nfnl_log_net *log = nfnl_log_pernet(net);
        const struct nfnl_ct_hook *nfnl_ct = NULL;
+       enum ip_conntrack_info ctinfo = 0;
        struct nf_conn *ct = NULL;
-       enum ip_conntrack_info ctinfo;
 
        if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
                li = li_user;
index 28e2873..928312d 100644 (file)
@@ -298,6 +298,7 @@ static int nft_inner_init(const struct nft_ctx *ctx,
        int err;
 
        if (!tb[NFTA_INNER_FLAGS] ||
+           !tb[NFTA_INNER_NUM] ||
            !tb[NFTA_INNER_HDRSIZE] ||
            !tb[NFTA_INNER_TYPE] ||
            !tb[NFTA_INNER_EXPR])
index 120f6d3..0a689c8 100644 (file)
@@ -179,7 +179,7 @@ void nft_payload_eval(const struct nft_expr *expr,
 
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
-               if (!skb_mac_header_was_set(skb))
+               if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
                        goto err;
 
                if (skb_vlan_tag_present(skb) &&
index 25a7559..2e164a3 100644 (file)
@@ -147,7 +147,7 @@ struct nft_pipapo_match {
        unsigned long * __percpu *scratch;
        size_t bsize_max;
        struct rcu_head rcu;
-       struct nft_pipapo_field f[];
+       struct nft_pipapo_field f[] __counted_by(field_count);
 };
 
 /**
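
The __counted_by() annotation added above ties the flexible array to the structure member that holds its element count, so FORTIFY_SOURCE and UBSAN bounds checking can verify accesses at run time. A minimal compile-able sketch with hypothetical names; the macro falls back to a no-op where the attribute is unsupported:

#include <stdlib.h>

#ifndef __counted_by			/* plain userspace toolchains may lack it */
#define __counted_by(member)
#endif

struct example_match {
	unsigned int field_count;			/* element count, set before use */
	int fields[] __counted_by(field_count);		/* bounds-checked against it */
};

static struct example_match *example_alloc(unsigned int n)
{
	struct example_match *m;

	m = malloc(sizeof(*m) + n * sizeof(m->fields[0]));
	if (m)
		m->field_count = n;	/* the counter must be valid before the array is used */
	return m;
}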
index 2660cea..e34662f 100644 (file)
@@ -568,6 +568,8 @@ static void *nft_rbtree_deactivate(const struct net *net,
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
+                       } else if (nft_set_elem_expired(&rbe->ext)) {
+                               break;
                        } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
                                parent = parent->rb_left;
                                continue;
index 6705bb8..1dac281 100644 (file)
@@ -203,17 +203,13 @@ static struct nfc_llcp_sock *nfc_llcp_sock_get(struct nfc_llcp_local *local,
 
                if (tmp_sock->ssap == ssap && tmp_sock->dsap == dsap) {
                        llcp_sock = tmp_sock;
+                       sock_hold(&llcp_sock->sk);
                        break;
                }
        }
 
        read_unlock(&local->sockets.lock);
 
-       if (llcp_sock == NULL)
-               return NULL;
-
-       sock_hold(&llcp_sock->sk);
-
        return llcp_sock;
 }
 
@@ -346,7 +342,8 @@ static int nfc_llcp_wks_sap(const char *service_name, size_t service_name_len)
 
 static
 struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
-                                           const u8 *sn, size_t sn_len)
+                                           const u8 *sn, size_t sn_len,
+                                           bool needref)
 {
        struct sock *sk;
        struct nfc_llcp_sock *llcp_sock, *tmp_sock;
@@ -382,6 +379,8 @@ struct nfc_llcp_sock *nfc_llcp_sock_from_sn(struct nfc_llcp_local *local,
 
                if (memcmp(sn, tmp_sock->service_name, sn_len) == 0) {
                        llcp_sock = tmp_sock;
+                       if (needref)
+                               sock_hold(&llcp_sock->sk);
                        break;
                }
        }
@@ -423,7 +422,8 @@ u8 nfc_llcp_get_sdp_ssap(struct nfc_llcp_local *local,
                 * to this service name.
                 */
                if (nfc_llcp_sock_from_sn(local, sock->service_name,
-                                         sock->service_name_len) != NULL) {
+                                         sock->service_name_len,
+                                         false) != NULL) {
                        mutex_unlock(&local->sdp_lock);
 
                        return LLCP_SAP_MAX;
@@ -824,16 +824,7 @@ out:
 static struct nfc_llcp_sock *nfc_llcp_sock_get_sn(struct nfc_llcp_local *local,
                                                  const u8 *sn, size_t sn_len)
 {
-       struct nfc_llcp_sock *llcp_sock;
-
-       llcp_sock = nfc_llcp_sock_from_sn(local, sn, sn_len);
-
-       if (llcp_sock == NULL)
-               return NULL;
-
-       sock_hold(&llcp_sock->sk);
-
-       return llcp_sock;
+       return nfc_llcp_sock_from_sn(local, sn, sn_len, true);
 }
 
 static const u8 *nfc_llcp_connect_sn(const struct sk_buff *skb, size_t *sn_len)
@@ -1298,7 +1289,8 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
                        }
 
                        llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
-                                                         service_name_len);
+                                                         service_name_len,
+                                                         true);
                        if (!llcp_sock) {
                                sap = 0;
                                goto add_snl;
@@ -1318,6 +1310,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 
                                if (sap == LLCP_SAP_MAX) {
                                        sap = 0;
+                                       nfc_llcp_sock_put(llcp_sock);
                                        goto add_snl;
                                }
 
@@ -1335,6 +1328,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 
                        pr_debug("%p %d\n", llcp_sock, sap);
 
+                       nfc_llcp_sock_put(llcp_sock);
 add_snl:
                        sdp = nfc_llcp_build_sdres_tlv(tid, sap);
                        if (sdp == NULL)
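
The nfc_llcp hunks above take the sock_hold() reference while the sockets list lock is still held (with a needref flag so existing callers keep their semantics, and matching nfc_llcp_sock_put() calls where a reference is now taken), so a socket can no longer be freed between being found and being pinned. A simplified userspace analogy of that lookup-then-pin-under-lock pattern, with invented types:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct obj { atomic_int refs; int key; struct obj *next; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *objects;

/* Look up an object and pin it before the lock is dropped, so a concurrent
 * removal cannot free it between "found" and "referenced". */
static struct obj *obj_get(int key)
{
	struct obj *o, *found = NULL;

	pthread_mutex_lock(&list_lock);
	for (o = objects; o; o = o->next) {
		if (o->key == key) {
			found = o;
			atomic_fetch_add(&found->refs, 1);	/* pin under the lock */
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
	return found;		/* caller drops the reference when done */
}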
index fff755d..6c9592d 100644 (file)
@@ -909,6 +909,11 @@ static int nci_activate_target(struct nfc_dev *nfc_dev,
                return -EINVAL;
        }
 
+       if (protocol >= NFC_PROTO_MAX) {
+               pr_err("the requested nfc protocol is invalid\n");
+               return -EINVAL;
+       }
+
        if (!(nci_target->supported_protocols & (1 << protocol))) {
                pr_err("target does not support the requested protocol 0x%x\n",
                       protocol);
index 0935527..b68150c 100644 (file)
@@ -151,6 +151,8 @@ static int send_acknowledge(struct nci_spi *nspi, u8 acknowledge)
        int ret;
 
        skb = nci_skb_alloc(nspi->ndev, 0, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
 
        /* add the NCI SPI header to the start of the buffer */
        hdr = skb_push(skb, NCI_SPI_HDR_LEN);
index 8f97648..a84e00b 100644 (file)
@@ -3607,7 +3607,12 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
        if (dev) {
                sll->sll_hatype = dev->type;
                sll->sll_halen = dev->addr_len;
-               memcpy(sll->sll_addr_flex, dev->dev_addr, dev->addr_len);
+
+               /* Let __fortify_memcpy_chk() know the actual buffer size. */
+               memcpy(((struct sockaddr_storage *)sll)->__data +
+                      offsetof(struct sockaddr_ll, sll_addr) -
+                      offsetofend(struct sockaddr_ll, sll_family),
+                      dev->dev_addr, dev->addr_len);
        } else {
                sll->sll_hatype = 0;    /* Bad: we have no ARPHRD_UNSPEC */
                sll->sll_halen = 0;
index 0863089..14cc8fe 100644 (file)
@@ -1180,7 +1180,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
        init_waitqueue_head(&data->read_wait);
 
        mutex_lock(&rfkill_global_mutex);
-       mutex_lock(&data->mtx);
        /*
         * start getting events from elsewhere but hold mtx to get
         * startup events added first
@@ -1192,10 +1191,11 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
                        goto free;
                rfkill_sync(rfkill);
                rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
+               mutex_lock(&data->mtx);
                list_add_tail(&ev->list, &data->events);
+               mutex_unlock(&data->mtx);
        }
        list_add(&data->list, &rfkill_fds);
-       mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
 
        file->private_data = data;
@@ -1203,7 +1203,6 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
        return stream_open(inode, file);
 
  free:
-       mutex_unlock(&data->mtx);
        mutex_unlock(&rfkill_global_mutex);
        mutex_destroy(&data->mtx);
        list_for_each_entry_safe(ev, tmp, &data->events, list)
index e9d1b2f..5a81505 100644 (file)
@@ -108,13 +108,13 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
 
        rfkill->clk = devm_clk_get(&pdev->dev, NULL);
 
-       gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
+       gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_ASIS);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);
 
        rfkill->reset_gpio = gpio;
 
-       gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_OUT_LOW);
+       gpio = devm_gpiod_get_optional(&pdev->dev, "shutdown", GPIOD_ASIS);
        if (IS_ERR(gpio))
                return PTR_ERR(gpio);
 
index da4c179..6663e97 100644 (file)
@@ -366,7 +366,7 @@ static int u32_init(struct tcf_proto *tp)
        idr_init(&root_ht->handle_idr);
 
        if (tp_c == NULL) {
-               tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
+               tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
                if (tp_c == NULL) {
                        kfree(root_ht);
                        return -ENOBUFS;
index 3554085..880c5f1 100644 (file)
@@ -902,6 +902,14 @@ hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
        cl->cl_flags |= HFSC_USC;
 }
 
+static void
+hfsc_upgrade_rt(struct hfsc_class *cl)
+{
+       cl->cl_fsc = cl->cl_rsc;
+       rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
+       cl->cl_flags |= HFSC_FSC;
+}
+
 static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
        [TCA_HFSC_RSC]  = { .len = sizeof(struct tc_service_curve) },
        [TCA_HFSC_FSC]  = { .len = sizeof(struct tc_service_curve) },
@@ -1011,10 +1019,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                if (parent == NULL)
                        return -ENOENT;
        }
-       if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
-               NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
-               return -EINVAL;
-       }
 
        if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
                return -EINVAL;
@@ -1065,6 +1069,12 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
        cl->cf_tree = RB_ROOT;
 
        sch_tree_lock(sch);
+       /* Check if the inner class is a misconfigured 'rt' */
+       if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
+               NL_SET_ERR_MSG(extack,
+                              "Forced curve change on parent 'rt' to 'sc'");
+               hfsc_upgrade_rt(parent);
+       }
        qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
        list_add_tail(&cl->siblings, &parent->children);
        if (parent->level == 0)
index 1ab3c5a..746be39 100644 (file)
@@ -2,6 +2,7 @@
 config SMC
        tristate "SMC socket protocol family"
        depends on INET && INFINIBAND
+       depends on m || ISM != m
        help
          SMC-R provides a "sockets over RDMA" solution making use of
          RDMA over Converged Ethernet (RoCE) technology to upgrade
index bacdd97..35ddeba 100644 (file)
@@ -1201,6 +1201,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
                (struct smc_clc_msg_accept_confirm_v2 *)aclc;
        struct smc_clc_first_contact_ext *fce =
                smc_get_clc_first_contact_ext(clc_v2, false);
+       struct net *net = sock_net(&smc->sk);
        int rc;
 
        if (!ini->first_contact_peer || aclc->hdr.version == SMC_V1)
@@ -1210,7 +1211,7 @@ static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
                memcpy(ini->smcrv2.nexthop_mac, &aclc->r0.lcl.mac, ETH_ALEN);
                ini->smcrv2.uses_gateway = false;
        } else {
-               if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr,
+               if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
                                      smc_ib_gid_to_ipv4(aclc->r0.lcl.gid),
                                      ini->smcrv2.nexthop_mac,
                                      &ini->smcrv2.uses_gateway))
@@ -2361,7 +2362,7 @@ static int smc_listen_find_device(struct smc_sock *new_smc,
                smc_find_ism_store_rc(rc, ini);
                return (!rc) ? 0 : ini->rc;
        }
-       return SMC_CLC_DECL_NOSMCDEV;
+       return prfx_rc;
 }
 
 /* listen worker: finish RDMA setup */
index 9b66d6a..89981db 100644 (file)
@@ -193,7 +193,7 @@ bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
        return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
 }
 
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
                      u8 nexthop_mac[], u8 *uses_gateway)
 {
        struct neighbour *neigh = NULL;
@@ -205,7 +205,7 @@ int smc_ib_find_route(__be32 saddr, __be32 daddr,
 
        if (daddr == cpu_to_be32(INADDR_NONE))
                goto out;
-       rt = ip_route_output_flow(&init_net, &fl4, NULL);
+       rt = ip_route_output_flow(net, &fl4, NULL);
        if (IS_ERR(rt))
                goto out;
        if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
@@ -235,6 +235,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
        if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
            smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
                struct in_device *in_dev = __in_dev_get_rcu(ndev);
+               struct net *net = dev_net(ndev);
                const struct in_ifaddr *ifa;
                bool subnet_match = false;
 
@@ -248,7 +249,7 @@ static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
                }
                if (!subnet_match)
                        goto out;
-               if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
+               if (smcrv2->daddr && smc_ib_find_route(net, smcrv2->saddr,
                                                       smcrv2->daddr,
                                                       smcrv2->nexthop_mac,
                                                       &smcrv2->uses_gateway))
index 4df5f8c..ef8ac2b 100644 (file)
@@ -112,7 +112,7 @@ void smc_ib_sync_sg_for_device(struct smc_link *lnk,
 int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
                         unsigned short vlan_id, u8 gid[], u8 *sgid_index,
                         struct smc_init_info_smcrv2 *smcrv2);
-int smc_ib_find_route(__be32 saddr, __be32 daddr,
+int smc_ib_find_route(struct net *net, __be32 saddr, __be32 daddr,
                      u8 nexthop_mac[], u8 *uses_gateway);
 bool smc_ib_is_valid_local_systemid(void);
 int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb);
index aa89289..9d32058 100644 (file)
@@ -92,13 +92,14 @@ do { \
        typeof(_smc_stats) stats = (_smc_stats); \
        typeof(_tech) t = (_tech); \
        typeof(_len) l = (_len); \
-       int _pos = fls64((l) >> 13); \
+       int _pos; \
        typeof(_rc) r = (_rc); \
        int m = SMC_BUF_MAX - 1; \
        this_cpu_inc((*stats).smc[t].key ## _cnt); \
-       if (r <= 0) \
+       if (r <= 0 || l <= 0) \
                break; \
-       _pos = (_pos < m) ? ((l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+       _pos = fls64((l - 1) >> 13); \
+       _pos = (_pos <= m) ? _pos : m; \
        this_cpu_inc((*stats).smc[t].key ## _pd.buf[_pos]); \
        this_cpu_add((*stats).smc[t].key ## _bytes, r); \
 } \
@@ -138,9 +139,12 @@ while (0)
 do { \
        typeof(_len) _l = (_len); \
        typeof(_tech) t = (_tech); \
-       int _pos = fls((_l) >> 13); \
+       int _pos; \
        int m = SMC_BUF_MAX - 1; \
-       _pos = (_pos < m) ? ((_l == 1 << (_pos + 12)) ? _pos - 1 : _pos) : m; \
+       if (_l <= 0) \
+               break; \
+       _pos = fls((_l - 1) >> 13); \
+       _pos = (_pos <= m) ? _pos : m; \
        this_cpu_inc((*(_smc_stats)).smc[t].k ## _rmbsize.buf[_pos]); \
 } \
 while (0)
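
The two macros above now compute the histogram slot as fls64((len - 1) >> 13), clamped to SMC_BUF_MAX - 1, and skip non-positive lengths, instead of the old fls64(len >> 13) with a power-of-two correction. A standalone sketch of the new bucketing, with fls64() modelled by a plain loop and made-up helper names:

#include <stdio.h>

static int fls64_model(unsigned long long x)
{
	int pos = 0;

	while (x) {		/* position of the most significant set bit, 0 for 0 */
		pos++;
		x >>= 1;
	}
	return pos;
}

static int smc_stats_bucket(long long len, int max_bucket)
{
	int pos;

	if (len <= 0)
		return -1;	/* ignored, as in the fixed macro */
	pos = fls64_model((unsigned long long)(len - 1) >> 13);
	return pos <= max_bucket ? pos : max_bucket;
}

int main(void)
{
	/* 8 KiB -> 0, 8 KiB + 1 -> 1, 16 KiB -> 1, 64 KiB -> 3 (cap 4) */
	printf("%d %d %d %d\n",
	       smc_stats_bucket(8192, 4), smc_stats_bucket(8193, 4),
	       smc_stats_bucket(16384, 4), smc_stats_bucket(65536, 4));
	return 0;
}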
index 02f583f..002483e 100644 (file)
@@ -139,8 +139,8 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx)
 
 int wait_on_pending_writer(struct sock *sk, long *timeo)
 {
-       int rc = 0;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       int ret, rc = 0;
 
        add_wait_queue(sk_sleep(sk), &wait);
        while (1) {
@@ -154,9 +154,13 @@ int wait_on_pending_writer(struct sock *sk, long *timeo)
                        break;
                }
 
-               if (sk_wait_event(sk, timeo,
-                                 !READ_ONCE(sk->sk_write_pending), &wait))
+               ret = sk_wait_event(sk, timeo,
+                                   !READ_ONCE(sk->sk_write_pending), &wait);
+               if (ret) {
+                       if (ret < 0)
+                               rc = ret;
                        break;
+               }
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        return rc;
index d1fc295..e9d1e83 100644 (file)
@@ -1291,6 +1291,7 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       int ret = 0;
        long timeo;
 
        timeo = sock_rcvtimeo(sk, nonblock);
@@ -1302,6 +1303,9 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
                if (sk->sk_err)
                        return sock_error(sk);
 
+               if (ret < 0)
+                       return ret;
+
                if (!skb_queue_empty(&sk->sk_receive_queue)) {
                        tls_strp_check_rcv(&ctx->strp);
                        if (tls_strp_msg_ready(ctx))
@@ -1320,10 +1324,10 @@ tls_rx_rec_wait(struct sock *sk, struct sk_psock *psock, bool nonblock,
                released = true;
                add_wait_queue(sk_sleep(sk), &wait);
                sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-               sk_wait_event(sk, &timeo,
-                             tls_strp_msg_ready(ctx) ||
-                             !sk_psock_queue_empty(psock),
-                             &wait);
+               ret = sk_wait_event(sk, &timeo,
+                                   tls_strp_msg_ready(ctx) ||
+                                   !sk_psock_queue_empty(psock),
+                                   &wait);
                sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
                remove_wait_queue(sk_sleep(sk), &wait);
 
@@ -1852,6 +1856,7 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
                                 bool nonblock)
 {
        long timeo;
+       int ret;
 
        timeo = sock_rcvtimeo(sk, nonblock);
 
@@ -1861,14 +1866,16 @@ static int tls_rx_reader_acquire(struct sock *sk, struct tls_sw_context_rx *ctx,
                ctx->reader_contended = 1;
 
                add_wait_queue(&ctx->wq, &wait);
-               sk_wait_event(sk, &timeo,
-                             !READ_ONCE(ctx->reader_present), &wait);
+               ret = sk_wait_event(sk, &timeo,
+                                   !READ_ONCE(ctx->reader_present), &wait);
                remove_wait_queue(&ctx->wq, &wait);
 
                if (timeo <= 0)
                        return -EAGAIN;
                if (signal_pending(current))
                        return sock_intr_errno(timeo);
+               if (ret < 0)
+                       return ret;
        }
 
        WRITE_ONCE(ctx->reader_present, 1);
index 64e8616..acec41c 100644 (file)
@@ -1622,7 +1622,7 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work)
                list_add_tail(&work->entry, &rdev->wiphy_work_list);
        spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags);
 
-       schedule_work(&rdev->wiphy_work);
+       queue_work(system_unbound_wq, &rdev->wiphy_work);
 }
 EXPORT_SYMBOL_GPL(wiphy_work_queue);
 
index f890540..d2c2640 100644 (file)
@@ -34,6 +34,16 @@ struct xsk_queue *xskq_create(u32 nentries, bool umem_queue)
        q->ring_mask = nentries - 1;
 
        size = xskq_get_ring_size(q, umem_queue);
+
+       /* A size that overflows, or is close, to SIZE_MAX becomes 0 in
+        * PAGE_ALIGN(); checking for SIZE_MAX is enough because of the earlier
+        * is_power_of_2() check, and vmalloc_user() handles the rest.
+        */
+       if (unlikely(size == SIZE_MAX)) {
+               kfree(q);
+               return NULL;
+       }
+
        size = PAGE_ALIGN(size);
 
        q->ring = vmalloc_user(size);
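
The early SIZE_MAX check above works because the ring-size computation saturates on overflow, and PAGE_ALIGN() of such a value wraps to 0, which vmalloc_user() would then be asked to allocate. A tiny standalone demonstration of the wrap, using a userspace stand-in for PAGE_ALIGN():

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Userspace stand-in for the kernel's PAGE_ALIGN(): round up to a page. */
static size_t page_align(size_t x)
{
	return (x + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* A saturated size wraps to 0 when rounded up, which is why it must
	 * be rejected before the alignment step. */
	printf("PAGE_ALIGN(SIZE_MAX) = %zu\n", page_align(SIZE_MAX));	/* prints 0 */
	return 0;
}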
index b864740..e21cc71 100644 (file)
@@ -380,8 +380,8 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
        skb->dev = dev;
 
        if (err) {
-               dev->stats.rx_errors++;
-               dev->stats.rx_dropped++;
+               DEV_STATS_INC(dev, rx_errors);
+               DEV_STATS_INC(dev, rx_dropped);
 
                return 0;
        }
@@ -426,7 +426,6 @@ static int
 xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 {
        struct xfrm_if *xi = netdev_priv(dev);
-       struct net_device_stats *stats = &xi->dev->stats;
        struct dst_entry *dst = skb_dst(skb);
        unsigned int length = skb->len;
        struct net_device *tdev;
@@ -473,7 +472,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        tdev = dst->dev;
 
        if (tdev == dev) {
-               stats->collisions++;
+               DEV_STATS_INC(dev, collisions);
                net_warn_ratelimited("%s: Local routing loop detected!\n",
                                     dev->name);
                goto tx_err_dst_release;
@@ -512,13 +511,13 @@ xmit:
        if (net_xmit_eval(err) == 0) {
                dev_sw_netstats_tx_add(dev, 1, length);
        } else {
-               stats->tx_errors++;
-               stats->tx_aborted_errors++;
+               DEV_STATS_INC(dev, tx_errors);
+               DEV_STATS_INC(dev, tx_aborted_errors);
        }
 
        return 0;
 tx_err_link_failure:
-       stats->tx_carrier_errors++;
+       DEV_STATS_INC(dev, tx_carrier_errors);
        dst_link_failure(skb);
 tx_err_dst_release:
        dst_release(dst);
@@ -528,7 +527,6 @@ tx_err_dst_release:
 static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xfrm_if *xi = netdev_priv(dev);
-       struct net_device_stats *stats = &xi->dev->stats;
        struct dst_entry *dst = skb_dst(skb);
        struct flowi fl;
        int ret;
@@ -545,7 +543,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
                        dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);
                        if (dst->error) {
                                dst_release(dst);
-                               stats->tx_carrier_errors++;
+                               DEV_STATS_INC(dev, tx_carrier_errors);
                                goto tx_err;
                        }
                        skb_dst_set(skb, dst);
@@ -561,7 +559,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
                        fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
                        rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);
                        if (IS_ERR(rt)) {
-                               stats->tx_carrier_errors++;
+                               DEV_STATS_INC(dev, tx_carrier_errors);
                                goto tx_err;
                        }
                        skb_dst_set(skb, &rt->dst);
@@ -580,8 +578,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
        return NETDEV_TX_OK;
 
 tx_err:
-       stats->tx_errors++;
-       stats->tx_dropped++;
+       DEV_STATS_INC(dev, tx_errors);
+       DEV_STATS_INC(dev, tx_dropped);
        kfree_skb(skb);
        return NETDEV_TX_OK;
 }
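
The xfrm interface hunks above replace open-coded dev->stats.field++ updates with DEV_STATS_INC(), which increments the counter atomically so concurrent datapath contexts cannot lose updates. A rough userspace analogy built on C11 atomics; the struct and macro below are simplified stand-ins, not the kernel definitions:

#include <stdatomic.h>
#include <stdio.h>

struct dev_stats {
	atomic_ulong tx_errors;
	atomic_ulong tx_dropped;
};

/* Analogy for DEV_STATS_INC(dev, field): a relaxed atomic add instead of a
 * plain, racy ++ on a shared counter. */
#define DEV_STATS_INC(stats, field) \
	atomic_fetch_add_explicit(&(stats)->field, 1, memory_order_relaxed)

int main(void)
{
	struct dev_stats stats = { 0 };

	DEV_STATS_INC(&stats, tx_errors);
	DEV_STATS_INC(&stats, tx_dropped);
	printf("tx_errors=%lu tx_dropped=%lu\n",
	       atomic_load(&stats.tx_errors), atomic_load(&stats.tx_dropped));
	return 0;
}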
index d6b4057..d24b4d4 100644 (file)
@@ -851,7 +851,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
                struct hlist_node *newpos = NULL;
                bool matches_s, matches_d;
 
-               if (!policy->bydst_reinsert)
+               if (policy->walk.dead || !policy->bydst_reinsert)
                        continue;
 
                WARN_ON_ONCE(policy->family != family);
@@ -1256,8 +1256,11 @@ static void xfrm_hash_rebuild(struct work_struct *work)
                struct xfrm_pol_inexact_bin *bin;
                u8 dbits, sbits;
 
+               if (policy->walk.dead)
+                       continue;
+
                dir = xfrm_policy_id2dir(policy->index);
-               if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
+               if (dir >= XFRM_POLICY_MAX)
                        continue;
 
                if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
@@ -1372,8 +1375,6 @@ EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
  * of an absolute unpredictability of ordering of rules. This will not pass. */
 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
 {
-       static u32 idx_generator;
-
        for (;;) {
                struct hlist_head *list;
                struct xfrm_policy *p;
@@ -1381,8 +1382,8 @@ static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
                int found;
 
                if (!index) {
-                       idx = (idx_generator | dir);
-                       idx_generator += 8;
+                       idx = (net->xfrm.idx_generator | dir);
+                       net->xfrm.idx_generator += 8;
                } else {
                        idx = index;
                        index = 0;
@@ -1823,9 +1824,11 @@ int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
 
 again:
        list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+               if (pol->walk.dead)
+                       continue;
+
                dir = xfrm_policy_id2dir(pol->index);
-               if (pol->walk.dead ||
-                   dir >= XFRM_POLICY_MAX ||
+               if (dir >= XFRM_POLICY_MAX ||
                    pol->type != type)
                        continue;
 
@@ -1862,9 +1865,11 @@ int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
 
 again:
        list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+               if (pol->walk.dead)
+                       continue;
+
                dir = xfrm_policy_id2dir(pol->index);
-               if (pol->walk.dead ||
-                   dir >= XFRM_POLICY_MAX ||
+               if (dir >= XFRM_POLICY_MAX ||
                    pol->xdo.dev != dev)
                        continue;
 
@@ -3215,7 +3220,7 @@ no_transform:
        }
 
        for (i = 0; i < num_pols; i++)
-               pols[i]->curlft.use_time = ktime_get_real_seconds();
+               WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
 
        if (num_xfrms < 0) {
                /* Prohibit the flow */
index c6fc50d..85fb5c2 100644 (file)
@@ -44,13 +44,12 @@ static const struct trusted_key_source trusted_key_sources[] = {
 #endif
 };
 
-DEFINE_STATIC_CALL_NULL(trusted_key_init, *trusted_key_sources[0].ops->init);
 DEFINE_STATIC_CALL_NULL(trusted_key_seal, *trusted_key_sources[0].ops->seal);
 DEFINE_STATIC_CALL_NULL(trusted_key_unseal,
                        *trusted_key_sources[0].ops->unseal);
 DEFINE_STATIC_CALL_NULL(trusted_key_get_random,
                        *trusted_key_sources[0].ops->get_random);
-DEFINE_STATIC_CALL_NULL(trusted_key_exit, *trusted_key_sources[0].ops->exit);
+static void (*trusted_key_exit)(void);
 static unsigned char migratable;
 
 enum {
@@ -359,19 +358,16 @@ static int __init init_trusted(void)
                if (!get_random)
                        get_random = kernel_get_random;
 
-               static_call_update(trusted_key_init,
-                                  trusted_key_sources[i].ops->init);
                static_call_update(trusted_key_seal,
                                   trusted_key_sources[i].ops->seal);
                static_call_update(trusted_key_unseal,
                                   trusted_key_sources[i].ops->unseal);
                static_call_update(trusted_key_get_random,
                                   get_random);
-               static_call_update(trusted_key_exit,
-                                  trusted_key_sources[i].ops->exit);
+               trusted_key_exit = trusted_key_sources[i].ops->exit;
                migratable = trusted_key_sources[i].ops->migratable;
 
-               ret = static_call(trusted_key_init)();
+               ret = trusted_key_sources[i].ops->init();
                if (!ret)
                        break;
        }
@@ -388,7 +384,8 @@ static int __init init_trusted(void)
 
 static void __exit cleanup_trusted(void)
 {
-       static_call_cond(trusted_key_exit)();
+       if (trusted_key_exit)
+               (*trusted_key_exit)();
 }
 
 late_initcall(init_trusted);
index f9b7735..c6031f7 100644 (file)
@@ -185,10 +185,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
                                            cs35l41->speaker_id, "wmfw");
        if (!ret) {
                /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
-               return cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
-                                                    CS35L41_FIRMWARE_ROOT,
-                                                    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
-                                                    cs35l41->speaker_id, "bin");
+               ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+                                                   CS35L41_FIRMWARE_ROOT,
+                                                   cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+                                                   cs35l41->speaker_id, "bin");
+               if (ret)
+                       goto coeff_err;
+
+               return 0;
        }
 
        /* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
@@ -197,10 +201,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
                                            cs35l41->amp_name, -1, "wmfw");
        if (!ret) {
                /* try cirrus/part-dspN-fwtype-sub<-spkidN><-ampname>.bin */
-               return cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
-                                                    CS35L41_FIRMWARE_ROOT,
-                                                    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
-                                                    cs35l41->speaker_id, "bin");
+               ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+                                                   CS35L41_FIRMWARE_ROOT,
+                                                   cs35l41->acpi_subsystem_id, cs35l41->amp_name,
+                                                   cs35l41->speaker_id, "bin");
+               if (ret)
+                       goto coeff_err;
+
+               return 0;
        }
 
        /* try cirrus/part-dspN-fwtype-sub<-spkidN>.wmfw */
@@ -215,10 +223,14 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
                                                    cs35l41->amp_name, cs35l41->speaker_id, "bin");
                if (ret)
                        /* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
-                       return cs35l41_request_firmware_file(cs35l41, coeff_firmware,
-                                                            coeff_filename, CS35L41_FIRMWARE_ROOT,
-                                                            cs35l41->acpi_subsystem_id, NULL,
-                                                            cs35l41->speaker_id, "bin");
+                       ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware,
+                                                           coeff_filename, CS35L41_FIRMWARE_ROOT,
+                                                           cs35l41->acpi_subsystem_id, NULL,
+                                                           cs35l41->speaker_id, "bin");
+               if (ret)
+                       goto coeff_err;
+
+               return 0;
        }
 
        /* try cirrus/part-dspN-fwtype-sub.wmfw */
@@ -233,12 +245,50 @@ static int cs35l41_request_firmware_files_spkid(struct cs35l41_hda *cs35l41,
                                                    cs35l41->speaker_id, "bin");
                if (ret)
                        /* try cirrus/part-dspN-fwtype-sub<-spkidN>.bin */
-                       return cs35l41_request_firmware_file(cs35l41, coeff_firmware,
-                                                            coeff_filename, CS35L41_FIRMWARE_ROOT,
-                                                            cs35l41->acpi_subsystem_id, NULL,
-                                                            cs35l41->speaker_id, "bin");
+                       ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware,
+                                                           coeff_filename, CS35L41_FIRMWARE_ROOT,
+                                                           cs35l41->acpi_subsystem_id, NULL,
+                                                           cs35l41->speaker_id, "bin");
+               if (ret)
+                       goto coeff_err;
+       }
+
+       return ret;
+coeff_err:
+       release_firmware(*wmfw_firmware);
+       kfree(*wmfw_filename);
+       return ret;
+}
+
+static int cs35l41_fallback_firmware_file(struct cs35l41_hda *cs35l41,
+                                         const struct firmware **wmfw_firmware,
+                                         char **wmfw_filename,
+                                         const struct firmware **coeff_firmware,
+                                         char **coeff_filename)
+{
+       int ret;
+
+       /* Handle fallback */
+       dev_warn(cs35l41->dev, "Falling back to default firmware.\n");
+
+       /* fallback try cirrus/part-dspN-fwtype.wmfw */
+       ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+                                           CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
+       if (ret)
+               goto err;
+
+       /* fallback try cirrus/part-dspN-fwtype.bin */
+       ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
+                                           CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
+       if (ret) {
+               release_firmware(*wmfw_firmware);
+               kfree(*wmfw_filename);
+               goto err;
        }
+       return 0;
 
+err:
+       dev_warn(cs35l41->dev, "Unable to find firmware and tuning\n");
        return ret;
 }
 
@@ -254,7 +304,6 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
                ret = cs35l41_request_firmware_files_spkid(cs35l41, wmfw_firmware, wmfw_filename,
                                                           coeff_firmware, coeff_filename);
                goto out;
-
        }
 
        /* try cirrus/part-dspN-fwtype-sub<-ampname>.wmfw */
@@ -267,6 +316,9 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
                                                    CS35L41_FIRMWARE_ROOT,
                                                    cs35l41->acpi_subsystem_id, cs35l41->amp_name,
                                                    -1, "bin");
+               if (ret)
+                       goto coeff_err;
+
                goto out;
        }
 
@@ -286,32 +338,23 @@ static int cs35l41_request_firmware_files(struct cs35l41_hda *cs35l41,
                                                            CS35L41_FIRMWARE_ROOT,
                                                            cs35l41->acpi_subsystem_id, NULL, -1,
                                                            "bin");
+               if (ret)
+                       goto coeff_err;
        }
 
 out:
-       if (!ret)
-               return 0;
+       if (ret)
+               /* if all attempts at finding firmware fail, try fallback */
+               goto fallback;
 
-       /* Handle fallback */
-       dev_warn(cs35l41->dev, "Falling back to default firmware.\n");
+       return 0;
 
+coeff_err:
        release_firmware(*wmfw_firmware);
        kfree(*wmfw_filename);
-
-       /* fallback try cirrus/part-dspN-fwtype.wmfw */
-       ret = cs35l41_request_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
-                                           CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "wmfw");
-       if (!ret)
-               /* fallback try cirrus/part-dspN-fwtype.bin */
-               ret = cs35l41_request_firmware_file(cs35l41, coeff_firmware, coeff_filename,
-                                                   CS35L41_FIRMWARE_ROOT, NULL, NULL, -1, "bin");
-
-       if (ret) {
-               release_firmware(*wmfw_firmware);
-               kfree(*wmfw_filename);
-               dev_warn(cs35l41->dev, "Unable to find firmware and tuning\n");
-       }
-       return ret;
+fallback:
+       return cs35l41_fallback_firmware_file(cs35l41, wmfw_firmware, wmfw_filename,
+                                             coeff_firmware, coeff_filename);
 }
 
 #if IS_ENABLED(CONFIG_EFI)
index 751783f..9677c09 100644 (file)
@@ -7078,6 +7078,24 @@ static void alc287_fixup_bind_dacs(struct hda_codec *codec,
                                        0x0); /* Make sure 0x14 was disable */
        }
 }
+/* Fix Headset Mic pin with no verb table */
+static void alc_fixup_headset_mic(struct hda_codec *codec,
+                                  const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x19, 0x03a1103c },
+               { }
+       };
+
+       switch (action) {
+       case HDA_FIXUP_ACT_PRE_PROBE:
+               snd_hda_apply_pincfgs(codec, pincfgs);
+               alc_update_coef_idx(codec, 0x45, 0xf<<12 | 1<<10, 5<<12);
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+               break;
+       }
+}
 
 
 enum {
@@ -7343,6 +7361,8 @@ enum {
        ALC245_FIXUP_HP_MUTE_LED_COEFBIT,
        ALC245_FIXUP_HP_X360_MUTE_LEDS,
        ALC287_FIXUP_THINKPAD_I2S_SPK,
+       ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
+       ALC2XX_FIXUP_HEADSET_MIC,
 };
 
 /* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -9441,6 +9461,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc287_fixup_bind_dacs,
        },
+       [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc287_fixup_bind_dacs,
+               .chained = true,
+               .chain_id = ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI,
+       },
+       [ALC2XX_FIXUP_HEADSET_MIC] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_mic,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9715,6 +9745,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x89d3, "HP EliteBook 645 G9 (MB 89D2)", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
        SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
        SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED),
@@ -9784,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+       SND_PCI_QUIRK(0x1043, 0x1663, "ASUS GU603ZV", ALC285_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
@@ -9854,7 +9886,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
-       SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
+       SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
@@ -9988,14 +10020,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
        SND_PCI_QUIRK(0x17aa, 0x22c2, "Thinkpad X1 Extreme Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
-       SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
-       SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_CS35L41_I2C_2_THINKPAD_ACPI),
+       SND_PCI_QUIRK(0x17aa, 0x22f1, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x22f2, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x22f3, "Thinkpad", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x2316, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x2317, "Thinkpad P1 Gen 6", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -10091,7 +10123,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
        SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
-       SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK),
+       SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
 
 #if 0
@@ -10743,6 +10775,8 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
        SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x19, 0x40000000},
                {0x1a, 0x40000000}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC2XX_FIXUP_HEADSET_MIC,
+               {0x19, 0x40000000}),
        {}
 };
 
index 94e9eb8..15a864d 100644 (file)
@@ -244,6 +244,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
        {
                .driver_data = &acp6x_card,
                .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "82YM"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
                }
index f2e7c6d..f905978 100644 (file)
@@ -706,7 +706,7 @@ static void cs35l56_patch(struct cs35l56_private *cs35l56)
 
        mutex_lock(&cs35l56->base.irq_lock);
 
-       init_completion(&cs35l56->init_completion);
+       reinit_completion(&cs35l56->init_completion);
 
        cs35l56->soft_resetting = true;
        cs35l56_system_reset(&cs35l56->base, !!cs35l56->sdw_peripheral);
@@ -1186,6 +1186,12 @@ post_soft_reset:
        /* Registers could be dirty after soft reset or SoundWire enumeration */
        regcache_sync(cs35l56->base.regmap);
 
+       /* Set ASP1 DOUT to high-impedance when it is not transmitting audio data. */
+       ret = regmap_set_bits(cs35l56->base.regmap, CS35L56_ASP1_CONTROL3,
+                             CS35L56_ASP1_DOUT_HIZ_CTRL_MASK);
+       if (ret)
+               return dev_err_probe(cs35l56->base.dev, ret, "Failed to write ASP1_CONTROL3\n");
+
        cs35l56->base.init_done = true;
        complete(&cs35l56->init_completion);
 
index 974bae4..94a66a3 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/acpi.h>
 #include <linux/device.h>
+#include <linux/gpio/consumer.h>
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
index 92e37bc..9f5f1a9 100644 (file)
@@ -34,7 +34,7 @@ static const unsigned int cs42l43_accdet_db_ms[] = {
 static const unsigned int cs42l43_accdet_ramp_ms[] = { 10, 40, 90, 170 };
 
 static const unsigned int cs42l43_accdet_bias_sense[] = {
-       14, 23, 41, 50, 60, 68, 86, 95, 0,
+       14, 24, 43, 52, 61, 71, 90, 99, 0,
 };
 
 static int cs42l43_find_index(struct cs42l43_codec *priv, const char * const prop,
index 581b334..3bbe850 100644 (file)
@@ -59,9 +59,6 @@ static void da7219_aad_btn_det_work(struct work_struct *work)
        bool micbias_up = false;
        int retries = 0;
 
-       /* Disable ground switch */
-       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
        /* Drive headphones/lineout */
        snd_soc_component_update_bits(component, DA7219_HP_L_CTRL,
                            DA7219_HP_L_AMP_OE_MASK,
@@ -155,9 +152,6 @@ static void da7219_aad_hptest_work(struct work_struct *work)
                tonegen_freq_hptest = cpu_to_le16(DA7219_AAD_HPTEST_RAMP_FREQ_INT_OSC);
        }
 
-       /* Disable ground switch */
-       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
-
        /* Ensure gain ramping at fastest rate */
        gain_ramp_ctrl = snd_soc_component_read(component, DA7219_GAIN_RAMP_CTRL);
        snd_soc_component_write(component, DA7219_GAIN_RAMP_CTRL, DA7219_GAIN_RAMP_RATE_X8);
@@ -421,6 +415,11 @@ static irqreturn_t da7219_aad_irq_thread(int irq, void *data)
                         * handle a removal, and we can check at the end of
                         * hptest if we have a valid result or not.
                         */
+
+                       cancel_delayed_work_sync(&da7219_aad->jack_det_work);
+                       /* Disable ground switch */
+                       snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
+
                        if (statusa & DA7219_JACK_TYPE_STS_MASK) {
                                report |= SND_JACK_HEADSET;
                                mask |= SND_JACK_HEADSET | SND_JACK_LINEOUT;
index 13689e7..09eef60 100644 (file)
@@ -531,7 +531,10 @@ static int hdmi_codec_fill_codec_params(struct snd_soc_dai *dai,
        hp->sample_rate = sample_rate;
        hp->channels = channels;
 
-       hcp->chmap_idx = idx;
+       if (pcm_audio)
+               hcp->chmap_idx = ca_id;
+       else
+               hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN;
 
        return 0;
 }
index ec6859e..fff4a8b 100644 (file)
@@ -1675,12 +1675,12 @@ static int wsa_macro_spk_boost_event(struct snd_soc_dapm_widget *w,
        u16 boost_path_ctl, boost_path_cfg1;
        u16 reg, reg_mix;
 
-       if (!strcmp(w->name, "WSA_RX INT0 CHAIN")) {
+       if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT0 CHAIN")) {
                boost_path_ctl = CDC_WSA_BOOST0_BOOST_PATH_CTL;
                boost_path_cfg1 = CDC_WSA_RX0_RX_PATH_CFG1;
                reg = CDC_WSA_RX0_RX_PATH_CTL;
                reg_mix = CDC_WSA_RX0_RX_PATH_MIX_CTL;
-       } else if (!strcmp(w->name, "WSA_RX INT1 CHAIN")) {
+       } else if (!snd_soc_dapm_widget_name_cmp(w, "WSA_RX INT1 CHAIN")) {
                boost_path_ctl = CDC_WSA_BOOST1_BOOST_PATH_CTL;
                boost_path_cfg1 = CDC_WSA_RX1_RX_PATH_CFG1;
                reg = CDC_WSA_RX1_RX_PATH_CTL;
index 1a137ca..7938b52 100644 (file)
@@ -3257,6 +3257,8 @@ int rt5645_set_jack_detect(struct snd_soc_component *component,
                                RT5645_GP1_PIN_IRQ, RT5645_GP1_PIN_IRQ);
                regmap_update_bits(rt5645->regmap, RT5645_GEN_CTRL1,
                                RT5645_DIG_GATE_CTRL, RT5645_DIG_GATE_CTRL);
+               regmap_update_bits(rt5645->regmap, RT5645_DEPOP_M1,
+                               RT5645_HP_CB_MASK, RT5645_HP_CB_PU);
        }
        rt5645_irq(0, rt5645);
 
index b05b4f7..fbad1ed 100644 (file)
@@ -157,11 +157,6 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
                return ret;
        }
 
-       ret = devm_add_action_or_reset(&i2c->dev, rt5682_i2c_disable_regulators,
-                                      rt5682);
-       if (ret)
-               return ret;
-
        ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
                                    rt5682->supplies);
        if (ret) {
@@ -169,6 +164,11 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
                return ret;
        }
 
+       ret = devm_add_action_or_reset(&i2c->dev, rt5682_i2c_disable_regulators,
+                                      rt5682);
+       if (ret)
+               return ret;
+
        ret = rt5682_get_ldo1(rt5682, &i2c->dev);
        if (ret)
                return ret;
index 86bd6c1..41076be 100644 (file)
@@ -39,7 +39,7 @@ static void tas2780_reset(struct tas2780_priv *tas2780)
                usleep_range(2000, 2050);
        }
 
-       snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
+       ret = snd_soc_component_write(tas2780->component, TAS2780_SW_RST,
                                TAS2780_RST);
        if (ret)
                dev_err(tas2780->dev, "%s:errCode:0x%x Reset error!\n",
index b976c19..420bbf5 100644 (file)
 #define ADC3XXX_BYPASS_RPGA            0x80
 
 /* MICBIAS control bits */
-#define ADC3XXX_MICBIAS_MASK           0x2
+#define ADC3XXX_MICBIAS_MASK           0x3
 #define ADC3XXX_MICBIAS1_SHIFT         5
 #define ADC3XXX_MICBIAS2_SHIFT         3
 
@@ -1099,7 +1099,7 @@ static int adc3xxx_parse_dt_micbias(struct adc3xxx *adc3xxx,
        unsigned int val;
 
        if (!of_property_read_u32(np, propname, &val)) {
-               if (val >= ADC3XXX_MICBIAS_AVDD) {
+               if (val > ADC3XXX_MICBIAS_AVDD) {
                        dev_err(dev, "Invalid property value for '%s'\n", propname);
                        return -EINVAL;
                }
index 6951120..a1f0401 100644 (file)
@@ -1278,7 +1278,31 @@ static int wcd9380_probe(struct sdw_slave *pdev,
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
-       return component_add(dev, &wcd938x_sdw_component_ops);
+       ret = component_add(dev, &wcd938x_sdw_component_ops);
+       if (ret)
+               goto err_disable_rpm;
+
+       return 0;
+
+err_disable_rpm:
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+
+       return ret;
+}
+
+static int wcd9380_remove(struct sdw_slave *pdev)
+{
+       struct device *dev = &pdev->dev;
+
+       component_del(dev, &wcd938x_sdw_component_ops);
+
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+
+       return 0;
 }
 
 static const struct sdw_device_id wcd9380_slave_id[] = {
@@ -1320,6 +1344,7 @@ static const struct dev_pm_ops wcd938x_sdw_pm_ops = {
 
 static struct sdw_driver wcd9380_codec_driver = {
        .probe  = wcd9380_probe,
+       .remove = wcd9380_remove,
        .ops = &wcd9380_slave_ops,
        .id_table = wcd9380_slave_id,
        .driver = {
index a3c6806..d27b919 100644 (file)
@@ -3325,8 +3325,10 @@ static int wcd938x_populate_dt_data(struct wcd938x_priv *wcd938x, struct device
                return dev_err_probe(dev, ret, "Failed to get supplies\n");
 
        ret = regulator_bulk_enable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
-       if (ret)
+       if (ret) {
+               regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
                return dev_err_probe(dev, ret, "Failed to enable supplies\n");
+       }
 
        wcd938x_dt_parse_micbias_info(dev, wcd938x);
 
@@ -3435,7 +3437,8 @@ static int wcd938x_bind(struct device *dev)
        wcd938x->rxdev = wcd938x_sdw_device_get(wcd938x->rxnode);
        if (!wcd938x->rxdev) {
                dev_err(dev, "could not find slave with matching of node\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_unbind;
        }
        wcd938x->sdw_priv[AIF1_PB] = dev_get_drvdata(wcd938x->rxdev);
        wcd938x->sdw_priv[AIF1_PB]->wcd938x = wcd938x;
@@ -3443,46 +3446,47 @@ static int wcd938x_bind(struct device *dev)
        wcd938x->txdev = wcd938x_sdw_device_get(wcd938x->txnode);
        if (!wcd938x->txdev) {
                dev_err(dev, "could not find txslave with matching of node\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_rxdev;
        }
        wcd938x->sdw_priv[AIF1_CAP] = dev_get_drvdata(wcd938x->txdev);
        wcd938x->sdw_priv[AIF1_CAP]->wcd938x = wcd938x;
        wcd938x->tx_sdw_dev = dev_to_sdw_dev(wcd938x->txdev);
-       if (!wcd938x->tx_sdw_dev) {
-               dev_err(dev, "could not get txslave with matching of dev\n");
-               return -EINVAL;
-       }
 
        /* As TX is main CSR reg interface, which should not be suspended first.
         * expicilty add the dependency link */
        if (!device_link_add(wcd938x->rxdev, wcd938x->txdev, DL_FLAG_STATELESS |
                            DL_FLAG_PM_RUNTIME)) {
                dev_err(dev, "could not devlink tx and rx\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_put_txdev;
        }
 
        if (!device_link_add(dev, wcd938x->txdev, DL_FLAG_STATELESS |
                                        DL_FLAG_PM_RUNTIME)) {
                dev_err(dev, "could not devlink wcd and tx\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_remove_rxtx_link;
        }
 
        if (!device_link_add(dev, wcd938x->rxdev, DL_FLAG_STATELESS |
                                        DL_FLAG_PM_RUNTIME)) {
                dev_err(dev, "could not devlink wcd and rx\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_remove_tx_link;
        }
 
        wcd938x->regmap = dev_get_regmap(&wcd938x->tx_sdw_dev->dev, NULL);
        if (!wcd938x->regmap) {
                dev_err(dev, "could not get TX device regmap\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto err_remove_rx_link;
        }
 
        ret = wcd938x_irq_init(wcd938x, dev);
        if (ret) {
                dev_err(dev, "%s: IRQ init failed: %d\n", __func__, ret);
-               return ret;
+               goto err_remove_rx_link;
        }
 
        wcd938x->sdw_priv[AIF1_PB]->slave_irq = wcd938x->virq;
@@ -3491,27 +3495,45 @@ static int wcd938x_bind(struct device *dev)
        ret = wcd938x_set_micbias_data(wcd938x);
        if (ret < 0) {
                dev_err(dev, "%s: bad micbias pdata\n", __func__);
-               return ret;
+               goto err_remove_rx_link;
        }
 
        ret = snd_soc_register_component(dev, &soc_codec_dev_wcd938x,
                                         wcd938x_dais, ARRAY_SIZE(wcd938x_dais));
-       if (ret)
+       if (ret) {
                dev_err(dev, "%s: Codec registration failed\n",
                                __func__);
+               goto err_remove_rx_link;
+       }
 
-       return ret;
+       return 0;
 
+err_remove_rx_link:
+       device_link_remove(dev, wcd938x->rxdev);
+err_remove_tx_link:
+       device_link_remove(dev, wcd938x->txdev);
+err_remove_rxtx_link:
+       device_link_remove(wcd938x->rxdev, wcd938x->txdev);
+err_put_txdev:
+       put_device(wcd938x->txdev);
+err_put_rxdev:
+       put_device(wcd938x->rxdev);
+err_unbind:
+       component_unbind_all(dev, wcd938x);
+
+       return ret;
 }
 
 static void wcd938x_unbind(struct device *dev)
 {
        struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
 
+       snd_soc_unregister_component(dev);
        device_link_remove(dev, wcd938x->txdev);
        device_link_remove(dev, wcd938x->rxdev);
        device_link_remove(wcd938x->rxdev, wcd938x->txdev);
-       snd_soc_unregister_component(dev);
+       put_device(wcd938x->txdev);
+       put_device(wcd938x->rxdev);
        component_unbind_all(dev, wcd938x);
 }
 
@@ -3572,13 +3594,13 @@ static int wcd938x_probe(struct platform_device *pdev)
 
        ret = wcd938x_add_slave_components(wcd938x, dev, &match);
        if (ret)
-               return ret;
+               goto err_disable_regulators;
 
        wcd938x_reset(wcd938x);
 
        ret = component_master_add_with_match(dev, &wcd938x_comp_ops, match);
        if (ret)
-               return ret;
+               goto err_disable_regulators;
 
        pm_runtime_set_autosuspend_delay(dev, 1000);
        pm_runtime_use_autosuspend(dev);
@@ -3588,11 +3610,27 @@ static int wcd938x_probe(struct platform_device *pdev)
        pm_runtime_idle(dev);
 
        return 0;
+
+err_disable_regulators:
+       regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+       regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+
+       return ret;
 }
 
 static void wcd938x_remove(struct platform_device *pdev)
 {
-       component_master_del(&pdev->dev, &wcd938x_comp_ops);
+       struct device *dev = &pdev->dev;
+       struct wcd938x_priv *wcd938x = dev_get_drvdata(dev);
+
+       component_master_del(dev, &wcd938x_comp_ops);
+
+       pm_runtime_disable(dev);
+       pm_runtime_set_suspended(dev);
+       pm_runtime_dont_use_autosuspend(dev);
+
+       regulator_bulk_disable(WCD938X_MAX_SUPPLY, wcd938x->supplies);
+       regulator_bulk_free(WCD938X_MAX_SUPPLY, wcd938x->supplies);
 }
 
 #if defined(CONFIG_OF)
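
The wcd938x_bind() rework above follows the usual kernel unwind idiom: each acquired resource gets a goto label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A bare-bones, self-contained sketch of that shape (acquire_*/release_* are placeholders, not real APIs):

/* Placeholders standing in for real resource acquisition/release. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }	/* pretend this one fails */
static void release_a(void) { }
static void release_b(void) { }

static int example_bind(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;

	ret = acquire_b();
	if (ret)
		goto err_release_a;

	ret = acquire_c();
	if (ret)
		goto err_release_b;

	return 0;

err_release_b:
	release_b();
err_release_a:
	release_a();
	return ret;
}
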
index 22c0041..9ea4be5 100644 (file)
@@ -917,7 +917,7 @@ static int jh7110_i2stx0_clk_cfg(struct i2s_clk_config_data *config)
 
 static int dw_i2s_probe(struct platform_device *pdev)
 {
-       const struct i2s_platform_data *pdata = of_device_get_match_data(&pdev->dev);
+       const struct i2s_platform_data *pdata = pdev->dev.platform_data;
        struct dw_i2s_dev *dev;
        struct resource *res;
        int ret, irq;
index 76b5bfc..bab7d34 100644 (file)
@@ -52,8 +52,8 @@ struct codec_priv {
        unsigned long mclk_freq;
        unsigned long free_freq;
        u32 mclk_id;
-       u32 fll_id;
-       u32 pll_id;
+       int fll_id;
+       int pll_id;
 };
 
 /**
@@ -206,7 +206,7 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
        }
 
        /* Specific configuration for PLL */
-       if (codec_priv->pll_id && codec_priv->fll_id) {
+       if (codec_priv->pll_id >= 0 && codec_priv->fll_id >= 0) {
                if (priv->sample_format == SNDRV_PCM_FORMAT_S24_LE)
                        pll_out = priv->sample_rate * 384;
                else
@@ -248,7 +248,7 @@ static int fsl_asoc_card_hw_free(struct snd_pcm_substream *substream)
 
        priv->streams &= ~BIT(substream->stream);
 
-       if (!priv->streams && codec_priv->pll_id && codec_priv->fll_id) {
+       if (!priv->streams && codec_priv->pll_id >= 0 && codec_priv->fll_id >= 0) {
                /* Force freq to be free_freq to avoid error message in codec */
                ret = snd_soc_dai_set_sysclk(asoc_rtd_to_codec(rtd, 0),
                                             codec_priv->mclk_id,
@@ -621,6 +621,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        priv->card.dapm_routes = audio_map;
        priv->card.num_dapm_routes = ARRAY_SIZE(audio_map);
        priv->card.driver_name = DRIVER_NAME;
+
+       priv->codec_priv.fll_id = -1;
+       priv->codec_priv.pll_id = -1;
+
        /* Diversify the card configurations */
        if (of_device_is_compatible(np, "fsl,imx-audio-cs42888")) {
                codec_dai_name = "cs42888";
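
The fsl_asoc_card change above makes fll_id/pll_id signed and initializes them to -1, so a codec whose valid FLL/PLL ID happens to be 0 is no longer treated as "no PLL configured". A minimal sketch of that sentinel pattern (hypothetical struct, not the driver's own):

#include <stdbool.h>

struct clk_ids {
	int fll_id;	/* -1 = not configured; 0 is a valid ID */
	int pll_id;
};

static bool has_pll(const struct clk_ids *ids)
{
	/* With u32 fields and a 0 sentinel, a legitimate ID of 0
	 * would incorrectly be skipped here. */
	return ids->fll_id >= 0 && ids->pll_id >= 0;
}
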
index 1e4020f..8a9a30d 100644 (file)
@@ -710,10 +710,15 @@ static void fsl_sai_config_disable(struct fsl_sai *sai, int dir)
 {
        unsigned int ofs = sai->soc_data->reg_offset;
        bool tx = dir == TX;
-       u32 xcsr, count = 100;
+       u32 xcsr, count = 100, mask;
+
+       if (sai->soc_data->mclk_with_tere && sai->mclk_direction_output)
+               mask = FSL_SAI_CSR_TERE;
+       else
+               mask = FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE;
 
        regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx, ofs),
-                          FSL_SAI_CSR_TERE | FSL_SAI_CSR_BCE, 0);
+                          mask, 0);
 
        /* TERE will remain set till the end of current frame */
        do {
index 5b18a4a..2588ec7 100644 (file)
@@ -310,7 +310,8 @@ int asoc_simple_startup(struct snd_pcm_substream *substream)
                if (fixed_sysclk % props->mclk_fs) {
                        dev_err(rtd->dev, "fixed sysclk %u not divisible by mclk_fs %u\n",
                                fixed_sysclk, props->mclk_fs);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto codec_err;
                }
                ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE,
                        fixed_rate, fixed_rate);
index 190f113..274417e 100644 (file)
@@ -759,10 +759,12 @@ static int asoc_simple_probe(struct platform_device *pdev)
                struct snd_soc_dai_link *dai_link = priv->dai_link;
                struct simple_dai_props *dai_props = priv->dai_props;
 
+               ret = -EINVAL;
+
                cinfo = dev->platform_data;
                if (!cinfo) {
                        dev_err(dev, "no info for asoc-simple-card\n");
-                       return -EINVAL;
+                       goto err;
                }
 
                if (!cinfo->name ||
@@ -771,7 +773,7 @@ static int asoc_simple_probe(struct platform_device *pdev)
                    !cinfo->platform ||
                    !cinfo->cpu_dai.name) {
                        dev_err(dev, "insufficient asoc_simple_card_info settings\n");
-                       return -EINVAL;
+                       goto err;
                }
 
                cpus                    = dai_link->cpus;
index f8a3e8a..9904a9e 100644 (file)
@@ -808,6 +808,16 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
                                        SOF_ES8336_JD_INVERTED),
        },
+       {
+               .name = "mtl_es83x6_c1_h02",
+               .driver_data = (kernel_ulong_t)(SOF_ES8336_SSP_CODEC(1) |
+                                       SOF_NO_OF_HDMI_CAPTURE_SSP(2) |
+                                       SOF_HDMI_CAPTURE_1_SSP(0) |
+                                       SOF_HDMI_CAPTURE_2_SSP(2) |
+                                       SOF_SSP_HDMI_CAPTURE_PRESENT |
+                                       SOF_ES8336_SPEAKERS_EN_GPIO1_QUIRK |
+                                       SOF_ES8336_JD_INVERTED),
+       },
        { }
 };
 MODULE_DEVICE_TABLE(platform, board_ids);
index 5a1c750..8426495 100644 (file)
@@ -380,6 +380,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
                .callback = sof_sdw_quirk_cb,
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B14"),
+               },
+               /* No Jack */
+               .driver_data = (void *)SOF_SDW_TGL_HDMI,
+       },
+
+       {
+               .callback = sof_sdw_quirk_cb,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
                        DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0B29"),
                },
                .driver_data = (void *)(SOF_SDW_TGL_HDMI |
index 8e995ed..5103e75 100644 (file)
@@ -656,18 +656,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_sdw_machines[] = {
                .sof_tplg_filename = "sof-adl-rt1316-l2-mono-rt714-l3.tplg",
        },
        {
-               .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
-               .links = adl_sdw_rt1316_link1_rt714_link0,
-               .drv_name = "sof_sdw",
-               .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
-       },
-       {
                .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
                .links = adl_sdw_rt1316_link12_rt714_link0,
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-adl-rt1316-l12-rt714-l0.tplg",
        },
        {
+               .link_mask = 0x3, /* rt1316 on link1 & rt714 on link0 */
+               .links = adl_sdw_rt1316_link1_rt714_link0,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-adl-rt1316-l1-mono-rt714-l0.tplg",
+       },
+       {
                .link_mask = 0x5, /* 2 active links required */
                .links = adl_sdw_rt1316_link2_rt714_link0,
                .drv_name = "sof_sdw",
index 0304246..92498d1 100644 (file)
@@ -30,6 +30,16 @@ static const struct snd_soc_acpi_codecs mtl_rt5682_rt5682s_hp = {
        .codecs = {"10EC5682", "RTL5682"},
 };
 
+static const struct snd_soc_acpi_codecs mtl_essx_83x6 = {
+       .num_codecs = 3,
+       .codecs = { "ESSX8316", "ESSX8326", "ESSX8336"},
+};
+
+static const struct snd_soc_acpi_codecs mtl_lt6911_hdmi = {
+       .num_codecs = 1,
+       .codecs = {"INTC10B0"}
+};
+
 struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
        {
                .comp_ids = &mtl_rt5682_rt5682s_hp,
@@ -52,6 +62,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_mtl_machines[] = {
                .quirk_data = &mtl_rt1019p_amp,
                .sof_tplg_filename = "sof-mtl-rt1019-rt5682.tplg",
        },
+       {
+               .comp_ids = &mtl_essx_83x6,
+               .drv_name = "mtl_es83x6_c1_h02",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &mtl_lt6911_hdmi,
+               .sof_tplg_filename = "sof-mtl-es83x6-ssp1-hdmi-ssp02.tplg",
+       },
+       {
+               .comp_ids = &mtl_essx_83x6,
+               .drv_name = "sof-essx8336",
+               .sof_tplg_filename = "sof-mtl-es8336", /* the tplg suffix is added at run time */
+               .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER |
+                                       SND_SOC_ACPI_TPLG_INTEL_SSP_MSB |
+                                       SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER,
+       },
        {},
 };
 EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_mtl_machines);
index b70034c..b8a3cb8 100644 (file)
@@ -773,7 +773,7 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
                if (IS_ERR(priv->extclk)) {
                        ret = PTR_ERR(priv->extclk);
                        if (ret == -EPROBE_DEFER)
-                               return ret;
+                               goto err_priv;
 
                        priv->extclk = NULL;
                }
index ba7c0ae..566033f 100644 (file)
@@ -242,6 +242,7 @@ int snd_soc_component_notify_control(struct snd_soc_component *component,
        char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
        struct snd_kcontrol *kctl;
 
+       /* When updating, change also snd_soc_dapm_widget_name_cmp() */
        if (component->name_prefix)
                snprintf(name, ARRAY_SIZE(name), "%s %s", component->name_prefix, ctl);
        else
index cc442c5..9de98c0 100644 (file)
@@ -1347,7 +1347,7 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
        snd_soc_runtime_get_dai_fmt(rtd);
        ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
        if (ret)
-               return ret;
+               goto err;
 
        /* add DPCM sysfs entries */
        soc_dpcm_debugfs_add(rtd);
@@ -1372,17 +1372,26 @@ static int soc_init_pcm_runtime(struct snd_soc_card *card,
        /* create compress_device if possible */
        ret = snd_soc_dai_compress_new(cpu_dai, rtd, num);
        if (ret != -ENOTSUPP)
-               return ret;
+               goto err;
 
        /* create the pcm */
        ret = soc_new_pcm(rtd, num);
        if (ret < 0) {
                dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
                        dai_link->stream_name, ret);
-               return ret;
+               goto err;
        }
 
-       return snd_soc_pcm_dai_new(rtd);
+       ret = snd_soc_pcm_dai_new(rtd);
+       if (ret < 0)
+               goto err;
+
+       rtd->initialized = true;
+
+       return 0;
+err:
+       snd_soc_link_exit(rtd);
+       return ret;
 }
 
 static void soc_set_name_prefix(struct snd_soc_card *card,
@@ -1445,8 +1454,8 @@ static int soc_probe_component(struct snd_soc_card *card,
        if (component->card) {
                if (component->card != card) {
                        dev_err(component->dev,
-                               "Trying to bind component to card \"%s\" but is already bound to card \"%s\"\n",
-                               card->name, component->card->name);
+                               "Trying to bind component \"%s\" to card \"%s\" but is already bound to card \"%s\"\n",
+                               component->name, card->name, component->card->name);
                        return -ENODEV;
                }
                return 0;
@@ -1980,7 +1989,8 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
 
        /* release machine specific resources */
        for_each_card_rtds(card, rtd)
-               snd_soc_link_exit(rtd);
+               if (rtd->initialized)
+                       snd_soc_link_exit(rtd);
        /* remove and free each DAI */
        soc_remove_link_dais(card);
        soc_remove_link_components(card);
index f07e836..312e555 100644 (file)
@@ -2728,6 +2728,18 @@ int snd_soc_dapm_update_dai(struct snd_pcm_substream *substream,
 }
 EXPORT_SYMBOL_GPL(snd_soc_dapm_update_dai);
 
+int snd_soc_dapm_widget_name_cmp(struct snd_soc_dapm_widget *widget, const char *s)
+{
+       struct snd_soc_component *component = snd_soc_dapm_to_component(widget->dapm);
+       const char *wname = widget->name;
+
+       if (component->name_prefix)
+               wname += strlen(component->name_prefix) + 1; /* plus space */
+
+       return strcmp(wname, s);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_widget_name_cmp);
+
 /*
  * dapm_update_widget_flags() - Re-compute widget sink and source flags
  * @w: The widget for which to update the flags
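
The new snd_soc_dapm_widget_name_cmp() helper exported above compares a widget's name against a string while skipping any component name_prefix. A hedged usage sketch (hypothetical machine-driver DAPM event callback and widget name, not code from this series):

#include <linux/device.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>

static int example_widget_event(struct snd_soc_dapm_widget *w,
				struct snd_kcontrol *kcontrol, int event)
{
	/* Matches "SPK Amp" whether or not the card set a name_prefix
	 * such as "Left", which would make the full name "Left SPK Amp". */
	if (!snd_soc_dapm_widget_name_cmp(w, "SPK Amp"))
		dev_dbg(w->dapm->dev, "speaker amp widget: %s\n", w->name);

	return 0;
}
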
index d0653d7..cad222e 100644 (file)
@@ -44,8 +44,8 @@ static struct device *dmaengine_dma_dev(struct dmaengine_pcm *pcm,
  * platforms which make use of the snd_dmaengine_dai_dma_data struct for their
  * DAI DMA data. Internally the function will first call
  * snd_hwparams_to_dma_slave_config to fill in the slave config based on the
- * hw_params, followed by snd_dmaengine_set_config_from_dai_data to fill in the
- * remaining fields based on the DAI DMA data.
+ * hw_params, followed by snd_dmaengine_pcm_set_config_from_dai_data to fill in
+ * the remaining fields based on the DAI DMA data.
  */
 int snd_dmaengine_pcm_prepare_slave_config(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
index 9935e45..a7ae76e 100644 (file)
@@ -35,7 +35,6 @@ static const struct sof_amd_acp_desc rembrandt_chip_info = {
        .dsp_intr_base  = ACP6X_DSP_SW_INTR_BASE,
        .sram_pte_offset = ACP6X_SRAM_PTE_OFFSET,
        .hw_semaphore_offset = ACP6X_AXI2DAGB_SEM_0,
-       .acp_clkmux_sel = ACP6X_CLKMUX_SEL,
        .fusion_dsp_offset = ACP6X_DSP_FUSION_RUNSTALL,
        .probe_reg_offset = ACP6X_FUTURE_REG_ACLK_0,
 };
index 3719433..666057d 100644 (file)
@@ -336,8 +336,8 @@ static void cx81801_hangup(struct tty_struct *tty)
 }
 
 /* Line discipline .receive_buf() */
-static void cx81801_receive(struct tty_struct *tty, const u8 *cp,
-               const char *fp, int count)
+static void cx81801_receive(struct tty_struct *tty, const u8 *cp, const u8 *fp,
+                           size_t count)
 {
        struct snd_soc_component *component = tty->disc_data;
        const unsigned char *c;
index 985b1ae..409fc11 100644 (file)
@@ -1204,6 +1204,13 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
                        cval->res = 16;
                }
                break;
+       case USB_ID(0x1bcf, 0x2283): /* NexiGo N930AF FHD Webcam */
+               if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
+                       usb_audio_info(chip,
+                               "set resolution quirk: cval->res = 16\n");
+                       cval->res = 16;
+               }
+               break;
        }
 }
 
index 598659d..4e64842 100644 (file)
@@ -1994,7 +1994,11 @@ void snd_usb_audioformat_attributes_quirk(struct snd_usb_audio *chip,
                /* mic works only when ep packet size is set to wMaxPacketSize */
                fp->attributes |= UAC_EP_CS_ATTR_FILL_MAX;
                break;
-
+       case USB_ID(0x3511, 0x2b1e): /* Opencomm2 UC USB Bluetooth dongle */
+               /* mic works only when ep pitch control is not set */
+               if (stream == SNDRV_PCM_STREAM_CAPTURE)
+                       fp->attributes &= ~UAC_EP_CS_ATTR_PITCH_CONTROL;
+               break;
        }
 }
 
@@ -2173,6 +2177,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
                   QUIRK_FLAG_FIXED_RATE),
        DEVICE_FLG(0x0ecb, 0x2069, /* JBL Quantum810 Wireless */
                   QUIRK_FLAG_FIXED_RATE),
+       DEVICE_FLG(0x1bcf, 0x2283, /* NexiGo N930AF FHD Webcam */
+                  QUIRK_FLAG_GET_SAMPLE_RATE),
 
        /* Vendor matches */
        VENDOR_FLG(0x045e, /* MS Lifecam */
index 4798f9d..9de35df 100644 (file)
@@ -26,6 +26,6 @@
 #ifndef __NR_setns
 #define __NR_setns 346
 #endif
-#ifdef __NR_seccomp
+#ifndef __NR_seccomp
 #define __NR_seccomp 354
 #endif
index 27f5e7d..264eeb9 100644 (file)
@@ -1171,12 +1171,79 @@ static int process_ip_string(FILE *f, char *ip_string, int type)
        return 0;
 }
 
+/*
+ * Only IPv4 subnet strings needs to be converted to plen
+ * For IPv6 the subnet is already privided in plen format
+ */
+static int kvp_subnet_to_plen(char *subnet_addr_str)
+{
+       int plen = 0;
+       struct in_addr subnet_addr4;
+
+       /*
+        * Convert subnet address to binary representation
+        */
+       if (inet_pton(AF_INET, subnet_addr_str, &subnet_addr4) == 1) {
+               uint32_t subnet_mask = ntohl(subnet_addr4.s_addr);
+
+               while (subnet_mask & 0x80000000) {
+                       plen++;
+                       subnet_mask <<= 1;
+               }
+       } else {
+               return -1;
+       }
+
+       return plen;
+}
+
+static int process_ip_string_nm(FILE *f, char *ip_string, char *subnet,
+                               int is_ipv6)
+{
+       char addr[INET6_ADDRSTRLEN];
+       char subnet_addr[INET6_ADDRSTRLEN];
+       int error, i = 0;
+       int ip_offset = 0, subnet_offset = 0;
+       int plen;
+
+       memset(addr, 0, sizeof(addr));
+       memset(subnet_addr, 0, sizeof(subnet_addr));
+
+       while (parse_ip_val_buffer(ip_string, &ip_offset, addr,
+                                  (MAX_IP_ADDR_SIZE * 2)) &&
+                                  parse_ip_val_buffer(subnet,
+                                                      &subnet_offset,
+                                                      subnet_addr,
+                                                      (MAX_IP_ADDR_SIZE *
+                                                       2))) {
+               if (!is_ipv6)
+                       plen = kvp_subnet_to_plen((char *)subnet_addr);
+               else
+                       plen = atoi(subnet_addr);
+
+               if (plen < 0)
+                       return plen;
+
+               error = fprintf(f, "address%d=%s/%d\n", ++i, (char *)addr,
+                               plen);
+               if (error < 0)
+                       return error;
+
+               memset(addr, 0, sizeof(addr));
+               memset(subnet_addr, 0, sizeof(subnet_addr));
+       }
+
+       return 0;
+}
+
 static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 {
        int error = 0;
-       char if_file[PATH_MAX];
-       FILE *file;
+       char if_filename[PATH_MAX];
+       char nm_filename[PATH_MAX];
+       FILE *ifcfg_file, *nmfile;
        char cmd[PATH_MAX];
+       int is_ipv6 = 0;
        char *mac_addr;
        int str_len;
 
@@ -1197,7 +1264,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         * in a given distro to configure the interface and so are free
         * ignore information that may not be relevant.
         *
-        * Here is the format of the ip configuration file:
+        * Here is the ifcfg format of the ip configuration file:
         *
         * HWADDR=macaddr
         * DEVICE=interface name
@@ -1220,6 +1287,32 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         * tagged as IPV6_DEFAULTGW and IPV6 NETMASK will be tagged as
         * IPV6NETMASK.
         *
+        * Here is the keyfile format of the ip configuration file:
+        *
+        * [ethernet]
+        * mac-address=macaddr
+        * [connection]
+        * interface-name=interface name
+        *
+        * [ipv4]
+        * method=<protocol> (where <protocol> is "auto" if DHCP is configured
+        *                       or "manual" if no boot-time protocol should be used)
+        *
+        * address1=ipaddr1/plen
+        * address2=ipaddr2/plen
+        *
+        * gateway=gateway1;gateway2
+        *
+        * dns=dns1;dns2
+        *
+        * [ipv6]
+        * address1=ipaddr1/plen
+        * address2=ipaddr2/plen
+        *
+        * gateway=gateway1;gateway2
+        *
+        * dns=dns1;dns2
+        *
         * The host can specify multiple ipv4 and ipv6 addresses to be
         * configured for the interface. Furthermore, the configuration
         * needs to be persistent. A subsequent GET call on the interface
@@ -1227,14 +1320,29 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         * call.
         */
 
-       snprintf(if_file, sizeof(if_file), "%s%s%s", KVP_CONFIG_LOC,
-               "/ifcfg-", if_name);
+       /*
+        * We are populating both ifcfg and nmconnection files
+        */
+       snprintf(if_filename, sizeof(if_filename), "%s%s%s", KVP_CONFIG_LOC,
+                "/ifcfg-", if_name);
 
-       file = fopen(if_file, "w");
+       ifcfg_file = fopen(if_filename, "w");
 
-       if (file == NULL) {
+       if (!ifcfg_file) {
                syslog(LOG_ERR, "Failed to open config file; error: %d %s",
-                               errno, strerror(errno));
+                      errno, strerror(errno));
+               return HV_E_FAIL;
+       }
+
+       snprintf(nm_filename, sizeof(nm_filename), "%s%s%s%s", KVP_CONFIG_LOC,
+                "/", if_name, ".nmconnection");
+
+       nmfile = fopen(nm_filename, "w");
+
+       if (!nmfile) {
+               syslog(LOG_ERR, "Failed to open config file; error: %d %s",
+                      errno, strerror(errno));
+               fclose(ifcfg_file);
                return HV_E_FAIL;
        }
 
@@ -1248,14 +1356,31 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
                goto setval_error;
        }
 
-       error = kvp_write_file(file, "HWADDR", "", mac_addr);
-       free(mac_addr);
+       error = kvp_write_file(ifcfg_file, "HWADDR", "", mac_addr);
+       if (error < 0)
+               goto setmac_error;
+
+       error = kvp_write_file(ifcfg_file, "DEVICE", "", if_name);
+       if (error < 0)
+               goto setmac_error;
+
+       error = fprintf(nmfile, "\n[connection]\n");
+       if (error < 0)
+               goto setmac_error;
+
+       error = kvp_write_file(nmfile, "interface-name", "", if_name);
        if (error)
-               goto setval_error;
+               goto setmac_error;
 
-       error = kvp_write_file(file, "DEVICE", "", if_name);
+       error = fprintf(nmfile, "\n[ethernet]\n");
+       if (error < 0)
+               goto setmac_error;
+
+       error = kvp_write_file(nmfile, "mac-address", "", mac_addr);
        if (error)
-               goto setval_error;
+               goto setmac_error;
+
+       free(mac_addr);
 
        /*
         * The dhcp_enabled flag is only for IPv4. In the case the host only
@@ -1263,47 +1388,91 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
         * proceed to parse and pass the IPv6 information to the
         * disto-specific script hv_set_ifconfig.
         */
+
+       /*
+        * First populate the ifcfg file format
+        */
        if (new_val->dhcp_enabled) {
-               error = kvp_write_file(file, "BOOTPROTO", "", "dhcp");
+               error = kvp_write_file(ifcfg_file, "BOOTPROTO", "", "dhcp");
                if (error)
                        goto setval_error;
-
        } else {
-               error = kvp_write_file(file, "BOOTPROTO", "", "none");
+               error = kvp_write_file(ifcfg_file, "BOOTPROTO", "", "none");
                if (error)
                        goto setval_error;
        }
 
-       /*
-        * Write the configuration for ipaddress, netmask, gateway and
-        * name servers.
-        */
-
-       error = process_ip_string(file, (char *)new_val->ip_addr, IPADDR);
+       error = process_ip_string(ifcfg_file, (char *)new_val->ip_addr,
+                                 IPADDR);
        if (error)
                goto setval_error;
 
-       error = process_ip_string(file, (char *)new_val->sub_net, NETMASK);
+       error = process_ip_string(ifcfg_file, (char *)new_val->sub_net,
+                                 NETMASK);
        if (error)
                goto setval_error;
 
-       error = process_ip_string(file, (char *)new_val->gate_way, GATEWAY);
+       error = process_ip_string(ifcfg_file, (char *)new_val->gate_way,
+                                 GATEWAY);
        if (error)
                goto setval_error;
 
-       error = process_ip_string(file, (char *)new_val->dns_addr, DNS);
+       error = process_ip_string(ifcfg_file, (char *)new_val->dns_addr, DNS);
        if (error)
                goto setval_error;
 
-       fclose(file);
+       if (new_val->addr_family == ADDR_FAMILY_IPV6) {
+               error = fprintf(nmfile, "\n[ipv6]\n");
+               if (error < 0)
+                       goto setval_error;
+               is_ipv6 = 1;
+       } else {
+               error = fprintf(nmfile, "\n[ipv4]\n");
+               if (error < 0)
+                       goto setval_error;
+       }
+
+       /*
+        * Now we populate the keyfile format
+        */
+
+       if (new_val->dhcp_enabled) {
+               error = kvp_write_file(nmfile, "method", "", "auto");
+               if (error < 0)
+                       goto setval_error;
+       } else {
+               error = kvp_write_file(nmfile, "method", "", "manual");
+               if (error < 0)
+                       goto setval_error;
+       }
+
+       /*
+        * Write the configuration for ipaddress, netmask, gateway and
+        * name services
+        */
+       error = process_ip_string_nm(nmfile, (char *)new_val->ip_addr,
+                                    (char *)new_val->sub_net, is_ipv6);
+       if (error < 0)
+               goto setval_error;
+
+       error = fprintf(nmfile, "gateway=%s\n", (char *)new_val->gate_way);
+       if (error < 0)
+               goto setval_error;
+
+       error = fprintf(nmfile, "dns=%s\n", (char *)new_val->dns_addr);
+       if (error < 0)
+               goto setval_error;
+
+       fclose(nmfile);
+       fclose(ifcfg_file);
 
        /*
         * Now that we have populated the configuration file,
         * invoke the external script to do its magic.
         */
 
-       str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
-                          "hv_set_ifconfig", if_file);
+       str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s %s",
+                          "hv_set_ifconfig", if_filename, nm_filename);
        /*
         * This is a little overcautious, but it's necessary to suppress some
         * false warnings from gcc 8.0.1.
@@ -1316,14 +1485,16 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
 
        if (system(cmd)) {
                syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
-                               cmd, errno, strerror(errno));
+                      cmd, errno, strerror(errno));
                return HV_E_FAIL;
        }
        return 0;
-
+setmac_error:
+       free(mac_addr);
 setval_error:
        syslog(LOG_ERR, "Failed to write config file");
-       fclose(file);
+       fclose(ifcfg_file);
+       fclose(nmfile);
        return error;
 }
 
index d10fe35..ae5a7a8 100755 (executable)
 #
 # This example script is based on a RHEL environment.
 #
-# Here is the format of the ip configuration file:
+# Here is the ifcfg format of the ip configuration file:
 #
 # HWADDR=macaddr
 # DEVICE=interface name
 # BOOTPROTO=<protocol> (where <protocol> is "dhcp" if DHCP is configured
-#                       or "none" if no boot-time protocol should be used)
+#                      or "none" if no boot-time protocol should be used)
 #
 # IPADDR0=ipaddr1
 # IPADDR1=ipaddr2
 # tagged as IPV6_DEFAULTGW and IPV6 NETMASK will be tagged as
 # IPV6NETMASK.
 #
+# Here is the keyfile format of the ip configuration file:
+#
+# [ethernet]
+# mac-address=macaddr
+# [connection]
+# interface-name=interface name
+#
+# [ipv4]
+# method=<protocol> (where <protocol> is "auto" if DHCP is configured
+#                       or "manual" if no boot-time protocol should be used)
+#
+# address1=ipaddr1/plen
+# address=ipaddr2/plen
+#
+# gateway=gateway1;gateway2
+#
+# dns=dns1;
+#
+# [ipv6]
+# address1=ipaddr1/plen
+# address2=ipaddr1/plen
+#
+# gateway=gateway1;gateway2
+#
+# dns=dns1;dns2
+#
 # The host can specify multiple ipv4 and ipv6 addresses to be
 # configured for the interface. Furthermore, the configuration
 # needs to be persistent. A subsequent GET call on the interface
 # call.
 #
 
-
-
 echo "IPV6INIT=yes" >> $1
 echo "NM_CONTROLLED=no" >> $1
 echo "PEERDNS=yes" >> $1
 echo "ONBOOT=yes" >> $1
 
-
 cp $1 /etc/sysconfig/network-scripts/
 
+chmod 600 $2
+interface=$(echo $2 | awk -F - '{ print $2 }')
+filename="${2##*/}"
+
+sed '/\[connection\]/a autoconnect=true' $2 > /etc/NetworkManager/system-connections/${filename}
 
-interface=$(echo $1 | awk -F - '{ print $2 }')
 
 /sbin/ifdown $interface 2>/dev/null
 /sbin/ifup $interface 2>/dev/null
index 3a8d849..2cb2518 100644 (file)
 static const char * const devlink_op_strmap[] = {
        [3] = "get",
        [7] = "port-get",
-       [DEVLINK_CMD_SB_GET] = "sb-get",
-       [DEVLINK_CMD_SB_POOL_GET] = "sb-pool-get",
-       [DEVLINK_CMD_SB_PORT_POOL_GET] = "sb-port-pool-get",
-       [DEVLINK_CMD_SB_TC_POOL_BIND_GET] = "sb-tc-pool-bind-get",
+       [13] = "sb-get",
+       [17] = "sb-pool-get",
+       [21] = "sb-port-pool-get",
+       [25] = "sb-tc-pool-bind-get",
        [DEVLINK_CMD_PARAM_GET] = "param-get",
        [DEVLINK_CMD_REGION_GET] = "region-get",
        [DEVLINK_CMD_INFO_GET] = "info-get",
        [DEVLINK_CMD_HEALTH_REPORTER_GET] = "health-reporter-get",
-       [DEVLINK_CMD_TRAP_GET] = "trap-get",
-       [DEVLINK_CMD_TRAP_GROUP_GET] = "trap-group-get",
-       [DEVLINK_CMD_TRAP_POLICER_GET] = "trap-policer-get",
-       [DEVLINK_CMD_RATE_GET] = "rate-get",
-       [DEVLINK_CMD_LINECARD_GET] = "linecard-get",
+       [63] = "trap-get",
+       [67] = "trap-group-get",
+       [71] = "trap-policer-get",
+       [76] = "rate-get",
+       [80] = "linecard-get",
        [DEVLINK_CMD_SELFTESTS_GET] = "selftests-get",
 };
 
@@ -838,7 +838,7 @@ devlink_sb_get(struct ynl_sock *ys, struct devlink_sb_get_req *req)
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_sb_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_SB_GET;
+       yrs.rsp_cmd = 13;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -876,7 +876,7 @@ devlink_sb_get_dump(struct ynl_sock *ys, struct devlink_sb_get_req_dump *req)
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_sb_get_list);
        yds.cb = devlink_sb_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_SB_GET;
+       yds.rsp_cmd = 13;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_GET, 1);
@@ -987,7 +987,7 @@ devlink_sb_pool_get(struct ynl_sock *ys, struct devlink_sb_pool_get_req *req)
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_sb_pool_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+       yrs.rsp_cmd = 17;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -1026,7 +1026,7 @@ devlink_sb_pool_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_sb_pool_get_list);
        yds.cb = devlink_sb_pool_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_SB_POOL_GET;
+       yds.rsp_cmd = 17;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_POOL_GET, 1);
@@ -1147,7 +1147,7 @@ devlink_sb_port_pool_get(struct ynl_sock *ys,
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_sb_port_pool_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+       yrs.rsp_cmd = 21;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -1187,7 +1187,7 @@ devlink_sb_port_pool_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_sb_port_pool_get_list);
        yds.cb = devlink_sb_port_pool_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_SB_PORT_POOL_GET;
+       yds.rsp_cmd = 21;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_PORT_POOL_GET, 1);
@@ -1316,7 +1316,7 @@ devlink_sb_tc_pool_bind_get(struct ynl_sock *ys,
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+       yrs.rsp_cmd = 25;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -1356,7 +1356,7 @@ devlink_sb_tc_pool_bind_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_sb_tc_pool_bind_get_list);
        yds.cb = devlink_sb_tc_pool_bind_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_SB_TC_POOL_BIND_GET;
+       yds.rsp_cmd = 25;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_SB_TC_POOL_BIND_GET, 1);
@@ -2183,7 +2183,7 @@ devlink_trap_get(struct ynl_sock *ys, struct devlink_trap_get_req *req)
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_trap_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+       yrs.rsp_cmd = 63;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -2223,7 +2223,7 @@ devlink_trap_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_trap_get_list);
        yds.cb = devlink_trap_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_TRAP_GET;
+       yds.rsp_cmd = 63;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GET, 1);
@@ -2336,7 +2336,7 @@ devlink_trap_group_get(struct ynl_sock *ys,
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_trap_group_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+       yrs.rsp_cmd = 67;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -2376,7 +2376,7 @@ devlink_trap_group_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_trap_group_get_list);
        yds.cb = devlink_trap_group_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_TRAP_GROUP_GET;
+       yds.rsp_cmd = 67;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_GROUP_GET, 1);
@@ -2483,7 +2483,7 @@ devlink_trap_policer_get(struct ynl_sock *ys,
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_trap_policer_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+       yrs.rsp_cmd = 71;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -2523,7 +2523,7 @@ devlink_trap_policer_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_trap_policer_get_list);
        yds.cb = devlink_trap_policer_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_TRAP_POLICER_GET;
+       yds.rsp_cmd = 71;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_TRAP_POLICER_GET, 1);
@@ -2642,7 +2642,7 @@ devlink_rate_get(struct ynl_sock *ys, struct devlink_rate_get_req *req)
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_rate_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_RATE_GET;
+       yrs.rsp_cmd = 76;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -2682,7 +2682,7 @@ devlink_rate_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_rate_get_list);
        yds.cb = devlink_rate_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_RATE_GET;
+       yds.rsp_cmd = 76;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_RATE_GET, 1);
@@ -2786,7 +2786,7 @@ devlink_linecard_get(struct ynl_sock *ys, struct devlink_linecard_get_req *req)
        rsp = calloc(1, sizeof(*rsp));
        yrs.yarg.data = rsp;
        yrs.cb = devlink_linecard_get_rsp_parse;
-       yrs.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+       yrs.rsp_cmd = 80;
 
        err = ynl_exec(ys, nlh, &yrs);
        if (err < 0)
@@ -2825,7 +2825,7 @@ devlink_linecard_get_dump(struct ynl_sock *ys,
        yds.ys = ys;
        yds.alloc_sz = sizeof(struct devlink_linecard_get_list);
        yds.cb = devlink_linecard_get_rsp_parse;
-       yds.rsp_cmd = DEVLINK_CMD_LINECARD_GET;
+       yds.rsp_cmd = 80;
        yds.rsp_policy = &devlink_nest;
 
        nlh = ynl_gemsg_start_dump(ys, ys->family_id, DEVLINK_CMD_LINECARD_GET, 1);
index 6c93215..67f985f 100644 (file)
@@ -45,7 +45,7 @@ static inline __u32 ifindex_from_link_fd(int fd)
        return link_info.tcx.ifindex;
 }
 
-static inline void __assert_mprog_count(int target, int expected, bool miniq, int ifindex)
+static inline void __assert_mprog_count(int target, int expected, int ifindex)
 {
        __u32 count = 0, attach_flags = 0;
        int err;
@@ -53,20 +53,22 @@ static inline void __assert_mprog_count(int target, int expected, bool miniq, in
        err = bpf_prog_query(ifindex, target, 0, &attach_flags,
                             NULL, &count);
        ASSERT_EQ(count, expected, "count");
-       if (!expected && !miniq)
-               ASSERT_EQ(err, -ENOENT, "prog_query");
-       else
-               ASSERT_EQ(err, 0, "prog_query");
+       ASSERT_EQ(err, 0, "prog_query");
 }
 
 static inline void assert_mprog_count(int target, int expected)
 {
-       __assert_mprog_count(target, expected, false, loopback);
+       __assert_mprog_count(target, expected, loopback);
 }
 
 static inline void assert_mprog_count_ifindex(int ifindex, int target, int expected)
 {
-       __assert_mprog_count(target, expected, false, ifindex);
+       __assert_mprog_count(target, expected, ifindex);
+}
+
+static inline void tc_skel_reset_all_seen(struct test_tc_link *skel)
+{
+       memset(skel->bss, 0, sizeof(*skel->bss));
 }
 
 #endif /* TC_HELPERS */
index 74fc1fe..bc98411 100644 (file)
@@ -65,6 +65,7 @@ void serial_test_tc_links_basic(void)
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
        ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -97,6 +98,7 @@ void serial_test_tc_links_basic(void)
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
        ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -187,6 +189,7 @@ static void test_tc_links_before_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -194,9 +197,6 @@ static void test_tc_links_before_target(int target)
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-
        LIBBPF_OPTS_RESET(optl,
                .flags = BPF_F_BEFORE,
                .relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -246,6 +246,7 @@ static void test_tc_links_before_target(int target)
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
        ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -342,6 +343,7 @@ static void test_tc_links_after_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -349,9 +351,6 @@ static void test_tc_links_after_target(int target)
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-
        LIBBPF_OPTS_RESET(optl,
                .flags = BPF_F_AFTER,
                .relative_fd = bpf_program__fd(skel->progs.tc1),
@@ -401,6 +400,7 @@ static void test_tc_links_after_target(int target)
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
        ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -502,6 +502,7 @@ static void test_tc_links_revision_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -581,22 +582,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
 
        assert_mprog_count(target, 2);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        err = bpf_link__detach(skel->links.tc2);
        if (!ASSERT_OK(err, "prog_detach"))
                goto cleanup;
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -707,16 +706,13 @@ static void test_tc_links_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        LIBBPF_OPTS_RESET(optl,
                .flags = BPF_F_REPLACE,
                .relative_fd = bpf_program__fd(skel->progs.tc2),
@@ -781,16 +777,13 @@ static void test_tc_links_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        err = bpf_link__detach(skel->links.tc2);
        if (!ASSERT_OK(err, "link_detach"))
                goto cleanup;
@@ -812,16 +805,13 @@ static void test_tc_links_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
        ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        err = bpf_link__update_program(skel->links.tc1, skel->progs.tc1);
        if (!ASSERT_OK(err, "link_update_self"))
                goto cleanup;
@@ -843,6 +833,7 @@ static void test_tc_links_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
        ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1254,6 +1245,7 @@ static void test_tc_links_prepend_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1261,9 +1253,6 @@ static void test_tc_links_prepend_target(int target)
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-
        LIBBPF_OPTS_RESET(optl,
                .flags = BPF_F_BEFORE,
        );
@@ -1311,6 +1300,7 @@ static void test_tc_links_prepend_target(int target)
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
        ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1411,6 +1401,7 @@ static void test_tc_links_append_target(int target)
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
        ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1418,9 +1409,6 @@ static void test_tc_links_append_target(int target)
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-
        LIBBPF_OPTS_RESET(optl,
                .flags = BPF_F_AFTER,
        );
@@ -1468,6 +1456,7 @@ static void test_tc_links_append_target(int target)
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
        ASSERT_EQ(optq.link_ids[4], 0, "link_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1637,38 +1626,33 @@ static void test_tc_chain_mixed(int target)
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
        ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
        ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
 
-       skel->bss->seen_tc4 = false;
-       skel->bss->seen_tc5 = false;
-       skel->bss->seen_tc6 = false;
-
        err = bpf_link__update_program(skel->links.tc6, skel->progs.tc4);
        if (!ASSERT_OK(err, "link_update"))
                goto cleanup;
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
        ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
        ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
 
-       skel->bss->seen_tc4 = false;
-       skel->bss->seen_tc5 = false;
-       skel->bss->seen_tc6 = false;
-
        err = bpf_link__detach(skel->links.tc6);
        if (!ASSERT_OK(err, "prog_detach"))
                goto cleanup;
 
-       __assert_mprog_count(target, 0, true, loopback);
+       assert_mprog_count(target, 0);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -1758,22 +1742,20 @@ static void test_tc_links_ingress(int target, bool chain_tc_old,
 
        assert_mprog_count(target, 2);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        err = bpf_link__detach(skel->links.tc2);
        if (!ASSERT_OK(err, "prog_detach"))
                goto cleanup;
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
index 99af79e..ca506d2 100644 (file)
@@ -59,6 +59,7 @@ void serial_test_tc_opts_basic(void)
        ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -83,6 +84,7 @@ void serial_test_tc_opts_basic(void)
        ASSERT_EQ(optq.prog_ids[0], id2, "prog_ids[0]");
        ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -163,6 +165,7 @@ static void test_tc_opts_before_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -219,6 +222,7 @@ static void test_tc_opts_before_target(int target)
        ASSERT_EQ(optq.prog_ids[3], id2, "prog_ids[3]");
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -313,6 +317,7 @@ static void test_tc_opts_after_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -369,6 +374,7 @@ static void test_tc_opts_after_target(int target)
        ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -514,6 +520,7 @@ static void test_tc_opts_revision_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -608,22 +615,20 @@ static void test_tc_chain_classic(int target, bool chain_tc_old)
 
        assert_mprog_count(target, 2);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, chain_tc_old, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
        if (!ASSERT_OK(err, "prog_detach"))
                goto cleanup_detach;
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -635,7 +640,7 @@ cleanup_detach:
        if (!ASSERT_OK(err, "prog_detach"))
                goto cleanup;
 
-       __assert_mprog_count(target, 0, chain_tc_old, loopback);
+       assert_mprog_count(target, 0);
 cleanup:
        if (tc_attached) {
                tc_opts.flags = tc_opts.prog_fd = tc_opts.prog_id = 0;
@@ -730,16 +735,13 @@ static void test_tc_opts_replace_target(int target)
        ASSERT_EQ(optq.prog_attach_flags[1], 0, "prog_flags[1]");
        ASSERT_EQ(optq.prog_attach_flags[2], 0, "prog_flags[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, false, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        LIBBPF_OPTS_RESET(opta,
                .flags = BPF_F_REPLACE,
                .replace_prog_fd = fd2,
@@ -767,16 +769,13 @@ static void test_tc_opts_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
        ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2");
        ASSERT_EQ(skel->bss->seen_tc3, true, "seen_tc3");
 
-       skel->bss->seen_tc1 = false;
-       skel->bss->seen_tc2 = false;
-       skel->bss->seen_tc3 = false;
-
        LIBBPF_OPTS_RESET(opta,
                .flags = BPF_F_REPLACE | BPF_F_BEFORE,
                .replace_prog_fd = fd3,
@@ -805,6 +804,7 @@ static void test_tc_opts_replace_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1084,6 +1084,7 @@ static void test_tc_opts_prepend_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id1, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1124,6 +1125,7 @@ static void test_tc_opts_prepend_target(int target)
        ASSERT_EQ(optq.prog_ids[3], id1, "prog_ids[3]");
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1222,6 +1224,7 @@ static void test_tc_opts_append_target(int target)
        ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
        ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -1262,6 +1265,7 @@ static void test_tc_opts_append_target(int target)
        ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
        ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1");
@@ -2250,7 +2254,7 @@ static void test_tc_opts_delete_empty(int target, bool chain_tc_old)
                                       BPF_TC_INGRESS : BPF_TC_EGRESS;
                err = bpf_tc_hook_create(&tc_hook);
                ASSERT_OK(err, "bpf_tc_hook_create");
-               __assert_mprog_count(target, 0, true, loopback);
+               assert_mprog_count(target, 0);
        }
        err = bpf_prog_detach_opts(0, loopback, target, &optd);
        ASSERT_EQ(err, -ENOENT, "prog_detach");
@@ -2316,16 +2320,13 @@ static void test_tc_chain_mixed(int target)
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
        ASSERT_EQ(skel->bss->seen_tc5, false, "seen_tc5");
        ASSERT_EQ(skel->bss->seen_tc6, true, "seen_tc6");
 
-       skel->bss->seen_tc4 = false;
-       skel->bss->seen_tc5 = false;
-       skel->bss->seen_tc6 = false;
-
        LIBBPF_OPTS_RESET(opta,
                .flags = BPF_F_REPLACE,
                .replace_prog_fd = fd3,
@@ -2339,21 +2340,19 @@ static void test_tc_chain_mixed(int target)
 
        assert_mprog_count(target, 1);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, true, "seen_tc4");
        ASSERT_EQ(skel->bss->seen_tc5, true, "seen_tc5");
        ASSERT_EQ(skel->bss->seen_tc6, false, "seen_tc6");
 
-       skel->bss->seen_tc4 = false;
-       skel->bss->seen_tc5 = false;
-       skel->bss->seen_tc6 = false;
-
 cleanup_opts:
        err = bpf_prog_detach_opts(detach_fd, loopback, target, &optd);
        ASSERT_OK(err, "prog_detach");
-       __assert_mprog_count(target, 0, true, loopback);
+       assert_mprog_count(target, 0);
 
+       tc_skel_reset_all_seen(skel);
        ASSERT_OK(system(ping_cmd), ping_cmd);
 
        ASSERT_EQ(skel->bss->seen_tc4, false, "seen_tc4");
@@ -2462,3 +2461,229 @@ void serial_test_tc_opts_max(void)
        test_tc_opts_max_target(BPF_TCX_INGRESS, BPF_F_AFTER, true);
        test_tc_opts_max_target(BPF_TCX_EGRESS, BPF_F_AFTER, false);
 }
+
+static void test_tc_opts_query_target(int target)
+{
+       const size_t attr_size = offsetofend(union bpf_attr, query);
+       LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+       LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+       LIBBPF_OPTS(bpf_prog_query_opts, optq);
+       __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4;
+       struct test_tc_link *skel;
+       union bpf_attr attr;
+       __u32 prog_ids[5];
+       int err;
+
+       skel = test_tc_link__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_load"))
+               goto cleanup;
+
+       fd1 = bpf_program__fd(skel->progs.tc1);
+       fd2 = bpf_program__fd(skel->progs.tc2);
+       fd3 = bpf_program__fd(skel->progs.tc3);
+       fd4 = bpf_program__fd(skel->progs.tc4);
+
+       id1 = id_from_prog_fd(fd1);
+       id2 = id_from_prog_fd(fd2);
+       id3 = id_from_prog_fd(fd3);
+       id4 = id_from_prog_fd(fd4);
+
+       assert_mprog_count(target, 0);
+
+       LIBBPF_OPTS_RESET(opta,
+               .expected_revision = 1,
+       );
+
+       err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+       if (!ASSERT_EQ(err, 0, "prog_attach"))
+               goto cleanup;
+
+       assert_mprog_count(target, 1);
+
+       LIBBPF_OPTS_RESET(opta,
+               .expected_revision = 2,
+       );
+
+       err = bpf_prog_attach_opts(fd2, loopback, target, &opta);
+       if (!ASSERT_EQ(err, 0, "prog_attach"))
+               goto cleanup1;
+
+       assert_mprog_count(target, 2);
+
+       LIBBPF_OPTS_RESET(opta,
+               .expected_revision = 3,
+       );
+
+       err = bpf_prog_attach_opts(fd3, loopback, target, &opta);
+       if (!ASSERT_EQ(err, 0, "prog_attach"))
+               goto cleanup2;
+
+       assert_mprog_count(target, 3);
+
+       LIBBPF_OPTS_RESET(opta,
+               .expected_revision = 4,
+       );
+
+       err = bpf_prog_attach_opts(fd4, loopback, target, &opta);
+       if (!ASSERT_EQ(err, 0, "prog_attach"))
+               goto cleanup3;
+
+       assert_mprog_count(target, 4);
+
+       /* Test 1: Double query via libbpf API */
+       err = bpf_prog_query_opts(loopback, target, &optq);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup4;
+
+       ASSERT_EQ(optq.count, 4, "count");
+       ASSERT_EQ(optq.revision, 5, "revision");
+       ASSERT_EQ(optq.prog_ids, NULL, "prog_ids");
+       ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+       memset(prog_ids, 0, sizeof(prog_ids));
+       optq.prog_ids = prog_ids;
+
+       err = bpf_prog_query_opts(loopback, target, &optq);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup4;
+
+       ASSERT_EQ(optq.count, 4, "count");
+       ASSERT_EQ(optq.revision, 5, "revision");
+       ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+       ASSERT_EQ(optq.prog_ids[1], id2, "prog_ids[1]");
+       ASSERT_EQ(optq.prog_ids[2], id3, "prog_ids[2]");
+       ASSERT_EQ(optq.prog_ids[3], id4, "prog_ids[3]");
+       ASSERT_EQ(optq.prog_ids[4], 0, "prog_ids[4]");
+       ASSERT_EQ(optq.link_ids, NULL, "link_ids");
+
+       /* Test 2: Double query via bpf_attr & bpf(2) directly */
+       memset(&attr, 0, attr_size);
+       attr.query.target_ifindex = loopback;
+       attr.query.attach_type = target;
+
+       err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup4;
+
+       ASSERT_EQ(attr.query.count, 4, "count");
+       ASSERT_EQ(attr.query.revision, 5, "revision");
+       ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+       ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+       ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+       ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+       ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids");
+       ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+       ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+       ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+       memset(prog_ids, 0, sizeof(prog_ids));
+       attr.query.prog_ids = ptr_to_u64(prog_ids);
+
+       err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup4;
+
+       ASSERT_EQ(attr.query.count, 4, "count");
+       ASSERT_EQ(attr.query.revision, 5, "revision");
+       ASSERT_EQ(attr.query.query_flags, 0, "query_flags");
+       ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags");
+       ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex");
+       ASSERT_EQ(attr.query.attach_type, target, "attach_type");
+       ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids");
+       ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]");
+       ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]");
+       ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]");
+       ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]");
+       ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]");
+       ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags");
+       ASSERT_EQ(attr.query.link_ids, 0, "link_ids");
+       ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags");
+
+cleanup4:
+       err = bpf_prog_detach_opts(fd4, loopback, target, &optd);
+       ASSERT_OK(err, "prog_detach");
+       assert_mprog_count(target, 3);
+
+cleanup3:
+       err = bpf_prog_detach_opts(fd3, loopback, target, &optd);
+       ASSERT_OK(err, "prog_detach");
+       assert_mprog_count(target, 2);
+
+cleanup2:
+       err = bpf_prog_detach_opts(fd2, loopback, target, &optd);
+       ASSERT_OK(err, "prog_detach");
+       assert_mprog_count(target, 1);
+
+cleanup1:
+       err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+       ASSERT_OK(err, "prog_detach");
+       assert_mprog_count(target, 0);
+
+cleanup:
+       test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query(void)
+{
+       test_tc_opts_query_target(BPF_TCX_INGRESS);
+       test_tc_opts_query_target(BPF_TCX_EGRESS);
+}
+
+static void test_tc_opts_query_attach_target(int target)
+{
+       LIBBPF_OPTS(bpf_prog_attach_opts, opta);
+       LIBBPF_OPTS(bpf_prog_detach_opts, optd);
+       LIBBPF_OPTS(bpf_prog_query_opts, optq);
+       struct test_tc_link *skel;
+       __u32 prog_ids[2];
+       __u32 fd1, id1;
+       int err;
+
+       skel = test_tc_link__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_load"))
+               goto cleanup;
+
+       fd1 = bpf_program__fd(skel->progs.tc1);
+       id1 = id_from_prog_fd(fd1);
+
+       err = bpf_prog_query_opts(loopback, target, &optq);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup;
+
+       ASSERT_EQ(optq.count, 0, "count");
+       ASSERT_EQ(optq.revision, 1, "revision");
+
+       LIBBPF_OPTS_RESET(opta,
+               .expected_revision = optq.revision,
+       );
+
+       err = bpf_prog_attach_opts(fd1, loopback, target, &opta);
+       if (!ASSERT_EQ(err, 0, "prog_attach"))
+               goto cleanup;
+
+       memset(prog_ids, 0, sizeof(prog_ids));
+       optq.prog_ids = prog_ids;
+       optq.count = ARRAY_SIZE(prog_ids);
+
+       err = bpf_prog_query_opts(loopback, target, &optq);
+       if (!ASSERT_OK(err, "prog_query"))
+               goto cleanup1;
+
+       ASSERT_EQ(optq.count, 1, "count");
+       ASSERT_EQ(optq.revision, 2, "revision");
+       ASSERT_EQ(optq.prog_ids[0], id1, "prog_ids[0]");
+       ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]");
+
+cleanup1:
+       err = bpf_prog_detach_opts(fd1, loopback, target, &optd);
+       ASSERT_OK(err, "prog_detach");
+       assert_mprog_count(target, 0);
+cleanup:
+       test_tc_link__destroy(skel);
+}
+
+void serial_test_tc_opts_query_attach(void)
+{
+       test_tc_opts_query_attach_target(BPF_TCX_INGRESS);
+       test_tc_opts_query_attach_target(BPF_TCX_EGRESS);
+}
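
test_tc_opts_query_target() deliberately runs every query twice: first with prog_ids/link_ids left unset so the kernel only reports count and revision, then again with a user buffer to receive the program IDs. For the raw bpf(2) variant the buffer pointer has to be stored in a fixed-width bpf_attr field, which is what ptr_to_u64() is for; a typical definition (a sketch, the selftests carry an equivalent helper in their shared headers) is:

	/* Fold a user-space pointer into the __u64 representation that
	 * union bpf_attr uses for the BPF_PROG_QUERY output arrays.
	 */
	static inline __u64 ptr_to_u64(const void *ptr)
	{
		return (__u64)(unsigned long)ptr;
	}

The post-syscall check that attr.query.prog_ids still equals ptr_to_u64(prog_ids) verifies that the kernel fills the array without rewriting the pointer field itself.
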
index 290c21d..ce2c61d 100644 (file)
@@ -2,6 +2,7 @@
 /* Copyright (c) 2021 Facebook */
 #include <test_progs.h>
 #include "timer.skel.h"
+#include "timer_failure.skel.h"
 
 static int timer(struct timer *timer_skel)
 {
@@ -49,10 +50,11 @@ void serial_test_timer(void)
 
        timer_skel = timer__open_and_load();
        if (!ASSERT_OK_PTR(timer_skel, "timer_skel_load"))
-               goto cleanup;
+               return;
 
        err = timer(timer_skel);
        ASSERT_OK(err, "timer");
-cleanup:
        timer__destroy(timer_skel);
+
+       RUN_TESTS(timer_failure);
 }
diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
new file mode 100644 (file)
index 0000000..226d33b
--- /dev/null
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <time.h>
+#include <errno.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct elem {
+       struct bpf_timer t;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, int);
+       __type(value, struct elem);
+} timer_map SEC(".maps");
+
+static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+{
+       if (bpf_get_smp_processor_id() % 2)
+               return 1;
+       else
+               return 0;
+}
+
+SEC("fentry/bpf_fentry_test1")
+__failure __msg("should have been in (0x0; 0x0)")
+int BPF_PROG2(test_ret_1, int, a)
+{
+       int key = 0;
+       struct bpf_timer *timer;
+
+       timer = bpf_map_lookup_elem(&timer_map, &key);
+       if (timer) {
+               bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
+               bpf_timer_set_callback(timer, timer_cb_ret1);
+               bpf_timer_start(timer, 1000, 0);
+       }
+
+       return 0;
+}
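
timer_failure.c is a negative test: bpf_timer callbacks must return 0, and timer_cb_ret1() can return 1, so the verifier rejects the program with the return-range complaint matched by the __msg() annotation. For contrast, a callback the verifier accepts, sketched with the same prototype and map as above, simply returns 0:

	static int timer_cb_ok(void *map, int *key, struct bpf_timer *timer)
	{
		/* Timer callbacks may do work here, but must return 0. */
		return 0;
	}

RUN_TESTS(timer_failure) in the updated serial_test_timer() loads each program from the skeleton and checks that the expected __failure/__msg outcome is produced.
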
index 112bc1d..ce33d30 100644 (file)
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * tools/testing/selftests/kvm/include/kvm_util.h
- *
  * Copyright (C) 2018, Google LLC.
  */
 #ifndef SELFTEST_KVM_UCALL_COMMON_H
index 4fd0421..25bc61d 100644 (file)
@@ -68,6 +68,12 @@ struct xstate {
 #define XFEATURE_MASK_OPMASK           BIT_ULL(5)
 #define XFEATURE_MASK_ZMM_Hi256                BIT_ULL(6)
 #define XFEATURE_MASK_Hi16_ZMM         BIT_ULL(7)
+#define XFEATURE_MASK_PT               BIT_ULL(8)
+#define XFEATURE_MASK_PKRU             BIT_ULL(9)
+#define XFEATURE_MASK_PASID            BIT_ULL(10)
+#define XFEATURE_MASK_CET_USER         BIT_ULL(11)
+#define XFEATURE_MASK_CET_KERNEL       BIT_ULL(12)
+#define XFEATURE_MASK_LBR              BIT_ULL(15)
 #define XFEATURE_MASK_XTILE_CFG                BIT_ULL(17)
 #define XFEATURE_MASK_XTILE_DATA       BIT_ULL(18)
 
@@ -147,6 +153,7 @@ struct kvm_x86_cpu_feature {
 #define        X86_FEATURE_CLWB                KVM_X86_CPU_FEATURE(0x7, 0, EBX, 24)
 #define        X86_FEATURE_UMIP                KVM_X86_CPU_FEATURE(0x7, 0, ECX, 2)
 #define        X86_FEATURE_PKU                 KVM_X86_CPU_FEATURE(0x7, 0, ECX, 3)
+#define        X86_FEATURE_OSPKE               KVM_X86_CPU_FEATURE(0x7, 0, ECX, 4)
 #define        X86_FEATURE_LA57                KVM_X86_CPU_FEATURE(0x7, 0, ECX, 16)
 #define        X86_FEATURE_RDPID               KVM_X86_CPU_FEATURE(0x7, 0, ECX, 22)
 #define        X86_FEATURE_SGX_LC              KVM_X86_CPU_FEATURE(0x7, 0, ECX, 30)
@@ -553,6 +560,13 @@ static inline void xsetbv(u32 index, u64 value)
        __asm__ __volatile__("xsetbv" :: "a" (eax), "d" (edx), "c" (index));
 }
 
+static inline void wrpkru(u32 pkru)
+{
+       /* Note, ECX and EDX are architecturally required to be '0'. */
+       asm volatile(".byte 0x0f,0x01,0xef\n\t"
+                    : : "a" (pkru), "c"(0), "d"(0));
+}
+
 static inline struct desc_ptr get_gdt(void)
 {
        struct desc_ptr gdt;
@@ -908,6 +922,15 @@ static inline bool kvm_pmu_has(struct kvm_x86_pmu_feature feature)
               !kvm_cpu_has(feature.anti_feature);
 }
 
+static __always_inline uint64_t kvm_cpu_supported_xcr0(void)
+{
+       if (!kvm_cpu_has_p(X86_PROPERTY_SUPPORTED_XCR0_LO))
+               return 0;
+
+       return kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_LO) |
+              ((uint64_t)kvm_cpu_property(X86_PROPERTY_SUPPORTED_XCR0_HI) << 32);
+}
+
 static inline size_t kvm_cpuid2_size(int nr_entries)
 {
        return sizeof(struct kvm_cpuid2) +
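
kvm_cpu_supported_xcr0() reassembles KVM's supported XCR0 from the two 32-bit X86_PROPERTY_SUPPORTED_XCR0_{LO,HI} properties, and the added XFEATURE_MASK_* definitions give callers named bits to test against it. A hedged usage sketch (require_pkru_xfeature() is illustrative; TEST_REQUIRE is the selftests' skip macro):

	/* Illustrative: skip a PKRU-dependent test unless KVM reports the
	 * PKRU xfeature as supported in XCR0.
	 */
	static void require_pkru_xfeature(void)
	{
		TEST_REQUIRE(kvm_cpu_supported_xcr0() & XFEATURE_MASK_PKRU);
	}

wrpkru() emits the WRPKRU opcode bytes (0x0f 0x01 0xef) directly, with ECX and EDX pinned to zero as its comment notes the architecture requires.
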
index c4a69d8..7462751 100644 (file)
@@ -200,6 +200,13 @@ repeat:
                        ++fmt;
                }
 
+               /*
+                * Play nice with %llu, %llx, etc.  KVM selftests only support
+                * 64-bit builds, so just treat %ll* the same as %l*.
+                */
+               if (qualifier == 'l' && *fmt == 'l')
+                       ++fmt;
+
                /* default base */
                base = 10;
 
index 7168e25..89153a3 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * tools/testing/selftests/kvm/lib/x86_64/processor.c
- *
  * Copyright (C) 2021, Google LLC.
  */
 
index 20eb2e7..8698d1a 100644 (file)
@@ -1033,9 +1033,8 @@ static bool test_loop(const struct test_data *data,
                      struct test_result *rbestruntime)
 {
        uint64_t maxslots;
-       struct test_result result;
+       struct test_result result = {};
 
-       result.nloops = 0;
        if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
                          &result.nloops,
                          &result.slot_runtime, &result.guest_runtime)) {
@@ -1089,7 +1088,7 @@ int main(int argc, char *argv[])
                .seconds = 5,
                .runs = 1,
        };
-       struct test_result rbestslottime;
+       struct test_result rbestslottime = {};
        int tctr;
 
        if (!check_memory_sizes())
@@ -1098,11 +1097,10 @@ int main(int argc, char *argv[])
        if (!parse_args(argc, argv, &targs))
                return -1;
 
-       rbestslottime.slottimens = 0;
        for (tctr = targs.tfirst; tctr <= targs.tlast; tctr++) {
                const struct test_data *data = &tests[tctr];
                unsigned int runctr;
-               struct test_result rbestruntime;
+               struct test_result rbestruntime = {};
 
                if (tctr > targs.tfirst)
                        pr_info("\n");
@@ -1110,7 +1108,6 @@ int main(int argc, char *argv[])
                pr_info("Testing %s performance with %i runs, %d seconds each\n",
                        data->name, targs.runs, targs.seconds);
 
-               rbestruntime.runtimens = 0;
                for (runctr = 0; runctr < targs.runs; runctr++)
                        if (!test_loop(data, &targs,
                                       &rbestslottime, &rbestruntime))
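
The memslot_perf_test hunks trade piecemeal zeroing of single members (result.nloops = 0, rbestslottime.slottimens = 0, rbestruntime.runtimens = 0) for empty-brace initializers, which zero the whole struct. A small illustration of the idiom, using a hypothetical struct just to show the difference:

	struct best_times { uint64_t nloops, slottimens, runtimens; };

	struct best_times by_hand;      /* members indeterminate until each is assigned */
	struct best_times zeroed = {};  /* every member starts at 0, including fields
	                                 * added to the struct later */

That keeps the "best result so far" comparisons well defined even if struct test_result grows new members.
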
index e446d76..6c12785 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * KVM_GET/SET_* tests
- *
  * Copyright (C) 2022, Red Hat, Inc.
  *
  * Tests for Hyper-V extensions to SVM.
index 7f36c32..18ac5c1 100644 (file)
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * tools/testing/selftests/kvm/nx_huge_page_test.c
- *
  * Usage: to be run via nx_huge_page_test.sh, which does the necessary
  * environment setup and teardown
  *
index 0560149..7cbb409 100755 (executable)
@@ -4,7 +4,6 @@
 # Wrapper script which performs setup and cleanup for nx_huge_pages_test.
 # Makes use of root privileges to set up huge pages and KVM module parameters.
 #
-# tools/testing/selftests/kvm/nx_huge_page_test.sh
 # Copyright (C) 2022, Google LLC.
 
 set -e
index 4c4925a..88b58aa 100644 (file)
@@ -139,6 +139,83 @@ static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
 static void __attribute__((__flatten__)) guest_code(void *arg)
 {
        GUEST_SYNC(1);
+
+       if (this_cpu_has(X86_FEATURE_XSAVE)) {
+               uint64_t supported_xcr0 = this_cpu_supported_xcr0();
+               uint8_t buffer[4096];
+
+               memset(buffer, 0xcc, sizeof(buffer));
+
+               set_cr4(get_cr4() | X86_CR4_OSXSAVE);
+               GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSXSAVE));
+
+               xsetbv(0, xgetbv(0) | supported_xcr0);
+
+               /*
+                * Modify state for all supported xfeatures to take them out of
+                * their "init" state, i.e. to make them show up in XSTATE_BV.
+                *
+                * Note off-by-default features, e.g. AMX, are out of scope for
+                * this particular testcase as they have a different ABI.
+                */
+               GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_FP);
+               asm volatile ("fincstp");
+
+               GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_SSE);
+               asm volatile ("vmovdqu %0, %%xmm0" :: "m" (buffer));
+
+               if (supported_xcr0 & XFEATURE_MASK_YMM)
+                       asm volatile ("vmovdqu %0, %%ymm0" :: "m" (buffer));
+
+               if (supported_xcr0 & XFEATURE_MASK_AVX512) {
+                       asm volatile ("kmovq %0, %%k1" :: "r" (-1ull));
+                       asm volatile ("vmovupd %0, %%zmm0" :: "m" (buffer));
+                       asm volatile ("vmovupd %0, %%zmm16" :: "m" (buffer));
+               }
+
+               if (this_cpu_has(X86_FEATURE_MPX)) {
+                       uint64_t bounds[2] = { 10, 0xffffffffull };
+                       uint64_t output[2] = { };
+
+                       GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDREGS);
+                       GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_BNDCSR);
+
+                       /*
+                        * Don't bother trying to get BNDCSR into the INUSE
+                        * state.  MSR_IA32_BNDCFGS doesn't count as it isn't
+                        * managed via XSAVE/XRSTOR, and BNDCFGU can only be
+                        * modified by XRSTOR.  Stuffing XSTATE_BV in the host
+                        * is simpler than doing XRSTOR here in the guest.
+                        *
+                        * However, temporarily enable MPX in BNDCFGS so that
+                        * BNDMOV actually loads BND1.  If MPX isn't *fully*
+                        * enabled, all MPX instructions are treated as NOPs.
+                        *
+                        * Hand encode "bndmov (%rax),%bnd1" as support for MPX
+                        * mnemonics/registers has been removed from gcc and
+                        * clang (and was never fully supported by clang).
+                        */
+                       wrmsr(MSR_IA32_BNDCFGS, BIT_ULL(0));
+                       asm volatile (".byte 0x66,0x0f,0x1a,0x08" :: "a" (bounds));
+                       /*
+                        * Hand encode "bndmov %bnd1, (%rax)" to sanity check
+                        * that BND1 actually got loaded.
+                        */
+                       asm volatile (".byte 0x66,0x0f,0x1b,0x08" :: "a" (output));
+                       wrmsr(MSR_IA32_BNDCFGS, 0);
+
+                       GUEST_ASSERT_EQ(bounds[0], output[0]);
+                       GUEST_ASSERT_EQ(bounds[1], output[1]);
+               }
+               if (this_cpu_has(X86_FEATURE_PKU)) {
+                       GUEST_ASSERT(supported_xcr0 & XFEATURE_MASK_PKRU);
+                       set_cr4(get_cr4() | X86_CR4_PKE);
+                       GUEST_ASSERT(this_cpu_has(X86_FEATURE_OSPKE));
+
+                       wrpkru(-1u);
+               }
+       }
+
        GUEST_SYNC(2);
 
        if (arg) {
@@ -153,10 +230,11 @@ static void __attribute__((__flatten__)) guest_code(void *arg)
 
 int main(int argc, char *argv[])
 {
+       uint64_t *xstate_bv, saved_xstate_bv;
        vm_vaddr_t nested_gva = 0;
-
+       struct kvm_cpuid2 empty_cpuid = {};
        struct kvm_regs regs1, regs2;
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu, *vcpuN;
        struct kvm_vm *vm;
        struct kvm_x86_state *state;
        struct ucall uc;
@@ -209,6 +287,34 @@ int main(int argc, char *argv[])
                /* Restore state in a new VM.  */
                vcpu = vm_recreate_with_one_vcpu(vm);
                vcpu_load_state(vcpu, state);
+
+               /*
+                * Restore XSAVE state in a dummy vCPU, first without doing
+                * KVM_SET_CPUID2, and then with an empty guest CPUID.  Except
+                * for off-by-default xfeatures, e.g. AMX, KVM is supposed to
+                * allow KVM_SET_XSAVE regardless of guest CPUID.  Manually
+                * load only XSAVE state, MSRs in particular have a much more
+                * convoluted ABI.
+                *
+                * Load two versions of XSAVE state: one with the actual guest
+                * XSAVE state, and one with all supported features forced "on"
+                * in xstate_bv, e.g. to ensure that KVM allows loading all
+                * supported features, even if something goes awry in saving
+                * the original snapshot.
+                */
+               xstate_bv = (void *)&((uint8_t *)state->xsave->region)[512];
+               saved_xstate_bv = *xstate_bv;
+
+               vcpuN = __vm_vcpu_add(vm, vcpu->id + 1);
+               vcpu_xsave_set(vcpuN, state->xsave);
+               *xstate_bv = kvm_cpu_supported_xcr0();
+               vcpu_xsave_set(vcpuN, state->xsave);
+
+               vcpu_init_cpuid(vcpuN, &empty_cpuid);
+               vcpu_xsave_set(vcpuN, state->xsave);
+               *xstate_bv = saved_xstate_bv;
+               vcpu_xsave_set(vcpuN, state->xsave);
+
                kvm_x86_state_cleanup(state);
 
                memset(&regs2, 0, sizeof(regs2));
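
The xstate_bv pointer arithmetic above relies on the fixed XSAVE-area layout: the legacy FXSAVE region occupies bytes 0-511 and the 64-byte XSAVE header begins at byte 512, with XSTATE_BV as its first 64-bit word. Expressed as a sketch (the struct name is illustrative, not from the selftests):

	struct xsave_area_layout {
		uint8_t  legacy[512];     /* FXSAVE-format legacy region            */
		uint64_t xstate_bv;       /* XSTATE_BV: which xfeatures are present */
		uint64_t xcomp_bv;        /* XCOMP_BV: compacted-format indicator   */
		uint8_t  rsvd[48];        /* remainder of the 64-byte XSAVE header  */
		/* extended state components follow */
	};

Forcing xstate_bv to kvm_cpu_supported_xcr0() before the second vcpu_xsave_set() exercises KVM_SET_XSAVE with every supported xfeature marked present, independent of what the guest actually dirtied.
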
index 5b66981..59c7304 100644 (file)
@@ -1,10 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * svm_vmcall_test
- *
  * Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
  */
 
 #include "test_util.h"
index 05898ad..9ec9ab6 100644 (file)
@@ -1,10 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * svm_vmcall_test
- *
  * Copyright © 2021 Amazon.com, Inc. or its affiliates.
- *
- * Xen shared_info / pvclock testing
  */
 
 #include "test_util.h"
index 8b01707..4a2881d 100644 (file)
@@ -34,6 +34,7 @@ TEST_PROGS += gro.sh
 TEST_PROGS += gre_gso.sh
 TEST_PROGS += cmsg_so_mark.sh
 TEST_PROGS += cmsg_time.sh cmsg_ipv6.sh
+TEST_PROGS += netns-name.sh
 TEST_PROGS += srv6_end_dt46_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt4_l3vpn_test.sh
 TEST_PROGS += srv6_end_dt6_l3vpn_test.sh
index e7d2a53..66d0db7 100755 (executable)
@@ -2437,6 +2437,9 @@ ipv4_mpath_list_test()
        run_cmd "ip -n ns2 route add 203.0.113.0/24
                nexthop via 172.16.201.2 nexthop via 172.16.202.2"
        run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.fib_multipath_hash_policy=1"
+       run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.veth2.rp_filter=0"
+       run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=0"
+       run_cmd "ip netns exec ns2 sysctl -qw net.ipv4.conf.default.rp_filter=0"
        set +e
 
        local dmac=$(ip -n ns2 -j link show dev veth2 | jq -r '.[]["address"]')
@@ -2449,7 +2452,7 @@ ipv4_mpath_list_test()
        # words, the FIB lookup tracepoint needs to be triggered for every
        # packet.
        local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
-       run_cmd "perf stat -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+       run_cmd "perf stat -a -e fib:fib_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
        local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
        local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
        list_rcv_eval $tmp_file $diff
@@ -2494,7 +2497,7 @@ ipv6_mpath_list_test()
        # words, the FIB lookup tracepoint needs to be triggered for every
        # packet.
        local t0_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
-       run_cmd "perf stat -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
+       run_cmd "perf stat -a -e fib6:fib6_table_lookup --filter 'err == 0' -j -o $tmp_file -- $cmd"
        local t1_rx_pkts=$(link_stats_get ns2 veth2 rx packets)
        local diff=$(echo $t1_rx_pkts - $t0_rx_pkts | bc -l)
        list_rcv_eval $tmp_file $diff
index ee1f89a..dc895b7 100755 (executable)
@@ -1432,7 +1432,9 @@ chk_rst_nr()
        count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
        if [ -z "$count" ]; then
                print_skip
-       elif [ $count -lt $rst_tx ]; then
+       # accept more rst than expected except if we don't expect any
+       elif { [ $rst_tx -ne 0 ] && [ $count -lt $rst_tx ]; } ||
+            { [ $rst_tx -eq 0 ] && [ $count -ne 0 ]; }; then
                fail_test "got $count MP_RST[s] TX expected $rst_tx"
        else
                print_ok
@@ -1442,7 +1444,9 @@ chk_rst_nr()
        count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
        if [ -z "$count" ]; then
                print_skip
-       elif [ "$count" -lt "$rst_rx" ]; then
+       # accept more rst than expected except if we don't expect any
+       elif { [ $rst_rx -ne 0 ] && [ $count -lt $rst_rx ]; } ||
+            { [ $rst_rx -eq 0 ] && [ $count -ne 0 ]; }; then
                fail_test "got $count MP_RST[s] RX expected $rst_rx"
        else
                print_ok
@@ -2305,6 +2309,7 @@ remove_tests()
                chk_join_nr 1 1 1
                chk_rm_tx_nr 1
                chk_rm_nr 1 1
+               chk_rst_nr 0 0
        fi
 
        # multiple subflows, remove
@@ -2317,6 +2322,7 @@ remove_tests()
                        run_tests $ns1 $ns2 10.0.1.1
                chk_join_nr 2 2 2
                chk_rm_nr 2 2
+               chk_rst_nr 0 0
        fi
 
        # single address, remove
@@ -2329,6 +2335,7 @@ remove_tests()
                chk_join_nr 1 1 1
                chk_add_nr 1 1
                chk_rm_nr 1 1 invert
+               chk_rst_nr 0 0
        fi
 
        # subflow and signal, remove
@@ -2342,6 +2349,7 @@ remove_tests()
                chk_join_nr 2 2 2
                chk_add_nr 1 1
                chk_rm_nr 1 1
+               chk_rst_nr 0 0
        fi
 
        # subflows and signal, remove
@@ -2356,6 +2364,7 @@ remove_tests()
                chk_join_nr 3 3 3
                chk_add_nr 1 1
                chk_rm_nr 2 2
+               chk_rst_nr 0 0
        fi
 
        # addresses remove
@@ -2370,6 +2379,7 @@ remove_tests()
                chk_join_nr 3 3 3
                chk_add_nr 3 3
                chk_rm_nr 3 3 invert
+               chk_rst_nr 0 0
        fi
 
        # invalid addresses remove
@@ -2384,6 +2394,7 @@ remove_tests()
                chk_join_nr 1 1 1
                chk_add_nr 3 3
                chk_rm_nr 3 1 invert
+               chk_rst_nr 0 0
        fi
 
        # subflows and signal, flush
@@ -2398,6 +2409,7 @@ remove_tests()
                chk_join_nr 3 3 3
                chk_add_nr 1 1
                chk_rm_nr 1 3 invert simult
+               chk_rst_nr 0 0
        fi
 
        # subflows flush
@@ -2417,6 +2429,7 @@ remove_tests()
                else
                        chk_rm_nr 3 3
                fi
+               chk_rst_nr 0 0
        fi
 
        # addresses flush
@@ -2431,6 +2444,7 @@ remove_tests()
                chk_join_nr 3 3 3
                chk_add_nr 3 3
                chk_rm_nr 3 3 invert simult
+               chk_rst_nr 0 0
        fi
 
        # invalid addresses flush
@@ -2445,6 +2459,7 @@ remove_tests()
                chk_join_nr 1 1 1
                chk_add_nr 3 3
                chk_rm_nr 3 1 invert
+               chk_rst_nr 0 0
        fi
 
        # remove id 0 subflow
@@ -2456,6 +2471,7 @@ remove_tests()
                        run_tests $ns1 $ns2 10.0.1.1
                chk_join_nr 1 1 1
                chk_rm_nr 1 1
+               chk_rst_nr 0 0
        fi
 
        # remove id 0 address
@@ -2468,6 +2484,7 @@ remove_tests()
                chk_join_nr 1 1 1
                chk_add_nr 1 1
                chk_rm_nr 1 1 invert
+               chk_rst_nr 0 0 invert
        fi
 }
 
diff --git a/tools/testing/selftests/net/netns-name.sh b/tools/testing/selftests/net/netns-name.sh
new file mode 100755 (executable)
index 0000000..7d3d3fc
--- /dev/null
@@ -0,0 +1,87 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+set -o pipefail
+
+NS=netns-name-test
+DEV=dummy-dev0
+DEV2=dummy-dev1
+ALT_NAME=some-alt-name
+
+RET_CODE=0
+
+cleanup() {
+    ip netns del $NS
+}
+
+trap cleanup EXIT
+
+fail() {
+    echo "ERROR: ${1:-unexpected return code} (ret: $_)" >&2
+    RET_CODE=1
+}
+
+ip netns add $NS
+
+#
+# Test basic move without a rename
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 ||
+    fail "Can't perform a netns move"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip link del $DEV || fail
+
+#
+# Test move with a conflict
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 2> /dev/null &&
+    fail "Performed a netns move with a name conflict"
+ip link show dev $DEV >> /dev/null || fail "Device not found after move"
+ip -netns $NS link del $DEV || fail
+ip link del $DEV || fail
+
+#
+# Test move with a conflict and rename
+#
+ip link add name $DEV type dummy
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link set dev $DEV netns 1 name $DEV2 ||
+    fail "Can't perform a netns move with rename"
+ip link del $DEV2 || fail
+ip link del $DEV || fail
+
+#
+# Test dup alt-name with netns move
+#
+ip link add name $DEV type dummy || fail
+ip link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link add name $DEV2 type dummy || fail
+ip -netns $NS link property add dev $DEV2 altname $ALT_NAME || fail
+
+ip -netns $NS link set dev $DEV2 netns 1 2> /dev/null &&
+    fail "Moved with alt-name dup"
+
+ip link del $DEV || fail
+ip -netns $NS link del $DEV2 || fail
+
+#
+# Test creating alt-name in one net-ns and using in another
+#
+ip -netns $NS link add name $DEV type dummy || fail
+ip -netns $NS link property add dev $DEV altname $ALT_NAME || fail
+ip -netns $NS link set dev $DEV netns 1 || fail
+ip link show dev $ALT_NAME >> /dev/null || fail "Can't find alt-name after move"
+ip  -netns $NS link show dev $ALT_NAME 2> /dev/null &&
+    fail "Can still find alt-name after move"
+ip link del $DEV || fail
+
+echo -ne "$(basename $0) \t\t\t\t"
+if [ $RET_CODE -eq 0 ]; then
+    echo "[  OK  ]"
+else
+    echo "[ FAIL ]"
+fi
+exit $RET_CODE
index 9c2012d..f8499d4 100755 (executable)
@@ -3,6 +3,8 @@
 #
 # OVS kernel module self tests
 
+trap ovs_exit_sig EXIT TERM INT ERR
+
 # Kselftest framework requirement - SKIP code is 4.
 ksft_skip=4
 
@@ -142,6 +144,12 @@ ovs_add_flow () {
        return 0
 }
 
+ovs_del_flows () {
+       info "Deleting all flows from DP: sbx:$1 br:$2"
+       ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py del-flows "$2"
+       return 0
+}
+
 ovs_drop_record_and_run () {
        local sbx=$1
        shift
@@ -198,6 +206,17 @@ test_drop_reason() {
        ip netns exec server ip addr add 172.31.110.20/24 dev s1
        ip netns exec server ip link set s1 up
 
+       # Check if drop reasons can be sent
+       ovs_add_flow "test_drop_reason" dropreason \
+               'in_port(1),eth(),eth_type(0x0806),arp()' 'drop(10)' 2>/dev/null
+       if [ $? == 1 ]; then
+               info "no support for drop reasons - skipping"
+               ovs_exit_sig
+               return $ksft_skip
+       fi
+
+       ovs_del_flows "test_drop_reason" dropreason
+
        # Allow ARP
        ovs_add_flow "test_drop_reason" dropreason \
                'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1
@@ -525,7 +544,7 @@ run_test() {
        fi
 
        if python3 ovs-dpctl.py -h 2>&1 | \
-            grep "Need to install the python" >/dev/null 2>&1; then
+            grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then
                stdbuf -o0 printf "TEST: %-60s  [PYLIB]\n" "${tdesc}"
                return $ksft_skip
        fi
index 912dc8c..b97e621 100644 (file)
@@ -28,8 +28,10 @@ try:
     from pyroute2.netlink import nlmsg_atoms
     from pyroute2.netlink.exceptions import NetlinkError
     from pyroute2.netlink.generic import GenericNetlinkSocket
+    import pyroute2
+
 except ModuleNotFoundError:
-    print("Need to install the python pyroute2 package.")
+    print("Need to install the python pyroute2 package >= 0.6.")
     sys.exit(0)
 
 
@@ -1117,12 +1119,14 @@ class ovskey(nla):
                 "src",
                 lambda x: str(ipaddress.IPv4Address(x)),
                 int,
+                convert_ipv4,
             ),
             (
                 "dst",
                 "dst",
-                lambda x: str(ipaddress.IPv6Address(x)),
+                lambda x: str(ipaddress.IPv4Address(x)),
                 int,
+                convert_ipv4,
             ),
             ("tp_src", "tp_src", "%d", int),
             ("tp_dst", "tp_dst", "%d", int),
@@ -1904,6 +1908,32 @@ class OvsFlow(GenericNetlinkSocket):
             raise ne
         return reply
 
+    def del_flows(self, dpifindex):
+        """
+        Send a del message to the kernel that will drop all flows.
+
+        dpifindex should be a valid datapath obtained by calling
+        into the OvsDatapath lookup
+        """
+
+        flowmsg = OvsFlow.ovs_flow_msg()
+        flowmsg["cmd"] = OVS_FLOW_CMD_DEL
+        flowmsg["version"] = OVS_DATAPATH_VERSION
+        flowmsg["reserved"] = 0
+        flowmsg["dpifindex"] = dpifindex
+
+        try:
+            reply = self.nlm_request(
+                flowmsg,
+                msg_type=self.prid,
+                msg_flags=NLM_F_REQUEST | NLM_F_ACK,
+            )
+            reply = reply[0]
+        except NetlinkError as ne:
+            print(flowmsg)
+            raise ne
+        return reply
+
     def dump(self, dpifindex, flowspec=None):
         """
         Returns a list of messages containing flows.
@@ -1998,6 +2028,12 @@ def main(argv):
     nlmsg_atoms.ovskey = ovskey
     nlmsg_atoms.ovsactions = ovsactions
 
+    # version check for pyroute2
+    prverscheck = pyroute2.__version__.split(".")
+    if int(prverscheck[0]) == 0 and int(prverscheck[1]) < 6:
+        print("Need to upgrade the python pyroute2 package to >= 0.6.")
+        sys.exit(0)
+
     parser = argparse.ArgumentParser()
     parser.add_argument(
         "-v",
@@ -2060,6 +2096,9 @@ def main(argv):
     addflcmd.add_argument("flow", help="Flow specification")
     addflcmd.add_argument("acts", help="Flow actions")
 
+    delfscmd = subparsers.add_parser("del-flows")
+    delfscmd.add_argument("flsbr", help="Datapath name")
+
     args = parser.parse_args()
 
     if args.verbose > 0:
@@ -2143,6 +2182,11 @@ def main(argv):
         flow = OvsFlow.ovs_flow_msg()
         flow.parse(args.flow, args.acts, rep["dpifindex"])
         ovsflow.add_flow(rep["dpifindex"], flow)
+    elif hasattr(args, "flsbr"):
+        rep = ovsdp.info(args.flsbr, 0)
+        if rep is None:
+            print("DP '%s' not found." % args.flsbr)
+        ovsflow.del_flows(rep["dpifindex"])
 
     return 0
 
index bb34329..99ed5bd 100755 (executable)
@@ -11,6 +11,12 @@ nft --version >/dev/null 2>&1 || {
        exit $SKIP_RC
 }
 
+# Run everything in a separate network namespace
+[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
+
+# give other scripts a chance to finish - audit_logread sees all activity
+sleep 1
+
 logfile=$(mktemp)
 rulefile=$(mktemp)
 echo "logging into $logfile"
@@ -93,6 +99,12 @@ do_test 'nft add counter t1 c1' \
 do_test 'nft add counter t2 c1; add counter t2 c2' \
 'table=t2 family=2 entries=2 op=nft_register_obj'
 
+for ((i = 3; i <= 500; i++)); do
+       echo "add counter t2 c$i"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
 # adding/updating quotas
 
 do_test 'nft add quota t1 q1 { 10 bytes }' \
@@ -101,6 +113,12 @@ do_test 'nft add quota t1 q1 { 10 bytes }' \
 do_test 'nft add quota t2 q1 { 10 bytes }; add quota t2 q2 { 10 bytes }' \
 'table=t2 family=2 entries=2 op=nft_register_obj'
 
+for ((i = 3; i <= 500; i++)); do
+       echo "add quota t2 q$i { 10 bytes }"
+done >$rulefile
+do_test "nft -f $rulefile" \
+'table=t2 family=2 entries=498 op=nft_register_obj'
+
 # changing the quota value triggers obj update path
 do_test 'nft add quota t1 q1 { 20 bytes }' \
 'table=t1 family=2 entries=1 op=nft_register_obj'
@@ -150,6 +168,40 @@ done
 do_test 'nft reset set t1 s' \
 'table=t1 family=2 entries=3 op=nft_reset_setelem'
 
+# resetting counters
+
+do_test 'nft reset counter t1 c1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset counters t2' \
+'table=t2 family=2 entries=342 op=nft_reset_obj
+table=t2 family=2 entries=158 op=nft_reset_obj'
+
+do_test 'nft reset counters' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=341 op=nft_reset_obj
+table=t2 family=2 entries=159 op=nft_reset_obj'
+
+# resetting quotas
+
+do_test 'nft reset quota t1 q1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t1' \
+'table=t1 family=2 entries=1 op=nft_reset_obj'
+
+do_test 'nft reset quotas t2' \
+'table=t2 family=2 entries=315 op=nft_reset_obj
+table=t2 family=2 entries=185 op=nft_reset_obj'
+
+do_test 'nft reset quotas' \
+'table=t1 family=2 entries=1 op=nft_reset_obj
+table=t2 family=2 entries=314 op=nft_reset_obj
+table=t2 family=2 entries=186 op=nft_reset_obj'
+
 # deleting rules
 
 readarray -t handles < <(nft -a list chain t1 c1 | \
index 11e0f05..c333263 100644 (file)
@@ -5,11 +5,11 @@
 # Additional include paths needed by kselftest.h and local headers
 CFLAGS += -D_GNU_SOURCE -std=gnu99 -I.
 
-TEST_GEN_FILES := testcases/mmap_default testcases/mmap_bottomup
+TEST_GEN_FILES := mmap_default mmap_bottomup
 
-TEST_PROGS := testcases/run_mmap.sh
+TEST_PROGS := run_mmap.sh
 
 include ../../lib.mk
 
-$(OUTPUT)/mm: testcases/mmap_default.c testcases/mmap_bottomup.c testcases/mmap_tests.h
+$(OUTPUT)/mm: mmap_default.c mmap_bottomup.c mmap_tests.h
        $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <sys/mman.h>
-#include <testcases/mmap_test.h>
+#include <mmap_test.h>
 
 #include "../../kselftest_harness.h"
 
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <sys/mman.h>
-#include <testcases/mmap_test.h>
+#include <mmap_test.h>
 
 #include "../../kselftest_harness.h"