Merge tag 's390-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jun 2023 16:29:51 +0000 (09:29 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 9 Jun 2023 16:29:51 +0000 (09:29 -0700)
Pull s390 fixes from Alexander Gordeev:

 - Avoid a linker error for randomly generated config files that have
   CONFIG_BRANCH_PROFILE_NONE enabled, and make the purgatory build
   similar to riscv, x86, and commit 4bf3ec384edf ("s390: disable
   branch profiling for vdso"); the mechanism is sketched below.

 - Currently, if the device is offline and all the channel paths are
   either configured or varied offline, the associated subchannel gets
   unregistered. Don't unregister the subchannel; instead, unregister
   the offline device.

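As background, here is a minimal userspace sketch (deliberately simplified,
not the kernel's exact macros from include/linux/compiler.h) of what
disabling branch profiling buys: with CONFIG_TRACE_BRANCH_PROFILING,
likely()/unlikely() expand to an instrumented helper that records branch
outcomes, and objects that must not reference the tracing core -- such as
the vdso and the purgatory -- define DISABLE_BRANCH_PROFILING so the
macros fall back to a plain __builtin_expect() hint and no tracer symbols
reach the linker. The branch_check() helper below is hypothetical.

    #include <stdio.h>

    #if defined(CONFIG_TRACE_BRANCH_PROFILING) && !defined(DISABLE_BRANCH_PROFILING)
    /* hypothetical stand-in for the kernel's branch-outcome bookkeeping */
    static long branch_hits;
    static inline long branch_check(long cond)
    {
            branch_hits += !!cond;
            return cond;
    }
    # define likely(x)   __builtin_expect(branch_check(!!(x)), 1)
    # define unlikely(x) __builtin_expect(branch_check(!!(x)), 0)
    #else
    /* plain compiler hints -- what DISABLE_BRANCH_PROFILING selects */
    # define likely(x)   __builtin_expect(!!(x), 1)
    # define unlikely(x) __builtin_expect(!!(x), 0)
    #endif

    int main(void)
    {
            if (likely(1))
                    puts("hot path");
            return 0;
    }
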
* tag 's390-6.4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/purgatory: disable branch profiling
  s390/cio: unregister device when the only path is gone

1329 files changed:
.mailmap
CREDITS
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/cifs/changes.rst
Documentation/admin-guide/cifs/usage.rst
Documentation/admin-guide/quickly-build-trimmed-linux.rst
Documentation/cdrom/index.rst
Documentation/devicetree/bindings/ata/ceva,ahci-1v84.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/fpga/lattice,sysconfig.yaml
Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
Documentation/devicetree/bindings/iio/adc/renesas,rcar-gyroadc.yaml
Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
Documentation/devicetree/bindings/net/can/st,stm32-bxcan.yaml
Documentation/devicetree/bindings/serial/8250_omap.yaml
Documentation/devicetree/bindings/sound/tas2562.yaml
Documentation/devicetree/bindings/sound/tas2770.yaml
Documentation/devicetree/bindings/sound/tas27xx.yaml
Documentation/devicetree/bindings/sound/tlv320aic32x4.txt
Documentation/devicetree/bindings/usb/cdns,usb3.yaml
Documentation/devicetree/bindings/usb/snps,dwc3.yaml
Documentation/filesystems/index.rst
Documentation/filesystems/ramfs-rootfs-initramfs.rst
Documentation/filesystems/sharedsubtree.rst
Documentation/filesystems/smb/cifsroot.rst [moved from Documentation/filesystems/cifs/cifsroot.rst with 97% similarity]
Documentation/filesystems/smb/index.rst [moved from Documentation/filesystems/cifs/index.rst with 100% similarity]
Documentation/filesystems/smb/ksmbd.rst [moved from Documentation/filesystems/cifs/ksmbd.rst with 100% similarity]
Documentation/fpga/index.rst
Documentation/locking/index.rst
Documentation/mm/page_table_check.rst
Documentation/netlink/specs/ethtool.yaml
Documentation/netlink/specs/handshake.yaml
Documentation/networking/device_drivers/ethernet/mellanox/mlx5/devlink.rst
Documentation/networking/ip-sysctl.rst
Documentation/networking/tls-handshake.rst
Documentation/pcmcia/index.rst
Documentation/process/maintainer-netdev.rst
Documentation/staging/crc32.rst
Documentation/timers/index.rst
Documentation/trace/histogram.rst
Documentation/userspace-api/ioctl/ioctl-number.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/imx6qdl-mba6.dtsi
arch/arm/boot/dts/imx6ull-dhcor-som.dtsi
arch/arm/boot/dts/stm32f429.dtsi
arch/arm/boot/dts/stm32f7-pinctrl.dtsi
arch/arm/boot/dts/vexpress-v2p-ca5s.dts
arch/arm/include/asm/arm_pmuv3.h
arch/arm64/Kconfig
arch/arm64/boot/dts/arm/foundation-v8.dtsi
arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts
arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
arch/arm64/boot/dts/freescale/imx8-ss-conn.dtsi
arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mp.dtsi
arch/arm64/boot/dts/freescale/imx8x-colibri-eval-v3.dtsi
arch/arm64/boot/dts/freescale/imx8x-colibri-iris.dtsi
arch/arm64/boot/dts/freescale/imx8x-colibri.dtsi
arch/arm64/include/asm/arm_pmuv3.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/kvm_pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/kernel/mte.c
arch/arm64/kernel/vdso.c
arch/arm64/kvm/fpsimd.c
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/pmu-emul.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-init.c
arch/arm64/kvm/vgic/vgic-its.c
arch/arm64/kvm/vgic/vgic-kvm-device.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic-mmio.c
arch/arm64/kvm/vgic/vgic-v2.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/kvm/vgic/vgic-v4.c
arch/arm64/kvm/vmid.c
arch/arm64/mm/copypage.c
arch/arm64/mm/fault.c
arch/m68k/kernel/signal.c
arch/mips/Kconfig
arch/mips/alchemy/common/dbdma.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/setup.c
arch/parisc/Kconfig
arch/parisc/Kconfig.debug
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/spinlock.h
arch/parisc/include/asm/spinlock_types.h
arch/parisc/kernel/alternative.c
arch/parisc/kernel/cache.c
arch/parisc/kernel/pci-dma.c
arch/parisc/kernel/process.c
arch/parisc/kernel/traps.c
arch/powerpc/Kconfig
arch/powerpc/boot/Makefile
arch/powerpc/crypto/Kconfig
arch/powerpc/crypto/Makefile
arch/powerpc/crypto/aes-gcm-p10-glue.c
arch/powerpc/crypto/aesp10-ppc.pl [moved from arch/powerpc/crypto/aesp8-ppc.pl with 99% similarity]
arch/powerpc/crypto/ghashp10-ppc.pl [moved from arch/powerpc/crypto/ghashp8-ppc.pl with 97% similarity]
arch/powerpc/include/asm/iommu.h
arch/powerpc/kernel/dma-iommu.c
arch/powerpc/kernel/iommu.c
arch/powerpc/kernel/isa-bridge.c
arch/powerpc/mm/book3s64/radix_pgtable.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/platforms/Kconfig
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/xmon/xmon.c
arch/riscv/Kconfig
arch/riscv/errata/Makefile
arch/riscv/include/asm/hugetlb.h
arch/riscv/include/asm/perf_event.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/probes/Makefile
arch/riscv/mm/hugetlbpage.c
arch/riscv/mm/init.c
arch/s390/kernel/Makefile
arch/um/drivers/Makefile
arch/um/drivers/harddog.h [new file with mode: 0644]
arch/um/drivers/harddog_kern.c
arch/um/drivers/harddog_user.c
arch/um/drivers/harddog_user_exp.c [new file with mode: 0644]
arch/x86/crypto/aria-aesni-avx-asm_64.S
arch/x86/events/intel/core.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/include/asm/fpu/sched.h
arch/x86/include/asm/vmx.h
arch/x86/kernel/Makefile
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/fpu/context.h
arch/x86/kernel/fpu/core.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/sgx.c
arch/x86/kvm/x86.c
arch/x86/lib/copy_user_64.S
arch/x86/mm/init.c
arch/x86/pci/xen.c
arch/xtensa/kernel/signal.c
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/lib/Makefile
arch/xtensa/lib/bswapdi2.S [new file with mode: 0644]
arch/xtensa/lib/bswapsi2.S [new file with mode: 0644]
block/blk-core.c
block/blk-map.c
block/blk-mq-tag.c
block/blk-settings.c
block/blk-wbt.c
block/fops.c
crypto/asymmetric_keys/public_key.c
drivers/accel/ivpu/Kconfig
drivers/accel/ivpu/ivpu_hw_mtl.c
drivers/accel/ivpu/ivpu_hw_mtl_reg.h
drivers/accel/ivpu/ivpu_ipc.c
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_mmu.c
drivers/accel/qaic/qaic_control.c
drivers/accel/qaic/qaic_data.c
drivers/accel/qaic/qaic_drv.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/bert.c
drivers/acpi/resource.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
drivers/android/binder_alloc_selftest.c
drivers/ata/libata-scsi.c
drivers/base/cacheinfo.c
drivers/base/class.c
drivers/base/firmware_loader/main.c
drivers/base/regmap/Kconfig
drivers/base/regmap/regcache-maple.c
drivers/base/regmap/regmap-sdw.c
drivers/base/regmap/regmap.c
drivers/block/ublk_drv.c
drivers/block/xen-blkfront.c
drivers/bluetooth/btnxpuart.c
drivers/bluetooth/hci_qca.c
drivers/char/agp/parisc-agp.c
drivers/char/tpm/tpm-chip.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/tpm_tis.c
drivers/char/tpm/tpm_tis_core.c
drivers/char/tpm/tpm_tis_core.h
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/cxl/core/mbox.c
drivers/cxl/core/pci.c
drivers/cxl/core/port.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/cxlpci.h
drivers/cxl/mem.c
drivers/cxl/pci.c
drivers/cxl/port.c
drivers/dma/at_hdmac.c
drivers/dma/at_xdmac.c
drivers/dma/idxd/cdev.c
drivers/dma/pl330.c
drivers/dma/ti/k3-udma.c
drivers/firmware/arm_ffa/bus.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/raw_mode.c
drivers/firmware/efi/libstub/Makefile.zboot
drivers/firmware/efi/libstub/efistub.h
drivers/gpio/Kconfig
drivers/gpio/gpio-f7188x.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpio-sim.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
drivers/gpu/drm/amd/display/dc/link/link_validation.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/ast/ast_dp.c
drivers/gpu/drm/ast/ast_drv.h
drivers/gpu/drm/ast/ast_main.c
drivers/gpu/drm/ast/ast_mode.c
drivers/gpu/drm/ast/ast_post.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_managed.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_g2d.h
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_dp_aux.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gt/selftest_execlists.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/lima/lima_sched.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_3_0_msm8998.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_0_sm8250.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_2_sc7180.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_3_sm6115.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_5_qcm2290.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_0_sm8350.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_7_2_sc7280.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
drivers/gpu/drm/msm/dp/dp_audio.c
drivers/gpu/drm/msm/dp/dp_audio.h
drivers/gpu/drm/msm/dp/dp_catalog.c
drivers/gpu/drm/msm/dp/dp_catalog.h
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/pl111/pl111_display.c
drivers/gpu/drm/pl111/pl111_drm.h
drivers/gpu/drm/pl111/pl111_drv.c
drivers/gpu/drm/pl111/pl111_versatile.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-ids.h
drivers/hid/hid-logitech-hidpp.c
drivers/hid/wacom_sys.c
drivers/hid/wacom_wac.c
drivers/hwtracing/coresight/coresight-etm-perf.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/iio/accel/kionix-kx022a.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/ad4130.c
drivers/iio/adc/ad7192.c
drivers/iio/adc/ad_sigma_delta.c
drivers/iio/adc/imx93_adc.c
drivers/iio/adc/mt6370-adc.c
drivers/iio/adc/mxs-lradc-adc.c
drivers/iio/adc/palmas_gpadc.c
drivers/iio/adc/stm32-adc.c
drivers/iio/addac/ad74413r.c
drivers/iio/dac/Makefile
drivers/iio/dac/mcp4725.c
drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
drivers/iio/industrialio-gts-helper.c
drivers/iio/light/rohm-bu27034.c
drivers/iio/light/vcnl4035.c
drivers/iio/magnetometer/tmag5273.c
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/efa/efa_verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.h
drivers/infiniband/hw/hns/hns_roce_mr.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_net.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_recv.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/input/input.c
drivers/input/joystick/xpad.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/elantech.c
drivers/input/touchscreen/cyttsp5.c
drivers/iommu/Kconfig
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/init.c
drivers/iommu/amd/iommu.c
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/mtk_iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-gic-common.c
drivers/irqchip/irq-gic-common.h
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mbigen.c
drivers/irqchip/irq-meson-gpio.c
drivers/irqchip/irq-mips-gic.c
drivers/leds/rgb/leds-qcom-lpg.c
drivers/mailbox/mailbox-test.c
drivers/md/raid5.c
drivers/media/cec/core/cec-adap.c
drivers/media/cec/core/cec-core.c
drivers/media/cec/core/cec-priv.h
drivers/media/dvb-core/dvb_ca_en50221.c
drivers/media/dvb-core/dvb_demux.c
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/mn88443x.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_stateful.c
drivers/media/platform/qcom/camss/camss-video.c
drivers/media/platform/verisilicon/hantro_v4l2.c
drivers/media/usb/dvb-usb-v2/ce6230.c
drivers/media/usb/dvb-usb-v2/ec168.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/dvb-usb/az6027.c
drivers/media/usb/dvb-usb/digitv.c
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/pvrusb2/Kconfig
drivers/media/usb/ttusb-dec/ttusb_dec.c
drivers/media/usb/uvc/uvc_driver.c
drivers/media/v4l2-core/v4l2-mc.c
drivers/misc/fastrpc.c
drivers/mmc/core/block.c
drivers/mmc/core/pwrseq_sd8787.c
drivers/mmc/host/sdhci-cadence.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/vub300.c
drivers/mtd/mtdchar.c
drivers/mtd/nand/raw/ingenic/ingenic_ecc.h
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/spansion.c
drivers/net/bonding/bond_main.c
drivers/net/can/Kconfig
drivers/net/can/bxcan.c
drivers/net/can/dev/skb.c
drivers/net/can/kvaser_pciefd.c
drivers/net/dsa/lan9303-core.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/dsa/mv88e6xxx/port.h
drivers/net/dsa/qca/Kconfig
drivers/net/dsa/rzn1_a5psw.c
drivers/net/dsa/rzn1_a5psw.h
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/3com/3c589_cs.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/8390/smc-ultra.c
drivers/net/ethernet/8390/wd.c
drivers/net/ethernet/amd/lance.c
drivers/net/ethernet/amd/pds_core/dev.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_common.h
drivers/net/ethernet/intel/ice/ice_dcb_lib.c
drivers/net/ethernet/intel/ice/ice_gnss.c
drivers/net/ethernet/intel/ice/ice_gnss.h
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_sriov.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_vf_lib.c
drivers/net/ethernet/intel/ice/ice_vf_lib.h
drivers/net/ethernet/intel/ice/ice_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.h
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microsoft/mana/mana_en.c
drivers/net/ethernet/microsoft/mana/mana_ethtool.c
drivers/net/ethernet/netronome/nfp/nic/main.h
drivers/net/ethernet/nvidia/forcedeth.c
drivers/net/ethernet/qlogic/qed/qed_l2.c
drivers/net/ethernet/qlogic/qede/qede.h
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/sfc/ef100_netdev.c
drivers/net/ethernet/sfc/efx_devlink.c
drivers/net/ethernet/sfc/tc.c
drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ipa/ipa_endpoint.c
drivers/net/mdio/mdio-i2c.c
drivers/net/pcs/pcs-xpcs.c
drivers/net/phy/dp83867.c
drivers/net/phy/mscc/mscc.h
drivers/net/phy/mscc/mscc_main.c
drivers/net/phy/mxl-gpy.c
drivers/net/phy/phylink.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/virtio_net.c
drivers/net/wireless/broadcom/b43/b43.h
drivers/net/wireless/broadcom/b43legacy/b43legacy.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/link.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/rfi.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/mediatek/mt76/mt7615/mac.c
drivers/net/wireless/mediatek/mt76/mt76_connac2_mac.h
drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
drivers/net/wireless/mediatek/mt76/mt7996/mac.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
drivers/net/wireless/realtek/rtw88/mac80211.c
drivers/net/wireless/realtek/rtw88/main.c
drivers/net/wireless/realtek/rtw88/main.h
drivers/net/wireless/realtek/rtw88/ps.c
drivers/net/wireless/realtek/rtw88/ps.h
drivers/net/wireless/realtek/rtw88/sdio.c
drivers/net/wireless/realtek/rtw88/usb.h
drivers/net/wireless/realtek/rtw89/core.c
drivers/net/wireless/realtek/rtw89/mac.c
drivers/net/wireless/realtek/rtw89/mac.h
drivers/net/wireless/realtek/rtw89/mac80211.c
drivers/net/wireless/realtek/rtw89/ps.c
drivers/net/wireless/realtek/rtw89/ps.h
drivers/net/wireless/realtek/rtw89/rtw8852b.c
drivers/net/wireless/virtual/mac80211_hwsim.c
drivers/net/wwan/iosm/iosm_ipc_imem.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
drivers/net/wwan/t7xx/t7xx_pci.c
drivers/net/wwan/t7xx/t7xx_pci.h
drivers/nfc/nfcsim.c
drivers/nvme/host/constants.c
drivers/nvme/host/core.c
drivers/nvme/host/hwmon.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/passthru.c
drivers/pci/quirks.c
drivers/phy/amlogic/phy-meson-g12a-mipi-dphy-analog.c
drivers/phy/mediatek/phy-mtk-hdmi-mt8195.c
drivers/phy/qualcomm/phy-qcom-qmp-combo.c
drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.c
drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c
drivers/pinctrl/meson/pinctrl-meson-axg.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/surface_aggregator_tabletsw.c
drivers/platform/x86/amd/pmf/core.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/intel/ifs/load.c
drivers/platform/x86/intel/int3472/clk_and_regulator.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/power/supply/ab8500_btemp.c
drivers/power/supply/ab8500_fg.c
drivers/power/supply/axp288_fuel_gauge.c
drivers/power/supply/bq24190_charger.c
drivers/power/supply/bq25890_charger.c
drivers/power/supply/bq27xxx_battery.c
drivers/power/supply/bq27xxx_battery_i2c.c
drivers/power/supply/mt6360_charger.c
drivers/power/supply/power_supply_core.c
drivers/power/supply/power_supply_leds.c
drivers/power/supply/power_supply_sysfs.c
drivers/power/supply/rt9467-charger.c
drivers/power/supply/sbs-charger.c
drivers/power/supply/sc27xx_fuel_gauge.c
drivers/regulator/core.c
drivers/regulator/mt6359-regulator.c
drivers/regulator/pca9450-regulator.c
drivers/s390/block/dasd_eckd.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/scsi_lib.c
drivers/scsi/stex.c
drivers/scsi/storvsc_drv.c
drivers/soc/fsl/qe/Kconfig
drivers/spi/spi-cadence.c
drivers/spi/spi-dw-mmio.c
drivers/spi/spi-fsl-lpspi.c
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi-qup.c
drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
drivers/staging/media/imx/imx8mq-mipi-csi2.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/tee/optee/smc_abi.c
drivers/thermal/intel/int340x_thermal/int3400_thermal.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/nhi_regs.h
drivers/tty/serial/8250/8250_bcm7271.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/8250_tegra.c
drivers/tty/serial/Kconfig
drivers/tty/serial/arc_uart.c
drivers/tty/serial/cpm_uart/cpm_uart.h
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/vt/vc_screen.c
drivers/ufs/core/ufs-mcq.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/class/usbtmc.c
drivers/usb/core/buffer.c
drivers/usb/core/devio.c
drivers/usb/dwc3/core.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/debugfs.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/gadget/udc/amd5536udc_pci.c
drivers/usb/gadget/udc/core.c
drivers/usb/host/uhci-pci.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/storage/scsiglue.c
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tipd/core.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/vhost.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/arcfb.c
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/au1100fb.c
drivers/video/fbdev/au1200fb.c
drivers/video/fbdev/broadsheetfb.c
drivers/video/fbdev/bw2.c
drivers/video/fbdev/core/bitblit.c
drivers/video/fbdev/core/fbmem.c
drivers/video/fbdev/i810/i810_dvt.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/matrox/matroxfb_maven.c
drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
drivers/video/fbdev/ssd1307fb.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/udlfb.c
drivers/xen/pvcalls-back.c
fs/Kconfig
fs/Makefile
fs/afs/dir.c
fs/btrfs/bio.c
fs/btrfs/block-group.c
fs/btrfs/disk-io.c
fs/btrfs/file-item.c
fs/btrfs/scrub.c
fs/btrfs/tree-log.c
fs/ceph/mds_client.c
fs/ceph/snap.c
fs/coredump.c
fs/erofs/Kconfig
fs/erofs/Makefile
fs/erofs/internal.h
fs/erofs/xattr.c
fs/erofs/zdata.c
fs/ext4/ext4.h
fs/ext4/fsync.c
fs/ext4/inode.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/gfs2/file.c
fs/lockd/svc.c
fs/nfs/dir.c
fs/nfs/nfs4proc.c
fs/nfsd/nfsctl.c
fs/nfsd/trace.h
fs/nfsd/vfs.c
fs/nilfs2/inode.c
fs/smb/Kconfig [new file with mode: 0644]
fs/smb/Makefile [new file with mode: 0644]
fs/smb/client/Kconfig [moved from fs/cifs/Kconfig with 100% similarity]
fs/smb/client/Makefile [moved from fs/cifs/Makefile with 100% similarity]
fs/smb/client/asn1.c [moved from fs/cifs/asn1.c with 100% similarity]
fs/smb/client/cached_dir.c [moved from fs/cifs/cached_dir.c with 100% similarity]
fs/smb/client/cached_dir.h [moved from fs/cifs/cached_dir.h with 100% similarity]
fs/smb/client/cifs_debug.c [moved from fs/cifs/cifs_debug.c with 99% similarity]
fs/smb/client/cifs_debug.h [moved from fs/cifs/cifs_debug.h with 100% similarity]
fs/smb/client/cifs_dfs_ref.c [moved from fs/cifs/cifs_dfs_ref.c with 100% similarity]
fs/smb/client/cifs_fs_sb.h [moved from fs/cifs/cifs_fs_sb.h with 100% similarity]
fs/smb/client/cifs_ioctl.h [moved from fs/cifs/cifs_ioctl.h with 100% similarity]
fs/smb/client/cifs_spnego.c [moved from fs/cifs/cifs_spnego.c with 100% similarity]
fs/smb/client/cifs_spnego.h [moved from fs/cifs/cifs_spnego.h with 100% similarity]
fs/smb/client/cifs_spnego_negtokeninit.asn1 [moved from fs/cifs/cifs_spnego_negtokeninit.asn1 with 100% similarity]
fs/smb/client/cifs_swn.c [moved from fs/cifs/cifs_swn.c with 100% similarity]
fs/smb/client/cifs_swn.h [moved from fs/cifs/cifs_swn.h with 100% similarity]
fs/smb/client/cifs_unicode.c [moved from fs/cifs/cifs_unicode.c with 100% similarity]
fs/smb/client/cifs_unicode.h [moved from fs/cifs/cifs_unicode.h with 100% similarity]
fs/smb/client/cifs_uniupr.h [moved from fs/cifs/cifs_uniupr.h with 100% similarity]
fs/smb/client/cifsacl.c [moved from fs/cifs/cifsacl.c with 100% similarity]
fs/smb/client/cifsacl.h [moved from fs/cifs/cifsacl.h with 100% similarity]
fs/smb/client/cifsencrypt.c [moved from fs/cifs/cifsencrypt.c with 99% similarity]
fs/smb/client/cifsfs.c [moved from fs/cifs/cifsfs.c with 100% similarity]
fs/smb/client/cifsfs.h [moved from fs/cifs/cifsfs.h with 100% similarity]
fs/smb/client/cifsglob.h [moved from fs/cifs/cifsglob.h with 99% similarity]
fs/smb/client/cifspdu.h [moved from fs/cifs/cifspdu.h with 99% similarity]
fs/smb/client/cifsproto.h [moved from fs/cifs/cifsproto.h with 100% similarity]
fs/smb/client/cifsroot.c [moved from fs/cifs/cifsroot.c with 100% similarity]
fs/smb/client/cifssmb.c [moved from fs/cifs/cifssmb.c with 100% similarity]
fs/smb/client/connect.c [moved from fs/cifs/connect.c with 100% similarity]
fs/smb/client/dfs.c [moved from fs/cifs/dfs.c with 99% similarity]
fs/smb/client/dfs.h [moved from fs/cifs/dfs.h with 100% similarity]
fs/smb/client/dfs_cache.c [moved from fs/cifs/dfs_cache.c with 100% similarity]
fs/smb/client/dfs_cache.h [moved from fs/cifs/dfs_cache.h with 100% similarity]
fs/smb/client/dir.c [moved from fs/cifs/dir.c with 100% similarity]
fs/smb/client/dns_resolve.c [moved from fs/cifs/dns_resolve.c with 100% similarity]
fs/smb/client/dns_resolve.h [moved from fs/cifs/dns_resolve.h with 100% similarity]
fs/smb/client/export.c [moved from fs/cifs/export.c with 100% similarity]
fs/smb/client/file.c [moved from fs/cifs/file.c with 99% similarity]
fs/smb/client/fs_context.c [moved from fs/cifs/fs_context.c with 99% similarity]
fs/smb/client/fs_context.h [moved from fs/cifs/fs_context.h with 100% similarity]
fs/smb/client/fscache.c [moved from fs/cifs/fscache.c with 100% similarity]
fs/smb/client/fscache.h [moved from fs/cifs/fscache.h with 100% similarity]
fs/smb/client/inode.c [moved from fs/cifs/inode.c with 100% similarity]
fs/smb/client/ioctl.c [moved from fs/cifs/ioctl.c with 98% similarity]
fs/smb/client/link.c [moved from fs/cifs/link.c with 100% similarity]
fs/smb/client/misc.c [moved from fs/cifs/misc.c with 100% similarity]
fs/smb/client/netlink.c [moved from fs/cifs/netlink.c with 100% similarity]
fs/smb/client/netlink.h [moved from fs/cifs/netlink.h with 100% similarity]
fs/smb/client/netmisc.c [moved from fs/cifs/netmisc.c with 100% similarity]
fs/smb/client/nterr.c [moved from fs/cifs/nterr.c with 100% similarity]
fs/smb/client/nterr.h [moved from fs/cifs/nterr.h with 100% similarity]
fs/smb/client/ntlmssp.h [moved from fs/cifs/ntlmssp.h with 100% similarity]
fs/smb/client/readdir.c [moved from fs/cifs/readdir.c with 100% similarity]
fs/smb/client/rfc1002pdu.h [moved from fs/cifs/rfc1002pdu.h with 100% similarity]
fs/smb/client/sess.c [moved from fs/cifs/sess.c with 100% similarity]
fs/smb/client/smb1ops.c [moved from fs/cifs/smb1ops.c with 99% similarity]
fs/smb/client/smb2file.c [moved from fs/cifs/smb2file.c with 100% similarity]
fs/smb/client/smb2glob.h [moved from fs/cifs/smb2glob.h with 100% similarity]
fs/smb/client/smb2inode.c [moved from fs/cifs/smb2inode.c with 100% similarity]
fs/smb/client/smb2maperror.c [moved from fs/cifs/smb2maperror.c with 100% similarity]
fs/smb/client/smb2misc.c [moved from fs/cifs/smb2misc.c with 100% similarity]
fs/smb/client/smb2ops.c [moved from fs/cifs/smb2ops.c with 99% similarity]
fs/smb/client/smb2pdu.c [moved from fs/cifs/smb2pdu.c with 99% similarity]
fs/smb/client/smb2pdu.h [moved from fs/cifs/smb2pdu.h with 100% similarity]
fs/smb/client/smb2proto.h [moved from fs/cifs/smb2proto.h with 100% similarity]
fs/smb/client/smb2status.h [moved from fs/cifs/smb2status.h with 100% similarity]
fs/smb/client/smb2transport.c [moved from fs/cifs/smb2transport.c with 100% similarity]
fs/smb/client/smbdirect.c [moved from fs/cifs/smbdirect.c with 100% similarity]
fs/smb/client/smbdirect.h [moved from fs/cifs/smbdirect.h with 100% similarity]
fs/smb/client/smbencrypt.c [moved from fs/cifs/smbencrypt.c with 98% similarity]
fs/smb/client/smberr.h [moved from fs/cifs/smberr.h with 100% similarity]
fs/smb/client/trace.c [moved from fs/cifs/trace.c with 100% similarity]
fs/smb/client/trace.h [moved from fs/cifs/trace.h with 100% similarity]
fs/smb/client/transport.c [moved from fs/cifs/transport.c with 100% similarity]
fs/smb/client/unc.c [moved from fs/cifs/unc.c with 100% similarity]
fs/smb/client/winucase.c [moved from fs/cifs/winucase.c with 100% similarity]
fs/smb/client/xattr.c [moved from fs/cifs/xattr.c with 100% similarity]
fs/smb/common/Makefile [moved from fs/smbfs_common/Makefile with 59% similarity]
fs/smb/common/arc4.h [moved from fs/smbfs_common/arc4.h with 100% similarity]
fs/smb/common/cifs_arc4.c [moved from fs/smbfs_common/cifs_arc4.c with 100% similarity]
fs/smb/common/cifs_md4.c [moved from fs/smbfs_common/cifs_md4.c with 100% similarity]
fs/smb/common/md4.h [moved from fs/smbfs_common/md4.h with 100% similarity]
fs/smb/common/smb2pdu.h [moved from fs/smbfs_common/smb2pdu.h with 100% similarity]
fs/smb/common/smbfsctl.h [moved from fs/smbfs_common/smbfsctl.h with 100% similarity]
fs/smb/server/Kconfig [moved from fs/ksmbd/Kconfig with 100% similarity]
fs/smb/server/Makefile [moved from fs/ksmbd/Makefile with 100% similarity]
fs/smb/server/asn1.c [moved from fs/ksmbd/asn1.c with 100% similarity]
fs/smb/server/asn1.h [moved from fs/ksmbd/asn1.h with 100% similarity]
fs/smb/server/auth.c [moved from fs/ksmbd/auth.c with 99% similarity]
fs/smb/server/auth.h [moved from fs/ksmbd/auth.h with 100% similarity]
fs/smb/server/connection.c [moved from fs/ksmbd/connection.c with 99% similarity]
fs/smb/server/connection.h [moved from fs/ksmbd/connection.h with 100% similarity]
fs/smb/server/crypto_ctx.c [moved from fs/ksmbd/crypto_ctx.c with 100% similarity]
fs/smb/server/crypto_ctx.h [moved from fs/ksmbd/crypto_ctx.h with 100% similarity]
fs/smb/server/glob.h [moved from fs/ksmbd/glob.h with 100% similarity]
fs/smb/server/ksmbd_netlink.h [moved from fs/ksmbd/ksmbd_netlink.h with 100% similarity]
fs/smb/server/ksmbd_spnego_negtokeninit.asn1 [moved from fs/ksmbd/ksmbd_spnego_negtokeninit.asn1 with 100% similarity]
fs/smb/server/ksmbd_spnego_negtokentarg.asn1 [moved from fs/ksmbd/ksmbd_spnego_negtokentarg.asn1 with 100% similarity]
fs/smb/server/ksmbd_work.c [moved from fs/ksmbd/ksmbd_work.c with 100% similarity]
fs/smb/server/ksmbd_work.h [moved from fs/ksmbd/ksmbd_work.h with 100% similarity]
fs/smb/server/mgmt/ksmbd_ida.c [moved from fs/ksmbd/mgmt/ksmbd_ida.c with 100% similarity]
fs/smb/server/mgmt/ksmbd_ida.h [moved from fs/ksmbd/mgmt/ksmbd_ida.h with 100% similarity]
fs/smb/server/mgmt/share_config.c [moved from fs/ksmbd/mgmt/share_config.c with 100% similarity]
fs/smb/server/mgmt/share_config.h [moved from fs/ksmbd/mgmt/share_config.h with 100% similarity]
fs/smb/server/mgmt/tree_connect.c [moved from fs/ksmbd/mgmt/tree_connect.c with 100% similarity]
fs/smb/server/mgmt/tree_connect.h [moved from fs/ksmbd/mgmt/tree_connect.h with 100% similarity]
fs/smb/server/mgmt/user_config.c [moved from fs/ksmbd/mgmt/user_config.c with 100% similarity]
fs/smb/server/mgmt/user_config.h [moved from fs/ksmbd/mgmt/user_config.h with 100% similarity]
fs/smb/server/mgmt/user_session.c [moved from fs/ksmbd/mgmt/user_session.c with 100% similarity]
fs/smb/server/mgmt/user_session.h [moved from fs/ksmbd/mgmt/user_session.h with 100% similarity]
fs/smb/server/misc.c [moved from fs/ksmbd/misc.c with 100% similarity]
fs/smb/server/misc.h [moved from fs/ksmbd/misc.h with 100% similarity]
fs/smb/server/ndr.c [moved from fs/ksmbd/ndr.c with 100% similarity]
fs/smb/server/ndr.h [moved from fs/ksmbd/ndr.h with 100% similarity]
fs/smb/server/nterr.h [moved from fs/ksmbd/nterr.h with 100% similarity]
fs/smb/server/ntlmssp.h [moved from fs/ksmbd/ntlmssp.h with 100% similarity]
fs/smb/server/oplock.c [moved from fs/ksmbd/oplock.c with 97% similarity]
fs/smb/server/oplock.h [moved from fs/ksmbd/oplock.h with 99% similarity]
fs/smb/server/server.c [moved from fs/ksmbd/server.c with 100% similarity]
fs/smb/server/server.h [moved from fs/ksmbd/server.h with 100% similarity]
fs/smb/server/smb2misc.c [moved from fs/ksmbd/smb2misc.c with 98% similarity]
fs/smb/server/smb2ops.c [moved from fs/ksmbd/smb2ops.c with 100% similarity]
fs/smb/server/smb2pdu.c [moved from fs/ksmbd/smb2pdu.c with 99% similarity]
fs/smb/server/smb2pdu.h [moved from fs/ksmbd/smb2pdu.h with 100% similarity]
fs/smb/server/smb_common.c [moved from fs/ksmbd/smb_common.c with 100% similarity]
fs/smb/server/smb_common.h [moved from fs/ksmbd/smb_common.h with 99% similarity]
fs/smb/server/smbacl.c [moved from fs/ksmbd/smbacl.c with 100% similarity]
fs/smb/server/smbacl.h [moved from fs/ksmbd/smbacl.h with 100% similarity]
fs/smb/server/smbfsctl.h [moved from fs/ksmbd/smbfsctl.h with 98% similarity]
fs/smb/server/smbstatus.h [moved from fs/ksmbd/smbstatus.h with 99% similarity]
fs/smb/server/transport_ipc.c [moved from fs/ksmbd/transport_ipc.c with 100% similarity]
fs/smb/server/transport_ipc.h [moved from fs/ksmbd/transport_ipc.h with 100% similarity]
fs/smb/server/transport_rdma.c [moved from fs/ksmbd/transport_rdma.c with 100% similarity]
fs/smb/server/transport_rdma.h [moved from fs/ksmbd/transport_rdma.h with 100% similarity]
fs/smb/server/transport_tcp.c [moved from fs/ksmbd/transport_tcp.c with 100% similarity]
fs/smb/server/transport_tcp.h [moved from fs/ksmbd/transport_tcp.h with 100% similarity]
fs/smb/server/unicode.c [moved from fs/ksmbd/unicode.c with 100% similarity]
fs/smb/server/unicode.h [moved from fs/ksmbd/unicode.h with 100% similarity]
fs/smb/server/uniupr.h [moved from fs/ksmbd/uniupr.h with 100% similarity]
fs/smb/server/vfs.c [moved from fs/ksmbd/vfs.c with 99% similarity]
fs/smb/server/vfs.h [moved from fs/ksmbd/vfs.h with 100% similarity]
fs/smb/server/vfs_cache.c [moved from fs/ksmbd/vfs_cache.c with 100% similarity]
fs/smb/server/vfs_cache.h [moved from fs/ksmbd/vfs_cache.h with 100% similarity]
fs/smb/server/xattr.h [moved from fs/ksmbd/xattr.h with 100% similarity]
fs/xattr.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_alloc.c
fs/xfs/libxfs/xfs_alloc.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_refcount.c
fs/xfs/libxfs/xfs_trans_inode.c
fs/xfs/scrub/bmap.c
fs/xfs/scrub/scrub.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_icache.h
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_inode_item.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_super.c
fs/xfs/xfs_trans.c
include/asm-generic/vmlinux.lds.h
include/drm/drm_managed.h
include/linux/arm_ffa.h
include/linux/blkdev.h
include/linux/compiler.h
include/linux/cper.h
include/linux/device/class.h
include/linux/efi.h
include/linux/firewire.h
include/linux/fs.h
include/linux/if_team.h
include/linux/iio/iio-gts-helper.h
include/linux/lockdep.h
include/linux/lockdep_types.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/msi.h
include/linux/netdevice.h
include/linux/page-flags.h
include/linux/pe.h
include/linux/phy.h
include/linux/power/bq27xxx_battery.h
include/linux/sched/task.h
include/linux/sched/vhost_task.h
include/linux/shrinker.h
include/linux/skbuff.h
include/linux/skmsg.h
include/linux/sunrpc/svc_rdma.h
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/svcsock.h
include/linux/surface_aggregator/device.h
include/linux/tpm.h
include/linux/trace_events.h
include/linux/usb/composite.h
include/linux/usb/hcd.h
include/linux/user_events.h
include/media/dvb_frontend.h
include/media/dvb_net.h
include/media/dvbdev.h
include/media/v4l2-subdev.h
include/net/bluetooth/hci.h
include/net/bluetooth/hci_core.h
include/net/bonding.h
include/net/handshake.h
include/net/ip.h
include/net/mana/mana.h
include/net/neighbour.h
include/net/netns/ipv6.h
include/net/nexthop.h
include/net/page_pool.h
include/net/ping.h
include/net/pkt_sched.h
include/net/rpl.h
include/net/sch_generic.h
include/net/sock.h
include/net/tcp.h
include/net/tls.h
include/sound/hda-mlink.h
include/sound/soc-acpi.h
include/sound/soc-dpcm.h
include/target/iscsi/iscsi_target_core.h
include/uapi/linux/bpf.h
include/uapi/linux/handshake.h
include/uapi/linux/in.h
include/uapi/sound/skl-tplg-interface.h
include/uapi/sound/sof/tokens.h
include/ufs/ufshcd.h
io_uring/epoll.c
io_uring/sqpoll.c
kernel/bpf/hashtab.c
kernel/bpf/map_in_map.c
kernel/bpf/offload.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup/cgroup-v1.c
kernel/cgroup/cgroup.c
kernel/exit.c
kernel/fork.c
kernel/irq/msi.c
kernel/locking/lockdep.c
kernel/module/decompress.c
kernel/module/main.c
kernel/module/stats.c
kernel/signal.c
kernel/trace/bpf_trace.c
kernel/trace/fprobe.c
kernel/trace/rethook.c
kernel/trace/trace.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_user.c
kernel/trace/trace_osnoise.c
kernel/trace/trace_probe.h
kernel/trace/trace_selftest.c
kernel/vhost_task.c
lib/cpu_rmap.c
lib/debugobjects.c
lib/maple_tree.c
lib/test_firmware.c
mm/Kconfig.debug
mm/kfence/kfence.h
mm/page_table_check.c
mm/shrinker_debug.c
mm/vmscan.c
mm/zsmalloc.c
mm/zswap.c
net/8021q/vlan_dev.c
net/atm/resources.c
net/batman-adv/distributed-arp-table.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/l2cap_core.c
net/bridge/br_private_tunnel.h
net/can/isotp.c
net/can/j1939/main.c
net/can/j1939/socket.c
net/core/dev.c
net/core/page_pool.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/skmsg.c
net/core/sock.c
net/core/sock_map.c
net/devlink/core.c
net/devlink/devl_internal.h
net/devlink/leftover.c
net/handshake/handshake-test.c
net/handshake/handshake.h
net/handshake/netlink.c
net/handshake/request.c
net/handshake/tlshd.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/ip_sockglue.c
net/ipv4/raw.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bpf.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv4/udplite.c
net/ipv6/exthdrs.c
net/ipv6/exthdrs_core.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_gre.c
net/ipv6/raw.c
net/ipv6/route.c
net/ipv6/udplite.c
net/key/af_key.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/he.c
net/mac80211/ieee80211_i.h
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mptcp/pm.c
net/mptcp/pm_netlink.c
net/mptcp/pm_userspace.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_bitwise.c
net/netfilter/nft_set_rbtree.c
net/netlink/af_netlink.c
net/netrom/nr_subr.c
net/nsh/nsh.c
net/openvswitch/datapath.c
net/openvswitch/vport.c
net/packet/af_packet.c
net/packet/diag.c
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/local_event.c
net/sched/act_police.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/sched/sch_api.c
net/sched/sch_fq_pie.c
net/sched/sch_generic.c
net/sched/sch_ingress.c
net/sched/sch_mq.c
net/sched/sch_mqprio.c
net/sched/sch_pie.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/sched/sch_teql.c
net/sctp/transport.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_llc.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/sched.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/bearer.c
net/tipc/bearer.h
net/tipc/link.c
net/tipc/udp_media.c
net/tls/tls.h
net/tls/tls_device.c
net/tls/tls_strp.c
net/tls/tls_sw.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/core.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/scan.c
net/xfrm/xfrm_device.c
net/xfrm/xfrm_interface_core.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/bpf/hbm.c
security/selinux/Makefile
sound/core/oss/pcm_plugin.h
sound/firewire/digi00x/digi00x-stream.c
sound/hda/hdac_device.c
sound/isa/gus/gus_pcm.c
sound/pci/cmipci.c
sound/pci/cs46xx/cs46xx_lib.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/aureon.c
sound/pci/ice1712/ice1712.c
sound/pci/ice1712/ice1724.c
sound/pci/ymfpci/ymfpci_main.c
sound/soc/amd/ps/pci-ps.c
sound/soc/amd/ps/ps-pdm-dma.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs35l41-lib.c
sound/soc/codecs/cs35l56.c
sound/soc/codecs/lpass-tx-macro.c
sound/soc/codecs/max98363.c
sound/soc/codecs/nau8824.c
sound/soc/codecs/rt5682-i2c.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/rt5682.h
sound/soc/codecs/ssm2602.c
sound/soc/codecs/wcd938x-sdw.c
sound/soc/codecs/wsa881x.c
sound/soc/codecs/wsa883x.c
sound/soc/dwc/dwc-i2s.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_sai.h
sound/soc/generic/simple-card-utils.c
sound/soc/generic/simple-card.c
sound/soc/intel/avs/apl.c
sound/soc/intel/avs/avs.h
sound/soc/intel/avs/board_selection.c
sound/soc/intel/avs/control.c
sound/soc/intel/avs/dsp.c
sound/soc/intel/avs/messages.h
sound/soc/intel/avs/path.h
sound/soc/intel/avs/pcm.c
sound/soc/intel/avs/probes.c
sound/soc/jz4740/jz4740-i2s.c
sound/soc/mediatek/mt8186/mt8186-afe-clk.c
sound/soc/mediatek/mt8186/mt8186-afe-clk.h
sound/soc/mediatek/mt8186/mt8186-afe-pcm.c
sound/soc/mediatek/mt8186/mt8186-audsys-clk.c
sound/soc/mediatek/mt8186/mt8186-audsys-clk.h
sound/soc/mediatek/mt8188/mt8188-afe-clk.c
sound/soc/mediatek/mt8188/mt8188-afe-clk.h
sound/soc/mediatek/mt8188/mt8188-afe-pcm.c
sound/soc/mediatek/mt8188/mt8188-audsys-clk.c
sound/soc/mediatek/mt8188/mt8188-audsys-clk.h
sound/soc/mediatek/mt8195/mt8195-afe-clk.c
sound/soc/mediatek/mt8195/mt8195-afe-clk.h
sound/soc/mediatek/mt8195/mt8195-afe-pcm.c
sound/soc/mediatek/mt8195/mt8195-audsys-clk.c
sound/soc/mediatek/mt8195/mt8195-audsys-clk.h
sound/soc/soc-pcm.c
sound/soc/sof/amd/acp-ipc.c
sound/soc/sof/debug.c
sound/soc/sof/intel/hda-mlink.c
sound/soc/sof/ipc3-topology.c
sound/soc/sof/ipc4-topology.c
sound/soc/sof/pcm.c
sound/soc/sof/pm.c
sound/soc/sof/sof-client-probes.c
sound/soc/sof/topology.c
sound/usb/format.c
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/asm/cpufeatures.h
tools/arch/x86/include/asm/disabled-features.h
tools/arch/x86/include/asm/msr-index.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/prctl.h
tools/arch/x86/include/uapi/asm/unistd_32.h
tools/arch/x86/lib/memcpy_64.S
tools/arch/x86/lib/memset_64.S
tools/gpio/lsgpio.c
tools/include/asm/alternative.h
tools/include/linux/coresight-pmu.h
tools/include/uapi/drm/drm.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/bpf.h
tools/include/uapi/linux/const.h
tools/include/uapi/linux/in.h
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/prctl.h
tools/include/uapi/sound/asound.h
tools/lib/bpf/libbpf.c
tools/lib/bpf/libbpf_probes.c
tools/net/ynl/lib/ynl.py
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/arm/util/cs-etm.c
tools/perf/arch/arm/util/pmu.c
tools/perf/arch/arm64/util/header.c
tools/perf/arch/arm64/util/pmu.c
tools/perf/arch/s390/entry/syscalls/syscall.tbl
tools/perf/bench/mem-memcpy-x86-64-asm-def.h
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memset-x86-64-asm-def.h
tools/perf/bench/mem-memset-x86-64-asm.S
tools/perf/builtin-ftrace.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json
tools/perf/pmu-events/arch/x86/alderlaken/adln-metrics.json
tools/perf/pmu-events/arch/x86/broadwell/bdw-metrics.json
tools/perf/pmu-events/arch/x86/broadwellde/bdwde-metrics.json
tools/perf/pmu-events/arch/x86/broadwellx/bdx-metrics.json
tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json
tools/perf/pmu-events/arch/x86/haswell/hsw-metrics.json
tools/perf/pmu-events/arch/x86/haswellx/hsx-metrics.json
tools/perf/pmu-events/arch/x86/icelake/icl-metrics.json
tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json
tools/perf/pmu-events/arch/x86/ivybridge/ivb-metrics.json
tools/perf/pmu-events/arch/x86/ivytown/ivt-metrics.json
tools/perf/pmu-events/arch/x86/jaketown/jkt-metrics.json
tools/perf/pmu-events/arch/x86/sandybridge/snb-metrics.json
tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json
tools/perf/pmu-events/arch/x86/skylake/skl-metrics.json
tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json
tools/perf/pmu-events/arch/x86/tigerlake/tgl-metrics.json
tools/perf/pmu-events/jevents.py
tools/perf/pmu-events/pmu-events.h
tools/perf/tests/attr.py
tools/perf/tests/attr/base-stat
tools/perf/tests/attr/test-stat-default
tools/perf/tests/attr/test-stat-detailed-1
tools/perf/tests/attr/test-stat-detailed-2
tools/perf/tests/attr/test-stat-detailed-3
tools/perf/tests/expr.c
tools/perf/tests/parse-metric.c
tools/perf/tests/shell/stat.sh
tools/perf/tests/shell/test_intel_pt.sh
tools/perf/tests/shell/test_java_symbol.sh
tools/perf/trace/beauty/arch_prctl.c
tools/perf/trace/beauty/x86_arch_prctl.sh
tools/perf/util/Build
tools/perf/util/bpf_skel/lock_contention.bpf.c
tools/perf/util/bpf_skel/sample_filter.bpf.c
tools/perf/util/bpf_skel/vmlinux.h
tools/perf/util/cs-etm.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/expr.y
tools/perf/util/metricgroup.c
tools/perf/util/parse-events.c
tools/perf/util/stat-display.c
tools/perf/util/stat-shadow.c
tools/perf/util/symbol-elf.c
tools/power/cpupower/lib/powercap.c
tools/power/cpupower/utils/idle_monitor/mperf_monitor.c
tools/testing/cxl/Kbuild
tools/testing/cxl/test/mem.c
tools/testing/cxl/test/mock.c
tools/testing/selftests/alsa/pcm-test.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
tools/testing/selftests/bpf/progs/inner_array_lookup.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_sockmap_kern.h
tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c [new file with mode: 0644]
tools/testing/selftests/ftrace/Makefile
tools/testing/selftests/ftrace/ftracetest
tools/testing/selftests/ftrace/ftracetest-ktap [new file with mode: 0755]
tools/testing/selftests/ftrace/test.d/filter/event-filter-function.tc
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc [new file with mode: 0644]
tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack.tc
tools/testing/selftests/gpio/gpio-sim.sh
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c [new file with mode: 0644]
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/fib_nexthops.sh
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/mptcp/Makefile
tools/testing/selftests/net/mptcp/diag.sh
tools/testing/selftests/net/mptcp/mptcp_connect.sh
tools/testing/selftests/net/mptcp/mptcp_join.sh
tools/testing/selftests/net/mptcp/mptcp_lib.sh [new file with mode: 0644]
tools/testing/selftests/net/mptcp/mptcp_sockopt.sh
tools/testing/selftests/net/mptcp/pm_netlink.sh
tools/testing/selftests/net/mptcp/simult_flows.sh
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/net/srv6_end_dt4_l3vpn_test.sh
tools/testing/selftests/sgx/Makefile
virt/kvm/kvm_main.c

diff --git a/.mailmap b/.mailmap
index 71127b2..bf076bb 100644
--- a/.mailmap
+++ b/.mailmap
@@ -364,6 +364,11 @@ Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
 Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.de>
 Nicolas Saenz Julienne <nsaenz@kernel.org> <nsaenzjulienne@suse.com>
 Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Nikolay Aleksandrov <razor@blackwall.org> <naleksan@redhat.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@redhat.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@cumulusnetworks.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@nvidia.com>
+Nikolay Aleksandrov <razor@blackwall.org> <nikolay@isovalent.com>
 Oleksandr Natalenko <oleksandr@natalenko.name> <oleksandr@redhat.com>
 Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
 Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
diff --git a/CREDITS b/CREDITS
index 2d9da9a..de7e4db 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1706,6 +1706,10 @@ S: Panoramastrasse 18
 S: D-69126 Heidelberg
 S: Germany
 
+N: Neil Horman
+M: nhorman@tuxdriver.com
+D: SCTP protocol maintainer.
+
 N: Simon Horman
 M: horms@verge.net.au
 D: Renesas ARM/ARM64 SoC maintainer
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index f67c082..e592a93 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1213,23 +1213,25 @@ PAGE_SIZE multiple when read back.
        A read-write single value file which exists on non-root
        cgroups.  The default is "max".
 
-       Memory usage throttle limit.  This is the main mechanism to
-       control memory usage of a cgroup.  If a cgroup's usage goes
+       Memory usage throttle limit.  If a cgroup's usage goes
        over the high boundary, the processes of the cgroup are
        throttled and put under heavy reclaim pressure.
 
        Going over the high limit never invokes the OOM killer and
-       under extreme conditions the limit may be breached.
+       under extreme conditions the limit may be breached. The high
+       limit should be used in scenarios where an external process
+       monitors the limited cgroup to alleviate heavy reclaim
+       pressure.
 
   memory.max
        A read-write single value file which exists on non-root
        cgroups.  The default is "max".
 
-       Memory usage hard limit.  This is the final protection
-       mechanism.  If a cgroup's memory usage reaches this limit and
-       can't be reduced, the OOM killer is invoked in the cgroup.
-       Under certain circumstances, the usage may go over the limit
-       temporarily.
+       Memory usage hard limit.  This is the main mechanism to limit
+       memory usage of a cgroup.  If a cgroup's memory usage reaches
+       this limit and can't be reduced, the OOM killer is invoked in
+       the cgroup. Under certain circumstances, the usage may go
+       over the limit temporarily.
 
        In default configuration regular 0-order allocations always
        succeed unless OOM killer chooses current task as a victim.
@@ -1238,10 +1240,6 @@ PAGE_SIZE multiple when read back.
        Caller could retry them differently, return into userspace
        as -ENOMEM or silently ignore in cases like disk readahead.
 
-       This is the ultimate protection mechanism.  As long as the
-       high limit is used and monitored properly, this limit's
-       utility is limited to providing the final safety net.
-
   memory.reclaim
        A write-only nested-keyed file which exists for all cgroups.
 
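To make the revised division of labor concrete, here is a hedged C sketch
of how the two knobs are meant to be used per the text above: memory.max
as the main hard limit, memory.high as a throttle threshold that an
external monitor is expected to watch. The cgroup name "demo", the limit
values, and the helper cg_write() are illustrative assumptions, not part
of the patch.

    #include <stdio.h>

    /* write a value into a cgroup v2 control file (path is an assumption) */
    static int cg_write(const char *file, const char *val)
    {
            char path[256];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/fs/cgroup/demo/%s", file);
            f = fopen(path, "w");
            if (!f)
                    return -1;
            fprintf(f, "%s\n", val);
            return fclose(f);
    }

    int main(void)
    {
            /* main mechanism: hard limit; OOM killer if it cannot be held */
            cg_write("memory.max", "1G");
            /* throttle threshold; relies on an external monitor to react */
            cg_write("memory.high", "512M");
            return 0;
    }
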
diff --git a/Documentation/admin-guide/cifs/changes.rst b/Documentation/admin-guide/cifs/changes.rst
index 3147bba..8c42c4d 100644
--- a/Documentation/admin-guide/cifs/changes.rst
+++ b/Documentation/admin-guide/cifs/changes.rst
@@ -5,5 +5,5 @@ Changes
 See https://wiki.samba.org/index.php/LinuxCIFSKernel for summary
 information about fixes/improvements to CIFS/SMB2/SMB3 support (changes
 to cifs.ko module) by kernel version (and cifs internal module version).
-This may be easier to read than parsing the output of "git log fs/cifs"
-by release.
+This may be easier to read than parsing the output of
+"git log fs/smb/client" by release.
index 2e151cd..5f936b4 100644 (file)
@@ -45,7 +45,7 @@ Installation instructions
 
 If you have built the CIFS vfs as module (successfully) simply
 type ``make modules_install`` (or if you prefer, manually copy the file to
-the modules directory e.g. /lib/modules/2.4.10-4GB/kernel/fs/cifs/cifs.ko).
+the modules directory e.g. /lib/modules/6.3.0-060300-generic/kernel/fs/smb/client/cifs.ko).
 
 If you have built the CIFS vfs into the kernel itself, follow the instructions
 for your distribution on how to install a new kernel (usually you
@@ -66,15 +66,15 @@ If cifs is built as a module, then the size and number of network buffers
 and maximum number of simultaneous requests to one server can be configured.
 Changing these from their defaults is not recommended. By executing modinfo::
 
-       modinfo kernel/fs/cifs/cifs.ko
+       modinfo <path to cifs.ko>
 
-on kernel/fs/cifs/cifs.ko the list of configuration changes that can be made
+on kernel/fs/smb/client/cifs.ko the list of configuration changes that can be made
 at module initialization time (by running insmod cifs.ko) can be seen.
 
 Recommendations
 ===============
 
-To improve security the SMB2.1 dialect or later (usually will get SMB3) is now
+To improve security the SMB2.1 dialect or later (usually will get SMB3.1.1) is now
 the new default. To use old dialects (e.g. to mount Windows XP) use "vers=1.0"
 on mount (or vers=2.0 for Windows Vista).  Note that the CIFS (vers=1.0) is
 much older and less secure than the default dialect SMB3 which includes
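Following the relocated module path above, the parameters can be inspected against either the installed file or the loaded module; a small sketch (the kernel version in the path is only an example)::

    modinfo /lib/modules/$(uname -r)/kernel/fs/smb/client/cifs.ko
    # or, once cifs.ko is loaded:
    modinfo cifs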
index ff4f4cc..f08149b 100644 (file)
@@ -215,12 +215,14 @@ again.
    reduce the compile time enormously, especially if you are running an
    universal kernel from a commodity Linux distribution.
 
-   There is a catch: the make target 'localmodconfig' will disable kernel
-   features you have not directly or indirectly through some program utilized
-   since you booted the system. You can reduce or nearly eliminate that risk by
-   using tricks outlined in the reference section; for quick testing purposes
-   that risk is often negligible, but it is an aspect you want to keep in mind
-   in case your kernel behaves oddly.
+   There is a catch: 'localmodconfig' is likely to disable kernel features you
+   did not use since you booted your Linux -- like drivers for currently
+   disconnected peripherals or a virtualization software you have not used yet.
+   You can reduce or nearly eliminate that risk with tricks the reference
+   section outlines; but when building a kernel just for quick testing purposes
+   it is often negligible if such features are missing. But you should keep that
+   aspect in mind when using a kernel built with this make target, as it might
+   be the reason why something you only use occasionally stopped working.
 
    [:ref:`details<configuration>`]
 
@@ -271,6 +273,9 @@ again.
    does nothing at all; in that case you have to manually install your kernel,
    as outlined in the reference section.
 
+   If you are running an immutable Linux distribution, check its documentation
+   and the web to find out how to install your own kernel there.
+
    [:ref:`details<install>`]
 
 .. _another_sbs:
@@ -291,29 +296,29 @@ again.
    version you care about, as git otherwise might retrieve the entire commit
    history::
 
-     git fetch --shallow-exclude=v6.1 origin
-
-   If you modified the sources (for example by applying a patch), you now need
-   to discard those modifications; that's because git otherwise will not be able
-   to switch to the sources of another version due to potential conflicting
-   changes::
-
-     git reset --hard
+     git fetch --shallow-exclude=v6.0 origin
 
-   Now checkout the version you are interested in, as explained above::
+   Now switch to the version you are interested in -- but be aware the command
+   used here will discard any modifications you performed, as they would
+   conflict with the sources you want to checkout::
 
-     git checkout --detach origin/master
+     git checkout --force --detach origin/master
 
    At this point you might want to patch the sources again or set/modify a build
-   tag, as explained earlier; afterwards adjust the build configuration to the
-   new codebase and build your next kernel::
+   tag, as explained earlier. Afterwards adjust the build configuration to the
+   new codebase using olddefconfig, which will now adjust the configuration file
+   you prepared earlier using localmodconfig  (~/linux/.config) for your next
+   kernel::
 
      # reminder: if you want to apply patches, do it at this point
      # reminder: you might want to update your build tag at this point
      make olddefconfig
+
+   Now build your kernel::
+
      make -j $(nproc --all)
 
-   Install the kernel as outlined above::
+   Afterwards install the kernel as outlined above::
 
      command -v installkernel && sudo make modules_install install
 
@@ -584,11 +589,11 @@ versions and individual commits at hand at any time::
     curl -L \
       https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/clone.bundle \
       -o linux-stable.git.bundle
-    git clone clone.bundle ~/linux/
+    git clone linux-stable.git.bundle ~/linux/
     rm linux-stable.git.bundle
     cd ~/linux/
-    git remote set-url origin
-    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
+    git remote set-url origin \
+      https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
     git fetch origin
     git checkout --detach origin/master
 
index e87a878..e9b022d 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=====
-cdrom
-=====
+======
+CD-ROM
+======
 
 .. toctree::
     :maxdepth: 1
index 9b31f86..71364c6 100644 (file)
@@ -32,7 +32,7 @@ properties:
     maxItems: 1
 
   iommus:
-    maxItems: 1
+    maxItems: 4
 
   power-domains:
     maxItems: 1
index e6c1ebf..130e16d 100644 (file)
@@ -82,6 +82,18 @@ properties:
       Indicates if the DSI controller is driving a panel which needs
       2 DSI links.
 
+  qcom,master-dsi:
+    type: boolean
+    description: |
+      Indicates if the DSI controller is the master DSI controller when
+      qcom,dual-dsi-mode enabled.
+
+  qcom,sync-dual-dsi:
+    type: boolean
+    description: |
+      Indicates if the DSI controller needs to sync the other DSI controller
+      with MIPI DCS commands when qcom,dual-dsi-mode enabled.
+
   assigned-clocks:
     minItems: 2
     maxItems: 4
index 4fb05eb..164331e 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Lattice Slave SPI sysCONFIG FPGA manager
 
 maintainers:
-  - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+  - Vladimir Georgiev <v.georgiev@metrotek.ru>
 
 description: |
   Lattice sysCONFIG port, which is used for FPGA configuration, among others,
index 527532f..a157eec 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Microchip Polarfire FPGA manager.
 
 maintainers:
-  - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+  - Vladimir Georgiev <v.georgiev@metrotek.ru>
 
 description:
   Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to
index 63369ba..0a192ca 100644 (file)
@@ -39,6 +39,12 @@ properties:
   power-domains:
     maxItems: 1
 
+  vref-supply:
+    description: |
+      External ADC reference voltage supply on VREFH pad. If VERID[MVI] is
+      set, there are additional, internal reference voltages selectable.
+      VREFH1 is always from VREFH pad.
+
   "#io-channel-cells":
     const: 1
 
@@ -72,6 +78,7 @@ examples:
             assigned-clocks = <&clk IMX_SC_R_ADC_0>;
             assigned-clock-rates = <24000000>;
             power-domains = <&pd IMX_SC_R_ADC_0>;
+            vref-supply = <&reg_1v8>;
             #io-channel-cells = <1>;
         };
     };
index 1c7aee5..36dff32 100644 (file)
@@ -90,7 +90,7 @@ patternProperties:
             of the MAX chips to the GyroADC, while MISO line of each Maxim
             ADC connects to a shared input pin of the GyroADC.
         enum:
-          - adi,7476
+          - adi,ad7476
           - fujitsu,mb88101a
           - maxim,max1162
           - maxim,max11100
index 9211726..39e64c7 100644 (file)
@@ -166,6 +166,12 @@ properties:
   resets:
     maxItems: 1
 
+  mediatek,broken-save-restore-fw:
+    type: boolean
+    description:
+      Asserts that the firmware on this device has issues saving and restoring
+      GICR registers when the GIC redistributors are powered off.
+
 dependencies:
   mbi-ranges: [ msi-controller ]
   msi-controller: [ mbi-ranges ]
index 769fa5c..de1d429 100644 (file)
@@ -21,11 +21,22 @@ properties:
 
   st,can-primary:
     description:
-      Primary and secondary mode of the bxCAN peripheral is only relevant
-      if the chip has two CAN peripherals. In that case they share some
-      of the required logic.
+      Primary mode of the bxCAN peripheral is only relevant if the chip has
+      two CAN peripherals in dual CAN configuration. In that case they share
+      some of the required logic.
+      Not to be used if the peripheral is in single CAN configuration.
       To avoid misunderstandings, it should be noted that ST documentation
-      uses the terms master/slave instead of primary/secondary.
+      uses the term master instead of primary.
+    type: boolean
+
+  st,can-secondary:
+    description:
+      Secondary mode of the bxCAN peripheral is only relevant if the chip
+      has two CAN peripherals in dual CAN configuration. In that case they
+      share some of the required logic.
+      Not to be used if the peripheral is in single CAN configuration.
+      To avoid misunderstandings, it should be noted that ST documentation
+      uses the term slave instead of secondary.
     type: boolean
 
   reg:
index eb3488d..6a7be42 100644 (file)
@@ -70,6 +70,7 @@ properties:
   dsr-gpios: true
   rng-gpios: true
   dcd-gpios: true
+  rs485-rts-active-high: true
   rts-gpio: true
   power-domains: true
   clock-frequency: true
index a5bb561..31a3024 100644 (file)
@@ -55,7 +55,9 @@ properties:
     description: TDM TX current sense time slot.
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -72,7 +74,7 @@ examples:
      codec: codec@4c {
        compatible = "ti,tas2562";
        reg = <0x4c>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        shutdown-gpios = <&gpio1 15 0>;
index 26088ad..8908bf1 100644 (file)
@@ -57,7 +57,9 @@ properties:
       - 1 # Falling edge
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -74,7 +76,7 @@ examples:
      codec: codec@41 {
        compatible = "ti,tas2770";
        reg = <0x41>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        reset-gpio = <&gpio1 15 0>;
index 8cba013..a876545 100644 (file)
@@ -50,7 +50,9 @@ properties:
     description: TDM TX voltage sense time slot.
 
   '#sound-dai-cells':
-    const: 1
+    # The codec has a single DAI, the #sound-dai-cells=<1>; case is left in for backward
+    # compatibility but is deprecated.
+    enum: [0, 1]
 
 required:
   - compatible
@@ -67,7 +69,7 @@ examples:
      codec: codec@38 {
        compatible = "ti,tas2764";
        reg = <0x38>;
-       #sound-dai-cells = <1>;
+       #sound-dai-cells = <0>;
        interrupt-parent = <&gpio1>;
        interrupts = <14>;
        reset-gpios = <&gpio1 15 0>;
index f59125b..0b4e21b 100644 (file)
@@ -8,7 +8,7 @@ Required properties:
        "ti,tlv320aic32x6" TLV320AIC3206, TLV320AIC3256
        "ti,tas2505" TAS2505, TAS2521
  - reg: I2C slave address
- - supply-*: Required supply regulators are:
+ - *-supply: Required supply regulators are:
     "iov" - digital IO power supply
     "ldoin" - LDO power supply
     "dv" - Digital core power supply
index cae46c4..69a93a0 100644 (file)
@@ -64,7 +64,7 @@ properties:
     description:
       size of memory intended as internal memory for endpoints
       buffers expressed in KB
-    $ref: /schemas/types.yaml#/definitions/uint32
+    $ref: /schemas/types.yaml#/definitions/uint16
 
   cdns,phyrst-a-enable:
     description: Enable resetting of PHY if Rx fail is detected
index 50edc4d..4f76259 100644 (file)
@@ -287,7 +287,7 @@ properties:
     description:
       High-Speed PHY interface selection between UTMI+ and ULPI when the
       DWC_USB3_HSPHY_INTERFACE has value 3.
-    $ref: /schemas/types.yaml#/definitions/uint8
+    $ref: /schemas/types.yaml#/definitions/string
     enum: [utmi, ulpi]
 
   snps,quirk-frame-length-adjustment:
index fbb2b5a..eb252fc 100644 (file)
@@ -72,7 +72,6 @@ Documentation for filesystem implementations.
    befs
    bfs
    btrfs
-   cifs/index
    ceph
    coda
    configfs
@@ -111,6 +110,7 @@ Documentation for filesystem implementations.
    ramfs-rootfs-initramfs
    relay
    romfs
+   smb/index
    spufs/index
    squashfs
    sysfs
index 1649606..447f767 100644 (file)
@@ -6,8 +6,7 @@ Ramfs, rootfs and initramfs
 
 October 17, 2005
 
-Rob Landley <rob@landley.net>
-=============================
+:Author: Rob Landley <rob@landley.net>
 
 What is ramfs?
 --------------
index d833953..1cf5648 100644 (file)
@@ -147,6 +147,7 @@ replicas continue to be exactly same.
 
 
 3) Setting mount states
+-----------------------
 
        The mount command (util-linux package) can be used to set mount
        states::
@@ -612,6 +613,7 @@ replicas continue to be exactly same.
 
 
 6) Quiz
+-------
 
        A. What is the result of the following command sequence?
 
@@ -673,6 +675,7 @@ replicas continue to be exactly same.
                /mnt/1/test be?
 
 7) FAQ
+------
 
        Q1. Why is bind mount needed? How is it different from symbolic links?
                symbolic links can get stale if the destination mount gets
@@ -841,6 +844,7 @@ replicas continue to be exactly same.
                             tmp  usr tmp usr tmp usr
 
 8) Implementation
+-----------------
 
 8A) Datastructure
 
similarity index 97%
rename from Documentation/filesystems/cifs/cifsroot.rst
rename to Documentation/filesystems/smb/cifsroot.rst
index 4930bb4..bf2d9db 100644 (file)
@@ -59,7 +59,7 @@ the root file system via SMB protocol.
 Enables the kernel to mount the root file system via SMB that are
 located in the <server-ip> and <share> specified in this option.
 
-The default mount options are set in fs/cifs/cifsroot.c.
+The default mount options are set in fs/smb/client/cifsroot.c.
 
 server-ip
        IPv4 address of the server.
index f80f956..43c9688 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ====
-fpga
+FPGA
 ====
 
 .. toctree::
index 7003bd5..6a9ea96 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 =======
-locking
+Locking
 =======
 
 .. toctree::
index cfd8f41..c12838c 100644 (file)
@@ -52,3 +52,22 @@ Build kernel with:
 
 Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
 table support without extra kernel parameter.
+
+Implementation notes
+====================
+
+We specifically decided not to use VMA information in order to avoid relying on
+MM states (except for limited "struct page" info). The page table check is a
+state machine, separate from Linux-MM, that verifies that the user accessible
+pages are not falsely shared.
+
+PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
+EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
+regions into the userspace via /dev/mem. At the same time, pages may change
+their properties (e.g., from anonymous pages to named pages) while they are
+still being mapped in the userspace, leading to "corruption" detected by the
+page table check.
+
+Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via
+/dev/mem. However, these pages are always considered as named pages, so they
+won't break the logic used in the page table check.
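Putting the new dependency together with the build instructions earlier in this file, enabling the checker roughly looks like the following sketch; the config symbols are the ones named above and page_table_check=on is the boot parameter documented in the unchanged part of this document::

    # .config fragment
    CONFIG_PAGE_TABLE_CHECK=y
    CONFIG_PAGE_TABLE_CHECK_ENFORCED=y    # optional: no boot parameter required

    # otherwise, enable it on the kernel command line
    page_table_check=on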
index 129f413..4846345 100644 (file)
@@ -61,22 +61,6 @@ attribute-sets:
         nested-attributes: bitset-bits
 
   -
-    name: u64-array
-    attributes:
-      -
-        name: u64
-        type: nest
-        multi-attr: true
-        nested-attributes: u64
-  -
-    name: s32-array
-    attributes:
-      -
-        name: s32
-        type: nest
-        multi-attr: true
-        nested-attributes: s32
-  -
     name: string
     attributes:
       -
@@ -239,7 +223,7 @@ attribute-sets:
         name: tx-min-frag-size
         type: u32
       -
-        name: tx-min-frag-size
+        name: rx-min-frag-size
         type: u32
       -
         name: verify-enabled
@@ -310,7 +294,7 @@ attribute-sets:
         name: master-slave-state
         type: u8
       -
-        name: master-slave-lanes
+        name: lanes
         type: u32
       -
         name: rate-matching
@@ -338,7 +322,7 @@ attribute-sets:
         name: ext-substate
         type: u8
       -
-        name: down-cnt
+        name: ext-down-cnt
         type: u32
   -
     name: debug
@@ -593,7 +577,7 @@ attribute-sets:
         name: phc-index
         type: u32
   -
-    name: cable-test-nft-nest-result
+    name: cable-test-ntf-nest-result
     attributes:
       -
         name: pair
@@ -602,7 +586,7 @@ attribute-sets:
         name: code
         type: u8
   -
-    name: cable-test-nft-nest-fault-length
+    name: cable-test-ntf-nest-fault-length
     attributes:
       -
         name: pair
@@ -611,16 +595,16 @@ attribute-sets:
         name: cm
         type: u32
   -
-    name: cable-test-nft-nest
+    name: cable-test-ntf-nest
     attributes:
       -
         name: result
         type: nest
-        nested-attributes: cable-test-nft-nest-result
+        nested-attributes: cable-test-ntf-nest-result
       -
         name: fault-length
         type: nest
-        nested-attributes: cable-test-nft-nest-fault-length
+        nested-attributes: cable-test-ntf-nest-fault-length
   -
     name: cable-test
     attributes:
@@ -634,7 +618,7 @@ attribute-sets:
       -
         name: nest
         type: nest
-        nested-attributes: cable-test-nft-nest
+        nested-attributes: cable-test-ntf-nest
   -
     name: cable-test-tdr-cfg
     attributes:
@@ -705,16 +689,16 @@ attribute-sets:
         type: u8
       -
         name: corrected
-        type: nest
-        nested-attributes: u64-array
+        type: binary
+        sub-type: u64
       -
         name: uncorr
-        type: nest
-        nested-attributes: u64-array
+        type: binary
+        sub-type: u64
       -
         name: corr-bits
-        type: nest
-        nested-attributes: u64-array
+        type: binary
+        sub-type: u64
   -
     name: fec
     attributes:
@@ -792,7 +776,7 @@ attribute-sets:
         name: hist-bkt-hi
         type: u32
       -
-        name: hist-bkt-val
+        name: hist-val
         type: u64
   -
     name: stats
@@ -827,8 +811,8 @@ attribute-sets:
         type: u32
       -
         name: index
-        type: nest
-        nested-attributes: s32-array
+        type: binary
+        sub-type: s32
   -
     name: module
     attributes:
@@ -981,7 +965,7 @@ operations:
             - duplex
             - master-slave-cfg
             - master-slave-state
-            - master-slave-lanes
+            - lanes
             - rate-matching
       dump: *linkmodes-get-op
     -
@@ -1015,7 +999,7 @@ operations:
             - sqi-max
             - ext-state
             - ext-substate
-            - down-cnt
+            - ext-down-cnt
       dump: *linkstate-get-op
     -
       name: debug-get
@@ -1367,7 +1351,7 @@ operations:
         reply:
           attributes:
             - header
-            - cable-test-nft-nest
+            - cable-test-ntf-nest
     -
       name: cable-test-tdr-act
       doc: Cable test TDR.
@@ -1555,7 +1539,7 @@ operations:
             - hkey
       dump: *rss-get-op
     -
-      name: plca-get
+      name: plca-get-cfg
       doc: Get PLCA params.
 
       attribute-set: plca
@@ -1577,7 +1561,7 @@ operations:
             - burst-tmr
       dump: *plca-get-op
     -
-      name: plca-set
+      name: plca-set-cfg
       doc: Set PLCA params.
 
       attribute-set: plca
@@ -1601,7 +1585,7 @@ operations:
     -
       name: plca-ntf
       doc: Notification for change in PLCA params.
-      notify: plca-get
+      notify: plca-get-cfg
     -
       name: mm-get
       doc: Get MAC Merge configuration and state
index 614f1a5..6d89e30 100644 (file)
@@ -68,6 +68,9 @@ attribute-sets:
         type: nest
         nested-attributes: x509
         multi-attr: true
+      -
+        name: peername
+        type: string
   -
     name: done
     attributes:
@@ -105,6 +108,7 @@ operations:
             - auth-mode
             - peer-identity
             - certificate
+            - peername
     -
       name: done
       doc: Handler reports handshake completion
index 3a7a714..3354ca3 100644 (file)
@@ -40,6 +40,7 @@ flow_steering_mode: Device flow steering mode
 ---------------------------------------------
 The flow steering mode parameter controls the flow steering mode of the driver.
 Two modes are supported:
+
 1. 'dmfs' - Device managed flow steering.
 2. 'smfs' - Software/Driver managed flow steering.
 
@@ -99,6 +100,7 @@ between representors and stacked devices.
 By default metadata is enabled on the supported devices in E-switch.
 Metadata is applicable only for E-switch in switchdev mode and
 users may disable it when NONE of the below use cases will be in use:
+
 1. HCA is in Dual/multi-port RoCE mode.
 2. VF/SF representor bonding (Usually used for Live migration)
 3. Stacked devices
@@ -180,7 +182,8 @@ User commands examples:
 
     $ devlink health diagnose pci/0000:82:00.0 reporter tx
 
-NOTE: This command has valid output only when interface is up, otherwise the command has empty output.
+.. note::
+   This command has valid output only when interface is up, otherwise the command has empty output.
 
 - Show number of tx errors indicated, number of recover flows ended successfully,
   is autorecover enabled and graceful period from last recover::
@@ -232,8 +235,9 @@ User commands examples:
 
     $ devlink health dump show pci/0000:82:00.0 reporter fw
 
-NOTE: This command can run only on the PF which has fw tracer ownership,
-running it on other PF or any VF will return "Operation not permitted".
+.. note::
+   This command can run only on the PF which has fw tracer ownership,
+   running it on other PF or any VF will return "Operation not permitted".
 
 fw fatal reporter
 -----------------
@@ -256,7 +260,8 @@ User commands examples:
 
     $ devlink health dump show pci/0000:82:00.1 reporter fw_fatal
 
-NOTE: This command can run only on PF.
+.. note::
+   This command can run only on PF.
 
 vnic reporter
 -------------
@@ -265,28 +270,37 @@ It is responsible for querying the vnic diagnostic counters from fw and displayi
 them in realtime.
 
 Description of the vnic counters:
-total_q_under_processor_handle: number of queues in an error state due to
-an async error or errored command.
-send_queue_priority_update_flow: number of QP/SQ priority/SL update
-events.
-cq_overrun: number of times CQ entered an error state due to an
-overflow.
-async_eq_overrun: number of times an EQ mapped to async events was
-overrun.
-comp_eq_overrun: number of times an EQ mapped to completion events was
-overrun.
-quota_exceeded_command: number of commands issued and failed due to quota
-exceeded.
-invalid_command: number of commands issued and failed dues to any reason
-other than quota exceeded.
-nic_receive_steering_discard: number of packets that completed RX flow
-steering but were discarded due to a mismatch in flow table.
+
+- total_q_under_processor_handle
+        number of queues in an error state due to
+        an async error or errored command.
+- send_queue_priority_update_flow
+        number of QP/SQ priority/SL update events.
+- cq_overrun
+        number of times CQ entered an error state due to an overflow.
+- async_eq_overrun
+        number of times an EQ mapped to async events was overrun.
+- comp_eq_overrun
+        number of times an EQ mapped to completion events was overrun.
+- quota_exceeded_command
+        number of commands issued and failed due to quota exceeded.
+- invalid_command
+        number of commands issued and failed due to any reason other than quota
+        exceeded.
+- nic_receive_steering_discard
+        number of packets that completed RX flow
+        steering but were discarded due to a mismatch in flow table.
 
 User commands examples:
-- Diagnose PF/VF vnic counters
+
+- Diagnose PF/VF vnic counters::
+
         $ devlink health diagnose pci/0000:82:00.1 reporter vnic
+
 - Diagnose representor vnic counters (performed by supplying devlink port of the
-  representor, which can be obtained via devlink port command)
+  representor, which can be obtained via devlink port command)::
+
         $ devlink health diagnose pci/0000:82:00.1/65537 reporter vnic
 
-NOTE: This command can run over all interfaces such as PF/VF and representor ports.
+.. note::
+   This command can run over all interfaces such as PF/VF and representor ports.
index 6ec06a3..80b8f73 100644 (file)
@@ -1352,8 +1352,8 @@ ping_group_range - 2 INTEGERS
        Restrict ICMP_PROTO datagram sockets to users in the group range.
        The default is "1 0", meaning, that nobody (not even root) may
        create ping sockets.  Setting it to "100 100" would grant permissions
-       to the single group. "0 4294967295" would enable it for the world, "100
-       4294967295" would enable it for the users, but not daemons.
+       to the single group. "0 4294967294" would enable it for the world, "100
+       4294967294" would enable it for the users, but not daemons.
 
 tcp_early_demux - BOOLEAN
        Enable early demux for established TCP sockets.
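To apply the corrected world-enabled range from the text above on a running system (the exact invocation is only an illustration)::

    sysctl -w net.ipv4.ping_group_range="0 4294967294"
    # equivalently, via procfs:
    echo "0 4294967294" > /proc/sys/net/ipv4/ping_group_range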
index a2817a8..6f5ea16 100644 (file)
@@ -53,6 +53,7 @@ fills in a structure that contains the parameters of the request:
         struct socket   *ta_sock;
         tls_done_func_t ta_done;
         void            *ta_data;
+        const char      *ta_peername;
         unsigned int    ta_timeout_ms;
         key_serial_t    ta_keyring;
         key_serial_t    ta_my_cert;
@@ -71,6 +72,10 @@ instantiated a struct file in sock->file.
 has completed. Further explanation of this function is in the "Handshake
 Completion" sesction below.
 
+The consumer can provide a NUL-terminated hostname in the @ta_peername
+field that is sent as part of ClientHello. If no peername is provided,
+the DNS hostname associated with the server's IP address is used instead.
+
 The consumer can fill in the @ta_timeout_ms field to force the servicing
 handshake agent to exit after a number of milliseconds. This enables the
 socket to be fully closed once both the kernel and the handshake agent
index 7ae1f62..8067236 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ======
-pcmcia
+PCMCIA
 ======
 
 .. toctree::
index f73ac9e..83614ce 100644 (file)
@@ -127,13 +127,32 @@ the value of ``Message-ID`` to the URL above.
 Updating patch status
 ~~~~~~~~~~~~~~~~~~~~~
 
-It may be tempting to help the maintainers and update the state of your
-own patches when you post a new version or spot a bug. Please **do not**
-do that.
-Interfering with the patch status on patchwork will only cause confusion. Leave
-it to the maintainer to figure out what is the most recent and current
-version that should be applied. If there is any doubt, the maintainer
-will reply and ask what should be done.
+Contributors and reviewers do not have the permissions to update patch
+state directly in patchwork. Patchwork doesn't expose much information
+about the history of the state of patches, therefore having multiple
+people update the state leads to confusion.
+
+Instead of delegating patchwork permissions netdev uses a simple mail
+bot which looks for special commands/lines within the emails sent to
+the mailing list. For example to mark a series as Changes Requested
+one needs to send the following line anywhere in the email thread::
+
+  pw-bot: changes-requested
+
+As a result the bot will set the entire series to Changes Requested.
+This may be useful when author discovers a bug in their own series
+and wants to prevent it from getting applied.
+
+The use of the bot is entirely optional, if in doubt ignore its existence
+completely. Maintainers will classify and update the state of the patches
+themselves. No email should ever be sent to the list with the main purpose
+of communicating with the bot, the bot commands should be seen as metadata.
+
+The use of the bot is restricted to authors of the patches (the ``From:``
+header on patch submission and command must match!), maintainers themselves
+and a handful of senior reviewers. Bot records its activity here:
+
+  https://patchwork.hopto.org/pw-bot.html
 
 Review timelines
 ~~~~~~~~~~~~~~~~
index 8a6860f..7542220 100644 (file)
@@ -1,5 +1,5 @@
 =================================
-brief tutorial on CRC computation
+Brief tutorial on CRC computation
 =================================
 
 A CRC is a long-division remainder.  You add the CRC to the message,
index df510ad..983f91f 100644 (file)
@@ -1,7 +1,7 @@
 .. SPDX-License-Identifier: GPL-2.0
 
 ======
-timers
+Timers
 ======
 
 .. toctree::
index 479c9ea..3c9b263 100644 (file)
@@ -35,7 +35,7 @@ Documentation written by Tom Zanussi
   in place of an explicit value field - this is simply a count of
   event hits.  If 'values' isn't specified, an implicit 'hitcount'
   value will be automatically created and used as the only value.
-  Keys can be any field, or the special string 'stacktrace', which
+  Keys can be any field, or the special string 'common_stacktrace', which
   will use the event's kernel stacktrace as the key.  The keywords
   'keys' or 'key' can be used to specify keys, and the keywords
   'values', 'vals', or 'val' can be used to specify values.  Compound
@@ -54,7 +54,7 @@ Documentation written by Tom Zanussi
   'compatible' if the fields named in the trigger share the same
   number and type of fields and those fields also have the same names.
   Note that any two events always share the compatible 'hitcount' and
-  'stacktrace' fields and can therefore be combined using those
+  'common_stacktrace' fields and can therefore be combined using those
   fields, however pointless that may be.
 
   'hist' triggers add a 'hist' file to each event's subdirectory.
@@ -547,9 +547,9 @@ Extended error information
   the hist trigger display symbolic call_sites, we can have the hist
   trigger additionally display the complete set of kernel stack traces
   that led to each call_site.  To do that, we simply use the special
-  value 'stacktrace' for the key parameter::
+  value 'common_stacktrace' for the key parameter::
 
-    # echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
+    # echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
            /sys/kernel/tracing/events/kmem/kmalloc/trigger
 
   The above trigger will use the kernel stack trace in effect when an
@@ -561,9 +561,9 @@ Extended error information
   every callpath to a kmalloc for a kernel compile)::
 
     # cat /sys/kernel/tracing/events/kmem/kmalloc/hist
-    # trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
+    # trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
 
-    { stacktrace:
+    { common_stacktrace:
          __kmalloc_track_caller+0x10b/0x1a0
          kmemdup+0x20/0x50
          hidraw_report_event+0x8a/0x120 [hid]
@@ -581,7 +581,7 @@ Extended error information
          cpu_startup_entry+0x315/0x3e0
          rest_init+0x7c/0x80
     } hitcount:          3  bytes_req:         21  bytes_alloc:         24
-    { stacktrace:
+    { common_stacktrace:
          __kmalloc_track_caller+0x10b/0x1a0
          kmemdup+0x20/0x50
          hidraw_report_event+0x8a/0x120 [hid]
@@ -596,7 +596,7 @@ Extended error information
          do_IRQ+0x5a/0xf0
          ret_from_intr+0x0/0x30
     } hitcount:          3  bytes_req:         21  bytes_alloc:         24
-    { stacktrace:
+    { common_stacktrace:
          kmem_cache_alloc_trace+0xeb/0x150
          aa_alloc_task_context+0x27/0x40
          apparmor_cred_prepare+0x1f/0x50
@@ -608,7 +608,7 @@ Extended error information
     .
     .
     .
-    { stacktrace:
+    { common_stacktrace:
          __kmalloc+0x11b/0x1b0
          i915_gem_execbuffer2+0x6c/0x2c0 [i915]
          drm_ioctl+0x349/0x670 [drm]
@@ -616,7 +616,7 @@ Extended error information
          SyS_ioctl+0x81/0xa0
          system_call_fastpath+0x12/0x6a
     } hitcount:      17726  bytes_req:   13944120  bytes_alloc:   19593808
-    { stacktrace:
+    { common_stacktrace:
          __kmalloc+0x11b/0x1b0
          load_elf_phdrs+0x76/0xa0
          load_elf_binary+0x102/0x1650
@@ -625,7 +625,7 @@ Extended error information
          SyS_execve+0x3a/0x50
          return_from_execve+0x0/0x23
     } hitcount:      33348  bytes_req:   17152128  bytes_alloc:   20226048
-    { stacktrace:
+    { common_stacktrace:
          kmem_cache_alloc_trace+0xeb/0x150
          apparmor_file_alloc_security+0x27/0x40
          security_file_alloc+0x16/0x20
@@ -636,7 +636,7 @@ Extended error information
          SyS_open+0x1e/0x20
          system_call_fastpath+0x12/0x6a
     } hitcount:    4766422  bytes_req:    9532844  bytes_alloc:   38131376
-    { stacktrace:
+    { common_stacktrace:
          __kmalloc+0x11b/0x1b0
          seq_buf_alloc+0x1b/0x50
          seq_read+0x2cc/0x370
@@ -1026,7 +1026,7 @@ Extended error information
   First we set up an initially paused stacktrace trigger on the
   netif_receive_skb event::
 
-    # echo 'hist:key=stacktrace:vals=len:pause' > \
+    # echo 'hist:key=common_stacktrace:vals=len:pause' > \
            /sys/kernel/tracing/events/net/netif_receive_skb/trigger
 
   Next, we set up an 'enable_hist' trigger on the sched_process_exec
@@ -1060,9 +1060,9 @@ Extended error information
     $ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz
 
     # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
-    # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
+    # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
 
-    { stacktrace:
+    { common_stacktrace:
          __netif_receive_skb_core+0x46d/0x990
          __netif_receive_skb+0x18/0x60
          netif_receive_skb_internal+0x23/0x90
@@ -1079,7 +1079,7 @@ Extended error information
          kthread+0xd2/0xf0
          ret_from_fork+0x42/0x70
     } hitcount:         85  len:      28884
-    { stacktrace:
+    { common_stacktrace:
          __netif_receive_skb_core+0x46d/0x990
          __netif_receive_skb+0x18/0x60
          netif_receive_skb_internal+0x23/0x90
@@ -1097,7 +1097,7 @@ Extended error information
          irq_thread+0x11f/0x150
          kthread+0xd2/0xf0
     } hitcount:         98  len:     664329
-    { stacktrace:
+    { common_stacktrace:
          __netif_receive_skb_core+0x46d/0x990
          __netif_receive_skb+0x18/0x60
          process_backlog+0xa8/0x150
@@ -1115,7 +1115,7 @@ Extended error information
          inet_sendmsg+0x64/0xa0
          sock_sendmsg+0x3d/0x50
     } hitcount:        115  len:      13030
-    { stacktrace:
+    { common_stacktrace:
          __netif_receive_skb_core+0x46d/0x990
          __netif_receive_skb+0x18/0x60
          netif_receive_skb_internal+0x23/0x90
@@ -1142,14 +1142,14 @@ Extended error information
   into the histogram.  In order to avoid having to set everything up
   again, we can just clear the histogram first::
 
-    # echo 'hist:key=stacktrace:vals=len:clear' >> \
+    # echo 'hist:key=common_stacktrace:vals=len:clear' >> \
            /sys/kernel/tracing/events/net/netif_receive_skb/trigger
 
   Just to verify that it is in fact cleared, here's what we now see in
   the hist file::
 
     # cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
-    # trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
+    # trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]
 
     Totals:
         Hits: 0
@@ -1485,12 +1485,12 @@ Extended error information
 
   And here's an example that shows how to combine histogram data from
   any two events even if they don't share any 'compatible' fields
-  other than 'hitcount' and 'stacktrace'.  These commands create a
+  other than 'hitcount' and 'common_stacktrace'.  These commands create a
   couple of triggers named 'bar' using those fields::
 
-    # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
+    # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
            /sys/kernel/tracing/events/sched/sched_process_fork/trigger
-    # echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
+    # echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
           /sys/kernel/tracing/events/net/netif_rx/trigger
 
   And displaying the output of either shows some interesting if
@@ -1501,16 +1501,16 @@ Extended error information
 
     # event histogram
     #
-    # trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
+    # trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
     #
 
-    { stacktrace:
+    { common_stacktrace:
              kernel_clone+0x18e/0x330
              kernel_thread+0x29/0x30
              kthreadd+0x154/0x1b0
              ret_from_fork+0x3f/0x70
     } hitcount:          1
-    { stacktrace:
+    { common_stacktrace:
              netif_rx_internal+0xb2/0xd0
              netif_rx_ni+0x20/0x70
              dev_loopback_xmit+0xaa/0xd0
@@ -1528,7 +1528,7 @@ Extended error information
              call_cpuidle+0x3b/0x60
              cpu_startup_entry+0x22d/0x310
     } hitcount:          1
-    { stacktrace:
+    { common_stacktrace:
              netif_rx_internal+0xb2/0xd0
              netif_rx_ni+0x20/0x70
              dev_loopback_xmit+0xaa/0xd0
@@ -1543,7 +1543,7 @@ Extended error information
              SyS_sendto+0xe/0x10
              entry_SYSCALL_64_fastpath+0x12/0x6a
     } hitcount:          2
-    { stacktrace:
+    { common_stacktrace:
              netif_rx_internal+0xb2/0xd0
              netif_rx+0x1c/0x60
              loopback_xmit+0x6c/0xb0
@@ -1561,7 +1561,7 @@ Extended error information
              sock_sendmsg+0x38/0x50
              ___sys_sendmsg+0x14e/0x270
     } hitcount:         76
-    { stacktrace:
+    { common_stacktrace:
              netif_rx_internal+0xb2/0xd0
              netif_rx+0x1c/0x60
              loopback_xmit+0x6c/0xb0
@@ -1579,7 +1579,7 @@ Extended error information
              sock_sendmsg+0x38/0x50
              ___sys_sendmsg+0x269/0x270
     } hitcount:         77
-    { stacktrace:
+    { common_stacktrace:
              netif_rx_internal+0xb2/0xd0
              netif_rx+0x1c/0x60
              loopback_xmit+0x6c/0xb0
@@ -1597,7 +1597,7 @@ Extended error information
              sock_sendmsg+0x38/0x50
              SYSC_sendto+0xef/0x170
     } hitcount:         88
-    { stacktrace:
+    { common_stacktrace:
              kernel_clone+0x18e/0x330
              SyS_clone+0x19/0x20
              entry_SYSCALL_64_fastpath+0x12/0x6a
@@ -1949,7 +1949,7 @@ uninterruptible state::
 
   # cd /sys/kernel/tracing
   # echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
-  # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace  if prev_state == 2' >> events/sched/sched_switch/trigger
+  # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace  if prev_state == 2' >> events/sched/sched_switch/trigger
   # echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
   # echo 1 > events/synthetic/block_lat/enable
   # cat trace
index 176e8fc..4f7b23f 100644 (file)
@@ -363,7 +363,7 @@ Code  Seq#    Include File                                           Comments
 0xCC  00-0F  drivers/misc/ibmvmc.h                                   pseries VMC driver
 0xCD  01     linux/reiserfs_fs.h
 0xCE  01-02  uapi/linux/cxl_mem.h                                    Compute Express Link Memory Devices
-0xCF  02     fs/cifs/ioctl.c
+0xCF  02     fs/smb/client/cifs_ioctl.h
 0xDB  00-0F  drivers/char/mwave/mwavepub.h
 0xDD  00-3F                                                          ZFCP device driver see drivers/s390/scsi/
                                                                      <mailto:aherrman@de.ibm.com>
index e0ad886..3d2d5fb 100644 (file)
@@ -956,7 +956,8 @@ F:  Documentation/networking/device_drivers/ethernet/amazon/ena.rst
 F:     drivers/net/ethernet/amazon/
 
 AMAZON RDMA EFA DRIVER
-M:     Gal Pressman <galpress@amazon.com>
+M:     Michael Margolin <mrgolin@amazon.com>
+R:     Gal Pressman <gal.pressman@linux.dev>
 R:     Yossi Leybovich <sleybo@amazon.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
@@ -1600,7 +1601,7 @@ F:        drivers/media/i2c/ar0521.c
 
 ARASAN NAND CONTROLLER DRIVER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
-M:     Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:     Michal Simek <michal.simek@amd.com>
 L:     linux-mtd@lists.infradead.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
@@ -1677,10 +1678,7 @@ F:       drivers/power/reset/arm-versatile-reboot.c
 F:     drivers/soc/versatile/
 
 ARM KOMEDA DRM-KMS DRIVER
-M:     James (Qian) Wang <james.qian.wang@arm.com>
 M:     Liviu Dudau <liviu.dudau@arm.com>
-M:     Mihail Atanassov <mihail.atanassov@arm.com>
-L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/arm,komeda.yaml
@@ -1701,8 +1699,6 @@ F:        include/uapi/drm/panfrost_drm.h
 
 ARM MALI-DP DRM DRIVER
 M:     Liviu Dudau <liviu.dudau@arm.com>
-M:     Brian Starkey <brian.starkey@arm.com>
-L:     Mali DP Maintainers <malidp@foss.arm.com>
 S:     Supported
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     Documentation/devicetree/bindings/display/arm,malidp.yaml
@@ -1768,7 +1764,7 @@ F:        include/linux/amba/mmci.h
 
 ARM PRIMECELL PL35X NAND CONTROLLER DRIVER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
-M:     Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:     Michal Simek <michal.simek@amd.com>
 L:     linux-mtd@lists.infradead.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
@@ -1776,7 +1772,7 @@ F:        drivers/mtd/nand/raw/pl35x-nand-controller.c
 
 ARM PRIMECELL PL35X SMC DRIVER
 M:     Miquel Raynal <miquel.raynal@bootlin.com>
-M:     Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:     Michal Simek <michal.simek@amd.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml
@@ -2434,6 +2430,15 @@ X:       drivers/net/wireless/atmel/
 N:     at91
 N:     atmel
 
+ARM/MICROCHIP (ARM64) SoC support
+M:     Conor Dooley <conor@kernel.org>
+M:     Nicolas Ferre <nicolas.ferre@microchip.com>
+M:     Claudiu Beznea <claudiu.beznea@microchip.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Supported
+T:     git https://git.kernel.org/pub/scm/linux/kernel/git/at91/linux.git
+F:     arch/arm64/boot/dts/microchip/
+
 ARM/Microchip Sparx5 SoC support
 M:     Lars Povlsen <lars.povlsen@microchip.com>
 M:     Steen Hegelund <Steen.Hegelund@microchip.com>
@@ -2441,8 +2446,7 @@ M:        Daniel Machon <daniel.machon@microchip.com>
 M:     UNGLinuxDriver@microchip.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Supported
-T:     git git://github.com/microchip-ung/linux-upstream.git
-F:     arch/arm64/boot/dts/microchip/
+F:     arch/arm64/boot/dts/microchip/sparx*
 F:     drivers/net/ethernet/microchip/vcap/
 F:     drivers/pinctrl/pinctrl-microchip-sgpio.c
 N:     sparx5
@@ -3541,7 +3545,7 @@ F:        Documentation/filesystems/befs.rst
 F:     fs/befs/
 
 BFQ I/O SCHEDULER
-M:     Paolo Valente <paolo.valente@linaro.org>
+M:     Paolo Valente <paolo.valente@unimore.it>
 M:     Jens Axboe <axboe@kernel.dk>
 L:     linux-block@vger.kernel.org
 S:     Maintained
@@ -4914,7 +4918,6 @@ F:        drivers/media/cec/i2c/ch7322.c
 CIRRUS LOGIC AUDIO CODEC DRIVERS
 M:     James Schulman <james.schulman@cirrus.com>
 M:     David Rhodes <david.rhodes@cirrus.com>
-M:     Lucas Tanure <tanureal@opensource.cirrus.com>
 M:     Richard Fitzgerald <rf@opensource.cirrus.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     patches@opensource.cirrus.com
@@ -5136,7 +5139,7 @@ X:        drivers/clk/clkdev.c
 
 COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3)
 M:     Steve French <sfrench@samba.org>
-R:     Paulo Alcantara <pc@cjr.nz> (DFS, global name space)
+R:     Paulo Alcantara <pc@manguebit.com> (DFS, global name space)
 R:     Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files)
 R:     Shyam Prasad N <sprasad@microsoft.com> (multichannel)
 R:     Tom Talpey <tom@talpey.com> (RDMA, smbdirect)
@@ -5146,8 +5149,8 @@ S:        Supported
 W:     https://wiki.samba.org/index.php/LinuxCIFS
 T:     git git://git.samba.org/sfrench/cifs-2.6.git
 F:     Documentation/admin-guide/cifs/
-F:     fs/cifs/
-F:     fs/smbfs_common/
+F:     fs/smb/client/
+F:     fs/smb/common/
 F:     include/uapi/linux/cifs
 
 COMPACTPCI HOTPLUG CORE
@@ -5725,6 +5728,14 @@ F:       include/linux/tfrc.h
 F:     include/uapi/linux/dccp.h
 F:     net/dccp/
 
+DEBUGOBJECTS:
+M:     Thomas Gleixner <tglx@linutronix.de>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/debugobjects
+F:     lib/debugobjects.c
+F:     include/linux/debugobjects.h
+
 DECSTATION PLATFORM SUPPORT
 M:     "Maciej W. Rozycki" <macro@orcam.me.uk>
 L:     linux-mips@vger.kernel.org
@@ -6012,7 +6023,7 @@ W:        http://www.dialog-semiconductor.com/products
 F:     Documentation/devicetree/bindings/input/da90??-onkey.txt
 F:     Documentation/devicetree/bindings/input/dlg,da72??.txt
 F:     Documentation/devicetree/bindings/mfd/da90*.txt
-F:     Documentation/devicetree/bindings/mfd/da90*.yaml
+F:     Documentation/devicetree/bindings/mfd/dlg,da90*.yaml
 F:     Documentation/devicetree/bindings/regulator/da92*.txt
 F:     Documentation/devicetree/bindings/regulator/dlg,da9*.yaml
 F:     Documentation/devicetree/bindings/regulator/slg51000.txt
@@ -6211,6 +6222,7 @@ X:        Documentation/devicetree/
 X:     Documentation/driver-api/media/
 X:     Documentation/firmware-guide/acpi/
 X:     Documentation/i2c/
+X:     Documentation/netlink/
 X:     Documentation/power/
 X:     Documentation/spi/
 X:     Documentation/userspace-api/media/
@@ -8158,6 +8170,7 @@ F:        include/linux/spi/spi-fsl-dspi.h
 
 FREESCALE ENETC ETHERNET DRIVERS
 M:     Claudiu Manoil <claudiu.manoil@nxp.com>
+M:     Vladimir Oltean <vladimir.oltean@nxp.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/freescale/enetc/
@@ -8786,6 +8799,7 @@ F:        include/linux/gpio/regmap.h
 GPIO SUBSYSTEM
 M:     Linus Walleij <linus.walleij@linaro.org>
 M:     Bartosz Golaszewski <brgl@bgdev.pl>
+R:     Andy Shevchenko <andy@kernel.org>
 L:     linux-gpio@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -9339,7 +9353,7 @@ F:        include/linux/hisi_acc_qm.h
 
 HISILICON ROCE DRIVER
 M:     Haoyue Xu <xuhaoyue1@hisilicon.com>
-M:     Wenpeng Liang <liangwenpeng@huawei.com>
+M:     Junxian Huang <huangjunxian6@hisilicon.com>
 L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -10110,7 +10124,7 @@ S:      Maintained
 F:     Documentation/process/kernel-docs.rst
 
 INDUSTRY PACK SUBSYSTEM (IPACK)
-M:     Samuel Iglesias Gonsalvez <siglesias@igalia.com>
+M:     Vaibhav Gupta <vaibhavgupta40@gmail.com>
 M:     Jens Taprogge <jens.taprogge@taprogge.org>
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     industrypack-devel@lists.sourceforge.net
@@ -11305,9 +11319,9 @@ R:      Tom Talpey <tom@talpey.com>
 L:     linux-cifs@vger.kernel.org
 S:     Maintained
 T:     git git://git.samba.org/ksmbd.git
-F:     Documentation/filesystems/cifs/ksmbd.rst
-F:     fs/ksmbd/
-F:     fs/smbfs_common/
+F:     Documentation/filesystems/smb/ksmbd.rst
+F:     fs/smb/common/
+F:     fs/smb/server/
 
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
@@ -13832,7 +13846,7 @@ F:      drivers/tty/serial/8250/8250_pci1xxxx.c
 
 MICROCHIP POLARFIRE FPGA DRIVERS
 M:     Conor Dooley <conor.dooley@microchip.com>
-R:     Ivan Bornyakov <i.bornyakov@metrotek.ru>
+R:     Vladimir Georgiev <v.georgiev@metrotek.ru>
 L:     linux-fpga@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
@@ -14566,6 +14580,7 @@ T:      git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/devicetree/bindings/net/
 F:     drivers/connector/
 F:     drivers/net/
+X:     drivers/net/wireless/
 F:     include/dt-bindings/net/
 F:     include/linux/etherdevice.h
 F:     include/linux/fcdevice.h
@@ -14615,6 +14630,7 @@ B:      mailto:netdev@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 F:     Documentation/core-api/netlink.rst
+F:     Documentation/netlink/
 F:     Documentation/networking/
 F:     Documentation/process/maintainer-netdev.rst
 F:     Documentation/userspace-api/netlink/
@@ -14629,6 +14645,7 @@ F:      include/uapi/linux/netdevice.h
 F:     lib/net_utils.c
 F:     lib/random32.c
 F:     net/
+X:     net/bluetooth/
 F:     tools/net/
 F:     tools/testing/selftests/net/
 
@@ -14928,6 +14945,7 @@ F:      drivers/ntb/hw/intel/
 
 NTFS FILESYSTEM
 M:     Anton Altaparmakov <anton@tuxera.com>
+R:     Namjae Jeon <linkinjeon@kernel.org>
 L:     linux-ntfs-dev@lists.sourceforge.net
 S:     Supported
 W:     http://www.tuxera.com/
@@ -18575,10 +18593,9 @@ F:     Documentation/admin-guide/LSM/SafeSetID.rst
 F:     security/safesetid/
 
 SAMSUNG AUDIO (ASoC) DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Sylwester Nawrocki <s.nawrocki@samsung.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
-S:     Supported
+S:     Maintained
 B:     mailto:linux-samsung-soc@vger.kernel.org
 F:     Documentation/devicetree/bindings/sound/samsung*
 F:     sound/soc/samsung/
@@ -18706,7 +18723,6 @@ F:      include/dt-bindings/clock/samsung,*.h
 F:     include/linux/clk/samsung.h
 
 SAMSUNG SPI DRIVERS
-M:     Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
 M:     Andi Shyti <andi.shyti@kernel.org>
 L:     linux-spi@vger.kernel.org
 L:     linux-samsung-soc@vger.kernel.org
@@ -18842,12 +18858,11 @@ F:    drivers/target/
 F:     include/target/
 
 SCTP PROTOCOL
-M:     Neil Horman <nhorman@tuxdriver.com>
 M:     Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 M:     Xin Long <lucien.xin@gmail.com>
 L:     linux-sctp@vger.kernel.org
 S:     Maintained
-W:     http://lksctp.sourceforge.net
+W:     https://github.com/sctp/lksctp-tools/wiki
 F:     Documentation/networking/sctp.rst
 F:     include/linux/sctp.h
 F:     include/net/sctp/
index f836936..09866a8 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
index 78555a6..7b7e6c2 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_pcie>;
        reset-gpio = <&gpio6 7 GPIO_ACTIVE_LOW>;
+       vpcie-supply = <&reg_pcie>;
        status = "okay";
 };
 
index 5882c75..32a6022 100644 (file)
@@ -8,6 +8,7 @@
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/leds/common.h>
 #include <dt-bindings/pwm/pwm.h>
+#include <dt-bindings/regulator/dlg,da9063-regulator.h>
 #include "imx6ull.dtsi"
 
 / {
 
                regulators {
                        vdd_soc_in_1v4: buck1 {
+                               regulator-allowed-modes = <DA9063_BUCK_MODE_SLEEP>; /* PFM */
                                regulator-always-on;
                                regulator-boot-on;
+                               regulator-initial-mode = <DA9063_BUCK_MODE_SLEEP>;
                                regulator-max-microvolt = <1400000>;
                                regulator-min-microvolt = <1400000>;
                                regulator-name = "vdd_soc_in_1v4";
                        };
 
                        vcc_3v3: buck2 {
+                               regulator-allowed-modes = <DA9063_BUCK_MODE_SYNC>; /* PWM */
                                regulator-always-on;
                                regulator-boot-on;
+                               regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-max-microvolt = <3300000>;
                                regulator-min-microvolt = <3300000>;
                                regulator-name = "vcc_3v3";
                         * the voltage is set to 1.5V.
                         */
                        vcc_ddr_1v35: buck3 {
+                               regulator-allowed-modes = <DA9063_BUCK_MODE_SYNC>; /* PWM */
                                regulator-always-on;
                                regulator-boot-on;
+                               regulator-initial-mode = <DA9063_BUCK_MODE_SYNC>;
                                regulator-max-microvolt = <1500000>;
                                regulator-min-microvolt = <1500000>;
                                regulator-name = "vcc_ddr_1v35";
index c9e05e3..00bf53f 100644 (file)
                        interrupt-names = "tx", "rx0", "rx1", "sce";
                        resets = <&rcc STM32F4_APB1_RESET(CAN2)>;
                        clocks = <&rcc 0 STM32F4_APB1_CLOCK(CAN2)>;
+                       st,can-secondary;
                        st,gcan = <&gcan>;
                        status = "disabled";
                };
index c8e6c52..9f65403 100644 (file)
                                        slew-rate = <2>;
                                };
                        };
+
+                       can1_pins_a: can1-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('A', 12, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('A', 11, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can1_pins_b: can1-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 9, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 8, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can1_pins_c: can1-2 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('D', 1, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('D', 0, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+
+                               };
+                       };
+
+                       can1_pins_d: can1-3 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('H', 13, AF9)>; /* CAN1_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('H', 14, AF9)>; /* CAN1_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can2_pins_a: can2-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 6, AF9)>; /* CAN2_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 5, AF9)>; /* CAN2_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can2_pins_b: can2-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 13, AF9)>; /* CAN2_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 12, AF9)>; /* CAN2_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can3_pins_a: can3-0 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('A', 15, AF11)>; /* CAN3_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('A', 8, AF11)>; /* CAN3_RX */
+                                       bias-pull-up;
+                               };
+                       };
+
+                       can3_pins_b: can3-1 {
+                               pins1 {
+                                       pinmux = <STM32_PINMUX('B', 4, AF11)>;  /* CAN3_TX */
+                               };
+                               pins2 {
+                                       pinmux = <STM32_PINMUX('B', 3, AF11)>; /* CAN3_RX */
+                                       bias-pull-up;
+                               };
+                       };
                };
        };
 };
index 3b88209..ff1f9a1 100644 (file)
                reg = <0x2c0f0000 0x1000>;
                interrupts = <0 84 4>;
                cache-level = <2>;
+               cache-unified;
        };
 
        pmu {
index 78d3d4b..f4db3e7 100644 (file)
@@ -92,7 +92,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(PMEVCNTR##n)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
@@ -100,14 +100,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, PMEVCNTR##n)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, PMEVTYPER##n)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
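
Why the hunks above switch these accessors to static inline: they live in a header pulled into multiple translation units, and a plain static definition draws "defined but not used" warnings in any file that includes the header without calling them. A minimal standalone sketch of the pattern (hypothetical names, userspace C):

    /* counters.h - sketch of a header-defined accessor, as above */
    #ifndef COUNTERS_H
    #define COUNTERS_H

    static unsigned long counters[4];

    /*
     * "static inline" may be defined in a header included from many .c
     * files: each unit gets its own copy, unused copies are discarded,
     * and no unused-function warning is emitted.
     */
    static inline unsigned long read_counter_n(int n)
    {
            switch (n) {
            case 0: return counters[0];
            case 1: return counters[1];
            case 2: return counters[2];
            case 3: return counters[3];
            default: return 0;
            }
    }

    #endif /* COUNTERS_H */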
index b1201d2..343e1e1 100644 (file)
@@ -1516,7 +1516,7 @@ config XEN
 # 16K |       27          |      14      |       13        |         11         |
 # 64K |       29          |      16      |       13        |         13         |
 config ARCH_FORCE_MAX_ORDER
-       int "Order of maximal physically contiguous allocations" if EXPERT && (ARM64_4K_PAGES || ARM64_16K_PAGES)
+       int
        default "13" if ARM64_64K_PAGES
        default "11" if ARM64_16K_PAGES
        default "10"
index 0295780..7b41537 100644 (file)
@@ -59,6 +59,7 @@
                L2_0: l2-cache0 {
                        compatible = "cache";
                        cache-level = <2>;
+                       cache-unified;
                };
        };
 
index ef68f5a..afdf954 100644 (file)
@@ -72,6 +72,7 @@
                L2_0: l2-cache0 {
                        compatible = "cache";
                        cache-level = <2>;
+                       cache-unified;
                };
        };
 
index 796cd7d..7bdeb96 100644 (file)
@@ -58,6 +58,7 @@
                L2_0: l2-cache0 {
                        compatible = "cache";
                        cache-level = <2>;
+                       cache-unified;
                };
        };
 
index 2209c1a..e62a435 100644 (file)
@@ -171,6 +171,7 @@ conn_subsys: bus@5b000000 {
                        interrupt-names = "host", "peripheral", "otg", "wakeup";
                        phys = <&usb3_phy>;
                        phy-names = "cdns3,usb3-phy";
+                       cdns,on-chip-buff-size = /bits/ 16 <18>;
                        status = "disabled";
                };
        };
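
A note on the syntax: the /bits/ 16 modifier makes dtc store the value in a 16-bit cell instead of the default 32 bits, matching the uint16 type the binding declares for cdns,on-chip-buff-size; a plain <18> would fail a u16 property read at probe time because of the size mismatch.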
index 67072e6..cbd9d12 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               ethphy: ethernet-phy@4 {
+               ethphy: ethernet-phy@4 { /* AR8033 or ADIN1300 */
                        compatible = "ethernet-phy-ieee802.3-c22";
                        reg = <4>;
                        reset-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
                        reset-assert-us = <10000>;
+                       /*
+                        * Deassert delay:
+                        * ADIN1300 requires 5ms.
+                        * AR8033   requires 1ms.
+                        */
+                       reset-deassert-us = <20000>;
                };
        };
 };
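
Note the units: reset-deassert-us is microseconds, so <20000> is 20 ms, a 4x margin over the slower ADIN1300's 5 ms requirement and 20x over the AR8033's 1 ms, letting a single value safely cover either assembly option.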
index bd84db5..8be8f09 100644 (file)
                                         <&clk IMX8MN_CLK_DISP_APB_ROOT>,
                                         <&clk IMX8MN_CLK_DISP_AXI_ROOT>;
                                clock-names = "pix", "axi", "disp_axi";
-                               assigned-clocks = <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>,
-                                                 <&clk IMX8MN_CLK_DISP_AXI>,
-                                                 <&clk IMX8MN_CLK_DISP_APB>;
-                               assigned-clock-parents = <&clk IMX8MN_CLK_DISP_PIXEL>,
-                                                        <&clk IMX8MN_SYS_PLL2_1000M>,
-                                                        <&clk IMX8MN_SYS_PLL1_800M>;
-                               assigned-clock-rates = <594000000>, <500000000>, <200000000>;
                                interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                                power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_LCDIF>;
                                status = "disabled";
                                clocks = <&clk IMX8MN_CLK_DSI_CORE>,
                                         <&clk IMX8MN_CLK_DSI_PHY_REF>;
                                clock-names = "bus_clk", "sclk_mipi";
-                               assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>,
-                                                 <&clk IMX8MN_CLK_DSI_PHY_REF>;
-                               assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>,
-                                                        <&clk IMX8MN_CLK_24M>;
-                               assigned-clock-rates = <266000000>, <24000000>;
-                               samsung,pll-clock-frequency = <24000000>;
                                interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>;
                                power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_DSI>;
                                status = "disabled";
                                              "lcdif-axi", "lcdif-apb", "lcdif-pix",
                                              "dsi-pclk", "dsi-ref",
                                              "csi-aclk", "csi-pclk";
+                               assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>,
+                                                 <&clk IMX8MN_CLK_DSI_PHY_REF>,
+                                                 <&clk IMX8MN_CLK_DISP_PIXEL>,
+                                                 <&clk IMX8MN_CLK_DISP_AXI>,
+                                                 <&clk IMX8MN_CLK_DISP_APB>;
+                               assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>,
+                                                        <&clk IMX8MN_CLK_24M>,
+                                                        <&clk IMX8MN_VIDEO_PLL1_OUT>,
+                                                        <&clk IMX8MN_SYS_PLL2_1000M>,
+                                                        <&clk IMX8MN_SYS_PLL1_800M>;
+                               assigned-clock-rates = <266000000>,
+                                                      <24000000>,
+                                                      <594000000>,
+                                                      <500000000>,
+                                                      <200000000>;
                                #power-domain-cells = <1>;
                        };
 
index f813919..428c604 100644 (file)
                                         <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
                                         <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
                                clock-names = "pix", "axi", "disp_axi";
-                               assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT>,
-                                                 <&clk IMX8MP_CLK_MEDIA_AXI>,
-                                                 <&clk IMX8MP_CLK_MEDIA_APB>;
-                               assigned-clock-parents = <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
-                                                        <&clk IMX8MP_SYS_PLL2_1000M>,
-                                                        <&clk IMX8MP_SYS_PLL1_800M>;
-                               assigned-clock-rates = <594000000>, <500000000>, <200000000>;
                                interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                                power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_1>;
                                status = "disabled";
                                         <&clk IMX8MP_CLK_MEDIA_APB_ROOT>,
                                         <&clk IMX8MP_CLK_MEDIA_AXI_ROOT>;
                                clock-names = "pix", "axi", "disp_axi";
-                               assigned-clocks = <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
-                                                 <&clk IMX8MP_VIDEO_PLL1>;
-                               assigned-clock-parents = <&clk IMX8MP_VIDEO_PLL1_OUT>,
-                                                        <&clk IMX8MP_VIDEO_PLL1_REF_SEL>;
-                               assigned-clock-rates = <0>, <1039500000>;
                                power-domains = <&media_blk_ctrl IMX8MP_MEDIABLK_PD_LCDIF_2>;
                                status = "disabled";
 
                                              "disp1", "disp2", "isp", "phy";
 
                                assigned-clocks = <&clk IMX8MP_CLK_MEDIA_AXI>,
-                                                 <&clk IMX8MP_CLK_MEDIA_APB>;
+                                                 <&clk IMX8MP_CLK_MEDIA_APB>,
+                                                 <&clk IMX8MP_CLK_MEDIA_DISP1_PIX>,
+                                                 <&clk IMX8MP_CLK_MEDIA_DISP2_PIX>,
+                                                 <&clk IMX8MP_VIDEO_PLL1>;
                                assigned-clock-parents = <&clk IMX8MP_SYS_PLL2_1000M>,
-                                                        <&clk IMX8MP_SYS_PLL1_800M>;
-                               assigned-clock-rates = <500000000>, <200000000>;
-
+                                                        <&clk IMX8MP_SYS_PLL1_800M>,
+                                                        <&clk IMX8MP_VIDEO_PLL1_OUT>,
+                                                        <&clk IMX8MP_VIDEO_PLL1_OUT>;
+                               assigned-clock-rates = <500000000>, <200000000>,
+                                                      <0>, <0>, <1039500000>;
                                #power-domain-cells = <1>;
 
                                lvds_bridge: bridge@5c {
index 7264d78..9af769a 100644 (file)
        };
 };
 
+&iomuxc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>,
+                   <&pinctrl_lpspi2_cs2>;
+};
+
 /* Colibri SPI */
 &lpspi2 {
        status = "okay";
index 5f30c88..f895306 100644 (file)
@@ -48,8 +48,7 @@
                           <IMX8QXP_SAI0_TXFS_LSIO_GPIO0_IO28           0x20>,          /* SODIMM 101 */
                           <IMX8QXP_SAI0_RXD_LSIO_GPIO0_IO27            0x20>,          /* SODIMM  97 */
                           <IMX8QXP_ENET0_RGMII_RXC_LSIO_GPIO5_IO03     0x06000020>,    /* SODIMM  85 */
-                          <IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26            0x20>,          /* SODIMM  79 */
-                          <IMX8QXP_QSPI0A_DATA1_LSIO_GPIO3_IO10        0x06700041>;    /* SODIMM  45 */
+                          <IMX8QXP_SAI0_TXC_LSIO_GPIO0_IO26            0x20>;          /* SODIMM  79 */
        };
 
        pinctrl_uart1_forceoff: uart1forceoffgrp {
index 7cad791..49d105e 100644 (file)
 /* TODO VPU Encoder/Decoder */
 
 &iomuxc {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_ext_io0>, <&pinctrl_hog0>, <&pinctrl_hog1>,
-                   <&pinctrl_hog2>, <&pinctrl_lpspi2_cs2>;
-
        /* On-module touch pen-down interrupt */
        pinctrl_ad7879_int: ad7879intgrp {
                fsl,pins = <IMX8QXP_MIPI_CSI0_I2C0_SCL_LSIO_GPIO3_IO05  0x21>;
        };
 
        pinctrl_hog1: hog1grp {
-               fsl,pins = <IMX8QXP_CSI_MCLK_LSIO_GPIO3_IO01                    0x20>,          /* SODIMM  75 */
-                          <IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16                 0x20>;          /* SODIMM  93 */
+               fsl,pins = <IMX8QXP_QSPI0A_SCLK_LSIO_GPIO3_IO16                 0x20>;          /* SODIMM  93 */
        };
 
        pinctrl_hog2: hog2grp {
                fsl,pins = <IMX8QXP_SCU_BOOT_MODE3_SCU_DSC_RTC_CLOCK_OUTPUT_32K 0x20>;
        };
 };
+
+/* Delete peripherals which are not present on this SoC, but are defined in imx8-ss-*.dtsi */
+
+/delete-node/ &adc1;
+/delete-node/ &adc1_lpcg;
+/delete-node/ &dsp;
+/delete-node/ &dsp_lpcg;
index d6b51de..18dc2fb 100644 (file)
@@ -13,7 +13,7 @@
 
 #define RETURN_READ_PMEVCNTRN(n) \
        return read_sysreg(pmevcntr##n##_el0)
-static unsigned long read_pmevcntrn(int n)
+static inline unsigned long read_pmevcntrn(int n)
 {
        PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
        return 0;
@@ -21,14 +21,14 @@ static unsigned long read_pmevcntrn(int n)
 
 #define WRITE_PMEVCNTRN(n) \
        write_sysreg(val, pmevcntr##n##_el0)
-static void write_pmevcntrn(int n, unsigned long val)
+static inline void write_pmevcntrn(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
 }
 
 #define WRITE_PMEVTYPERN(n) \
        write_sysreg(val, pmevtyper##n##_el0)
-static void write_pmevtypern(int n, unsigned long val)
+static inline void write_pmevtypern(int n, unsigned long val)
 {
        PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
 }
index 683ca3a..5f6f848 100644 (file)
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
 #define APPLE_CPU_PART_M2_BLIZZARD     0x032
 #define APPLE_CPU_PART_M2_AVALANCHE    0x033
+#define APPLE_CPU_PART_M2_BLIZZARD_PRO 0x034
+#define APPLE_CPU_PART_M2_AVALANCHE_PRO        0x035
+#define APPLE_CPU_PART_M2_BLIZZARD_MAX 0x038
+#define APPLE_CPU_PART_M2_AVALANCHE_MAX        0x039
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
 #define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
 #define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
+#define MIDR_APPLE_M2_BLIZZARD_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_PRO)
+#define MIDR_APPLE_M2_AVALANCHE_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_PRO)
+#define MIDR_APPLE_M2_BLIZZARD_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD_MAX)
+#define MIDR_APPLE_M2_AVALANCHE_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE_MAX)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 4cd6762..93bd097 100644 (file)
@@ -209,6 +209,7 @@ struct kvm_pgtable_visit_ctx {
        kvm_pte_t                               old;
        void                                    *arg;
        struct kvm_pgtable_mm_ops               *mm_ops;
+       u64                                     start;
        u64                                     addr;
        u64                                     end;
        u32                                     level;
@@ -631,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
  *
  * The walker will walk the page-table entries corresponding to the input
  * address range specified, visiting entries according to the walker flags.
- * Invalid entries are treated as leaf entries. Leaf entries are reloaded
- * after invoking the walker callback, allowing the walker to descend into
- * a newly installed table.
+ * Invalid entries are treated as leaf entries. The visited page table entry is
+ * reloaded after invoking the walker callback, allowing the walker to descend
+ * into a newly installed table.
  *
  * Returning a negative error code from the walker callback function will
  * terminate the walk immediately with the same error code.
index e72d9aa..eefd712 100644 (file)
 #define SB_BARRIER_INSN                        __SYS_BARRIER_INSN(0, 7, 31)
 
 #define SYS_DC_ISW                     sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW                    sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW                   sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW                     sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW                    sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW                   sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW                    sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW                   sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW                  sys_insn(1, 0, 7, 14, 6)
 
 /*
  * Automatically generated definitions for system registers, the
index f5bcb0d..7e89968 100644 (file)
@@ -66,13 +66,10 @@ void mte_sync_tags(pte_t old_pte, pte_t pte)
                return;
 
        /* if PG_mte_tagged is set, tags have already been initialised */
-       for (i = 0; i < nr_pages; i++, page++) {
-               if (!page_mte_tagged(page)) {
+       for (i = 0; i < nr_pages; i++, page++)
+               if (!page_mte_tagged(page))
                        mte_sync_page_tags(page, old_pte, check_swap,
                                           pte_is_tagged);
-                       set_page_mte_tagged(page);
-               }
-       }
 
        /* ensure the tags are visible before the PTE is set */
        smp_wmb();
index 0119dc9..d9e1355 100644 (file)
@@ -288,7 +288,7 @@ static int aarch32_alloc_kuser_vdso_page(void)
 
        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
-       aarch32_vectors_page = virt_to_page(vdso_page);
+       aarch32_vectors_page = virt_to_page((void *)vdso_page);
        return 0;
 }
 
index 1279949..4c9dcd8 100644 (file)
@@ -81,26 +81,34 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
 
        fpsimd_kvm_prepare();
 
+       /*
+        * We will check TIF_FOREIGN_FPSTATE just before entering the
+        * guest in kvm_arch_vcpu_ctxflush_fp() and override this to
+        * FP_STATE_FREE if the flag is set.
+        */
        vcpu->arch.fp_state = FP_STATE_HOST_OWNED;
 
        vcpu_clear_flag(vcpu, HOST_SVE_ENABLED);
        if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
                vcpu_set_flag(vcpu, HOST_SVE_ENABLED);
 
-       /*
-        * We don't currently support SME guests but if we leave
-        * things in streaming mode then when the guest starts running
-        * FPSIMD or SVE code it may generate SME traps so as a
-        * special case if we are in streaming mode we force the host
-        * state to be saved now and exit streaming mode so that we
-        * don't have to handle any SME traps for valid guest
-        * operations. Do this for ZA as well for now for simplicity.
-        */
        if (system_supports_sme()) {
                vcpu_clear_flag(vcpu, HOST_SME_ENABLED);
                if (read_sysreg(cpacr_el1) & CPACR_EL1_SMEN_EL0EN)
                        vcpu_set_flag(vcpu, HOST_SME_ENABLED);
 
+               /*
+                * If PSTATE.SM is enabled then save any pending FP
+                * state and disable PSTATE.SM. If we leave PSTATE.SM
+                * enabled and the guest does not enable SME via
+                * CPACR_EL1.SMEN then operations that should be valid
+                * may generate SME traps from EL1 to EL1 which we
+                * can't intercept and which would confuse the guest.
+                *
+                * Do the same for PSTATE.ZA in the case where there
+                * is state in the registers which has not already
+                * been saved; this is very unlikely to happen.
+                */
                if (read_sysreg_s(SYS_SVCR) & (SVCR_SM_MASK | SVCR_ZA_MASK)) {
                        vcpu->arch.fp_state = FP_STATE_FREE;
                        fpsimd_save_and_flush_cpu_state();
index c41166f..5c15c58 100644 (file)
@@ -177,9 +177,17 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
        sve_guest = vcpu_has_sve(vcpu);
        esr_ec = kvm_vcpu_trap_get_class(vcpu);
 
-       /* Don't handle SVE traps for non-SVE vcpus here: */
-       if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
+       /* Only handle traps the vCPU can support here: */
+       switch (esr_ec) {
+       case ESR_ELx_EC_FP_ASIMD:
+               break;
+       case ESR_ELx_EC_SVE:
+               if (!sve_guest)
+                       return false;
+               break;
+       default:
                return false;
+       }
 
        /* Valid trap.  Switch the context: */
 
@@ -404,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
        return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        if (!__populate_fault_info(vcpu))
                return true;
 
        return false;
 }
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+       __alias(kvm_hyp_handle_memory_fault);
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+       __alias(kvm_hyp_handle_memory_fault);
 
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-       if (!__populate_fault_info(vcpu))
+       if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
                return true;
 
        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
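
The __alias() above lets several exception-class handlers share one body without an extra call frame. A standalone sketch of the underlying GCC attribute (hypothetical names):

    /* alias_demo.c - two symbols resolving to one definition */
    #include <stdio.h>

    static int handle_memory_fault(int code)
    {
            return code != 0;
    }

    /* both declarations alias the definition above */
    static int handle_iabt(int code)
            __attribute__((alias("handle_memory_fault")));
    static int handle_watchpt(int code)
            __attribute__((alias("handle_memory_fault")));

    int main(void)
    {
            printf("%d %d\n", handle_iabt(1), handle_watchpt(0));
            return 0;
    }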
index 2e9ec4a..a8813b2 100644 (file)
@@ -575,7 +575,7 @@ struct pkvm_mem_donation {
 
 struct check_walk_data {
        enum pkvm_page_state    desired;
-       enum pkvm_page_state    (*get_page_state)(kvm_pte_t pte);
+       enum pkvm_page_state    (*get_page_state)(kvm_pte_t pte, u64 addr);
 };
 
 static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
@@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
 {
        struct check_walk_data *d = ctx->arg;
 
-       if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
-               return -EINVAL;
-
-       return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
+       return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
 }
 
 static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
        return kvm_pgtable_walk(pgt, addr, size, &walker);
 }
 
-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
 {
+       if (!addr_is_allowed_memory(addr))
+               return PKVM_NOPAGE;
+
        if (!kvm_pte_valid(pte) && pte)
                return PKVM_NOPAGE;
 
@@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx
        return host_stage2_set_owner_locked(addr, size, host_id);
 }
 
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
 {
        if (!kvm_pte_valid(pte))
                return PKVM_NOPAGE;
index 71fa16a..7779149 100644 (file)
@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
+       [ESR_ELx_EC_WATCHPT_LOW]        = kvm_hyp_handle_watchpt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
 };
 
@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
+       [ESR_ELx_EC_WATCHPT_LOW]        = kvm_hyp_handle_watchpt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
 };
 
index 3d61bd3..95dae02 100644 (file)
@@ -58,8 +58,9 @@
 struct kvm_pgtable_walk_data {
        struct kvm_pgtable_walker       *walker;
 
+       const u64                       start;
        u64                             addr;
-       u64                             end;
+       const u64                       end;
 };
 
 static bool kvm_phys_is_valid(u64 phys)
@@ -201,20 +202,33 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
                .old    = READ_ONCE(*ptep),
                .arg    = data->walker->arg,
                .mm_ops = mm_ops,
+               .start  = data->start,
                .addr   = data->addr,
                .end    = data->end,
                .level  = level,
                .flags  = flags,
        };
        int ret = 0;
+       bool reload = false;
        kvm_pteref_t childp;
        bool table = kvm_pte_table(ctx.old, level);
 
-       if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+       if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
                ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
+               reload = true;
+       }
 
        if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
                ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+               reload = true;
+       }
+
+       /*
+        * Reload the page table after invoking the walker callback for leaf
+        * entries or after pre-order traversal, to allow the walker to descend
+        * into a newly installed or replaced table.
+        */
+       if (reload) {
                ctx.old = READ_ONCE(*ptep);
                table = kvm_pte_table(ctx.old, level);
        }
@@ -293,6 +307,7 @@ int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
                     struct kvm_pgtable_walker *walker)
 {
        struct kvm_pgtable_walk_data walk_data = {
+               .start  = ALIGN_DOWN(addr, PAGE_SIZE),
                .addr   = ALIGN_DOWN(addr, PAGE_SIZE),
                .end    = PAGE_ALIGN(walk_data.addr + size),
                .walker = walker,
@@ -349,7 +364,7 @@ int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
 }
 
 struct hyp_map_data {
-       u64                             phys;
+       const u64                       phys;
        kvm_pte_t                       attr;
 };
 
@@ -407,13 +422,12 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
 static bool hyp_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                    struct hyp_map_data *data)
 {
+       u64 phys = data->phys + (ctx->addr - ctx->start);
        kvm_pte_t new;
-       u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
 
        if (!kvm_block_mapping_supported(ctx, phys))
                return false;
 
-       data->phys += granule;
        new = kvm_init_valid_leaf_pte(phys, data->attr, ctx->level);
        if (ctx->old == new)
                return true;
@@ -576,7 +590,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
 }
 
 struct stage2_map_data {
-       u64                             phys;
+       const u64                       phys;
        kvm_pte_t                       attr;
        u8                              owner_id;
 
@@ -794,20 +808,43 @@ static bool stage2_pte_executable(kvm_pte_t pte)
        return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
 }
 
+static u64 stage2_map_walker_phys_addr(const struct kvm_pgtable_visit_ctx *ctx,
+                                      const struct stage2_map_data *data)
+{
+       u64 phys = data->phys;
+
+       /*
+        * Stage-2 walks to update ownership data are communicated to the map
+        * walker using an invalid PA. Avoid offsetting an already invalid PA,
+        * which could overflow and make the address valid again.
+        */
+       if (!kvm_phys_is_valid(phys))
+               return phys;
+
+       /*
+        * Otherwise, work out the correct PA based on how far the walk has
+        * gotten.
+        */
+       return phys + (ctx->addr - ctx->start);
+}
+
 static bool stage2_leaf_mapping_allowed(const struct kvm_pgtable_visit_ctx *ctx,
                                        struct stage2_map_data *data)
 {
+       u64 phys = stage2_map_walker_phys_addr(ctx, data);
+
        if (data->force_pte && (ctx->level < (KVM_PGTABLE_MAX_LEVELS - 1)))
                return false;
 
-       return kvm_block_mapping_supported(ctx, data->phys);
+       return kvm_block_mapping_supported(ctx, phys);
 }
 
 static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
                                      struct stage2_map_data *data)
 {
        kvm_pte_t new;
-       u64 granule = kvm_granule_size(ctx->level), phys = data->phys;
+       u64 phys = stage2_map_walker_phys_addr(ctx, data);
+       u64 granule = kvm_granule_size(ctx->level);
        struct kvm_pgtable *pgt = data->mmu->pgt;
        struct kvm_pgtable_mm_ops *mm_ops = ctx->mm_ops;
 
@@ -841,8 +878,6 @@ static int stage2_map_walker_try_leaf(const struct kvm_pgtable_visit_ctx *ctx,
 
        stage2_make_pte(ctx, new);
 
-       if (kvm_phys_is_valid(phys))
-               data->phys += granule;
        return 0;
 }
 
@@ -1297,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
        };
 
        WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+
+       WARN_ON(mm_ops->page_count(pgtable) != 1);
+       mm_ops->put_page(pgtable);
 }
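
The recurring idea in this file's hunks: phys becomes const and the walker derives the output address from walk progress (ctx->addr - ctx->start) instead of incrementing it a granule at a time, which also stays correct across callback-driven re-walks. A self-contained model (hypothetical names):

    /* walk_phys.c - deriving the PA from walk progress */
    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_PHYS UINT64_MAX

    static uint64_t walker_phys(uint64_t base_phys, uint64_t start_ipa,
                                uint64_t cur_ipa)
    {
            /*
             * Ownership-update walks pass an invalid PA; never offset
             * it, or the addition could wrap back into a valid value.
             */
            if (base_phys == INVALID_PHYS)
                    return base_phys;
            return base_phys + (cur_ipa - start_ipa);
    }

    int main(void)
    {
            /* mapping IPA 0x80000000... onto PA 0x40000000... */
            printf("0x%llx\n", (unsigned long long)
                   walker_phys(0x40000000, 0x80000000, 0x80002000));
            return 0;   /* prints 0x40002000 */
    }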
index 3d868e8..7a1aa51 100644 (file)
@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
        [ESR_ELx_EC_FP_ASIMD]           = kvm_hyp_handle_fpsimd,
        [ESR_ELx_EC_IABT_LOW]           = kvm_hyp_handle_iabt_low,
        [ESR_ELx_EC_DABT_LOW]           = kvm_hyp_handle_dabt_low,
+       [ESR_ELx_EC_WATCHPT_LOW]        = kvm_hyp_handle_watchpt_low,
        [ESR_ELx_EC_PAC]                = kvm_hyp_handle_ptrauth,
 };
 
index 64c3aec..0bd93a5 100644 (file)
@@ -204,7 +204,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
         * Size Fault at level 0, as if exceeding PARange.
         *
         * Non-LPAE guests will only get the external abort, as there
-        * is no way to to describe the ASF.
+        * is no way to describe the ASF.
         */
        if (vcpu_el1_is_32bit(vcpu) &&
            !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
index 45727d5..491ca7e 100644 (file)
@@ -694,45 +694,23 @@ out_unlock:
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-       struct perf_event_attr attr = { };
-       struct perf_event *event;
-       struct arm_pmu *pmu = NULL;
-
-       /*
-        * Create a dummy event that only counts user cycles. As we'll never
-        * leave this function with the event being live, it will never
-        * count anything. But it allows us to probe some of the PMU
-        * details. Yes, this is terrible.
-        */
-       attr.type = PERF_TYPE_RAW;
-       attr.size = sizeof(attr);
-       attr.pinned = 1;
-       attr.disabled = 0;
-       attr.exclude_user = 0;
-       attr.exclude_kernel = 1;
-       attr.exclude_hv = 1;
-       attr.exclude_host = 1;
-       attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-       attr.sample_period = GENMASK(63, 0);
+       struct arm_pmu *tmp, *pmu = NULL;
+       struct arm_pmu_entry *entry;
+       int cpu;
 
-       event = perf_event_create_kernel_counter(&attr, -1, current,
-                                                kvm_pmu_perf_overflow, &attr);
+       mutex_lock(&arm_pmus_lock);
 
-       if (IS_ERR(event)) {
-               pr_err_once("kvm: pmu event creation failed %ld\n",
-                           PTR_ERR(event));
-               return NULL;
-       }
+       cpu = smp_processor_id();
+       list_for_each_entry(entry, &arm_pmus, entry) {
+               tmp = entry->arm_pmu;
 
-       if (event->pmu) {
-               pmu = to_arm_pmu(event->pmu);
-               if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-                   pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-                       pmu = NULL;
+               if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+                       pmu = tmp;
+                       break;
+               }
        }
 
-       perf_event_disable(event);
-       perf_event_release_kernel(event);
+       mutex_unlock(&arm_pmus_lock);
 
        return pmu;
 }
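
The rewritten probe walks the list of registered PMU instances and takes the first one whose supported-CPU mask covers the current CPU, rather than creating a throwaway perf event just to peek at event->pmu. A userspace model of that selection (hypothetical types):

    /* pmu_pick.c - first registered instance covering this CPU */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stddef.h>

    struct pmu_entry {
            const char *name;
            cpu_set_t supported_cpus;
            struct pmu_entry *next;
    };

    static struct pmu_entry *probe_pmu(struct pmu_entry *head)
    {
            int cpu = sched_getcpu();   /* stands in for smp_processor_id() */
            struct pmu_entry *e;

            for (e = head; e; e = e->next)
                    if (CPU_ISSET(cpu, &e->supported_cpus))
                            return e;
            return NULL;                /* no instance supports this CPU */
    }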
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
                return -EBUSY;
 
        if (!kvm->arch.arm_pmu) {
-               /* No PMU set, get the default one */
+               /*
+                * No PMU set, get the default one.
+                *
+                * The observant among you will notice that the supported_cpus
+                * mask does not get updated for the default PMU even though it
+                * is quite possible the selected instance supports only a
+                * subset of cores in the system. This is intentional, and
+                * upholds the preexisting behavior on heterogeneous systems
+                * where vCPUs can be scheduled on any core but the guest
+                * counters could stop working.
+                */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
                if (!kvm->arch.arm_pmu)
                        return -ENODEV;
index 71b1209..753aa74 100644 (file)
@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static bool access_dcgsw(struct kvm_vcpu *vcpu,
+                        struct sys_reg_params *p,
+                        const struct sys_reg_desc *r)
+{
+       if (!kvm_has_mte(vcpu->kvm)) {
+               kvm_inject_undefined(vcpu);
+               return false;
+       }
+
+       /* Treat MTE S/W ops as we treat the classic ones: with contempt */
+       return access_dcsw(vcpu, p, r);
+}
+
 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
 {
        switch (r->aarch32_map) {
@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_DC_ISW), access_dcsw },
+       { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+       { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
        { SYS_DESC(SYS_DC_CSW), access_dcsw },
+       { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+       { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
        { SYS_DESC(SYS_DC_CISW), access_dcsw },
+       { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+       { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
 
        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
index 9d42c7c..6eafc2c 100644 (file)
@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
         * KVM io device for the redistributor that belongs to this VCPU.
         */
        if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-               mutex_lock(&vcpu->kvm->arch.config_lock);
+               mutex_lock(&vcpu->kvm->slots_lock);
                ret = vgic_register_redist_iodev(vcpu);
-               mutex_unlock(&vcpu->kvm->arch.config_lock);
+               mutex_unlock(&vcpu->kvm->slots_lock);
        }
        return ret;
 }
@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
 
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
- * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+ * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
  * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
  * @kvm: kvm struct pointer
  */
@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
 int kvm_vgic_map_resources(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
+       gpa_t dist_base;
        int ret = 0;
 
        if (likely(vgic_ready(kvm)))
                return 0;
 
+       mutex_lock(&kvm->slots_lock);
        mutex_lock(&kvm->arch.config_lock);
        if (vgic_ready(kvm))
                goto out;
@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
        else
                ret = vgic_v3_map_resources(kvm);
 
-       if (ret)
+       if (ret) {
                __kvm_vgic_destroy(kvm);
-       else
-               dist->ready = true;
+               goto out;
+       }
+       dist->ready = true;
+       dist_base = dist->vgic_dist_base;
+       mutex_unlock(&kvm->arch.config_lock);
+
+       ret = vgic_register_dist_iodev(kvm, dist_base,
+                                      kvm_vgic_global_state.type);
+       if (ret) {
+               kvm_err("Unable to register VGIC dist MMIO regions\n");
+               kvm_vgic_destroy(kvm);
+       }
+       mutex_unlock(&kvm->slots_lock);
+       return ret;
 
 out:
        mutex_unlock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->slots_lock);
        return ret;
 }
 
index 750e51e..5fe2365 100644 (file)
@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
 
 static int vgic_its_create(struct kvm_device *dev, u32 type)
 {
+       int ret;
        struct vgic_its *its;
 
        if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
        if (!its)
                return -ENOMEM;
 
+       mutex_lock(&dev->kvm->arch.config_lock);
+
        if (vgic_initialized(dev->kvm)) {
-               int ret = vgic_v4_init(dev->kvm);
+               ret = vgic_v4_init(dev->kvm);
                if (ret < 0) {
+                       mutex_unlock(&dev->kvm->arch.config_lock);
                        kfree(its);
                        return ret;
                }
@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
        /* Yep, even more trickery for lock ordering... */
 #ifdef CONFIG_LOCKDEP
-       mutex_lock(&dev->kvm->arch.config_lock);
        mutex_lock(&its->cmd_lock);
        mutex_lock(&its->its_lock);
        mutex_unlock(&its->its_lock);
        mutex_unlock(&its->cmd_lock);
-       mutex_unlock(&dev->kvm->arch.config_lock);
 #endif
 
        its->vgic_its_base = VGIC_ADDR_UNDEF;
@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
        dev->private = its;
 
-       return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+       ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+
+       mutex_unlock(&dev->kvm->arch.config_lock);
+
+       return ret;
 }
 
 static void vgic_its_destroy(struct kvm_device *kvm_dev)
index 35cfa26..212b73a 100644 (file)
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
                if (get_user(addr, uaddr))
                        return -EFAULT;
 
-       mutex_lock(&kvm->arch.config_lock);
+       /*
+        * Since we can't hold config_lock while registering the redistributor
+        * iodevs, take the slots_lock immediately.
+        */
+       mutex_lock(&kvm->slots_lock);
        switch (attr->attr) {
        case KVM_VGIC_V2_ADDR_TYPE_DIST:
                r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
        if (r)
                goto out;
 
+       mutex_lock(&kvm->arch.config_lock);
        if (write) {
                r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
                if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
        } else {
                addr = *addr_ptr;
        }
+       mutex_unlock(&kvm->arch.config_lock);
 
 out:
-       mutex_unlock(&kvm->arch.config_lock);
+       mutex_unlock(&kvm->slots_lock);
 
        if (!r && !write)
                r =  put_user(addr, uaddr);
index 472b18a..188d218 100644 (file)
@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
        struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
        struct vgic_redist_region *rdreg;
        gpa_t rd_base;
-       int ret;
+       int ret = 0;
+
+       lockdep_assert_held(&kvm->slots_lock);
+       mutex_lock(&kvm->arch.config_lock);
 
        if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
-               return 0;
+               goto out_unlock;
 
        /*
         * We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
         */
        rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
        if (!rdreg)
-               return 0;
+               goto out_unlock;
 
-       if (!vgic_v3_check_base(kvm))
-               return -EINVAL;
+       if (!vgic_v3_check_base(kvm)) {
+               ret = -EINVAL;
+               goto out_unlock;
+       }
 
        vgic_cpu->rdreg = rdreg;
        vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
        rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
        rd_dev->redist_vcpu = vcpu;
 
-       mutex_lock(&kvm->slots_lock);
+       mutex_unlock(&kvm->arch.config_lock);
+
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
                                      2 * SZ_64K, &rd_dev->dev);
-       mutex_unlock(&kvm->slots_lock);
-
        if (ret)
                return ret;
 
+       /* Protected by slots_lock */
        rdreg->free_index++;
        return 0;
+
+out_unlock:
+       mutex_unlock(&kvm->arch.config_lock);
+       return ret;
 }
 
 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
                /* The current c failed, so iterate over the previous ones. */
                int i;
 
-               mutex_lock(&kvm->slots_lock);
                for (i = 0; i < c; i++) {
                        vcpu = kvm_get_vcpu(kvm, i);
                        vgic_unregister_redist_iodev(vcpu);
                }
-               mutex_unlock(&kvm->slots_lock);
        }
 
        return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 {
        int ret;
 
+       mutex_lock(&kvm->arch.config_lock);
        ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
+       mutex_unlock(&kvm->arch.config_lock);
        if (ret)
                return ret;
 
@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
        if (ret) {
                struct vgic_redist_region *rdreg;
 
+               mutex_lock(&kvm->arch.config_lock);
                rdreg = vgic_v3_rdist_region_from_index(kvm, index);
                vgic_v3_free_redist_region(rdreg);
+               mutex_unlock(&kvm->arch.config_lock);
                return ret;
        }
 
index 1939c94..ff558c0 100644 (file)
@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
                             enum vgic_type type)
 {
        struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
-       int ret = 0;
        unsigned int len;
 
        switch (type) {
@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
        io_device->iodev_type = IODEV_DIST;
        io_device->redist_vcpu = NULL;
 
-       mutex_lock(&kvm->slots_lock);
-       ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
-                                     len, &io_device->dev);
-       mutex_unlock(&kvm->slots_lock);
-
-       return ret;
+       return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+                                      len, &io_device->dev);
 }
index 6456483..7e9cdb7 100644 (file)
@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
                return ret;
        }
 
-       ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
-       if (ret) {
-               kvm_err("Unable to register VGIC MMIO regions\n");
-               return ret;
-       }
-
        if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
                                            kvm_vgic_global_state.vcpu_base,
index 469d816..c3b8e13 100644 (file)
@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
-       int ret = 0;
        unsigned long c;
 
        kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
                return -EBUSY;
        }
 
-       ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
-       if (ret) {
-               kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-               return ret;
-       }
-
        if (kvm_vgic_global_state.has_gicv4_1)
                vgic_v4_configure_vsgis(kvm);
 
@@ -616,6 +609,10 @@ static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_PRO),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_PRO),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD_MAX),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE_MAX),
        {},
 };
 
index 3bb0034..c1c28fe 100644 (file)
@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
        }
 }
 
-/* Must be called with the kvm lock held */
 void vgic_v4_configure_vsgis(struct kvm *kvm)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        unsigned long i;
 
+       lockdep_assert_held(&kvm->arch.config_lock);
+
        kvm_arm_halt_guest(kvm);
 
        kvm_for_each_vcpu(i, vcpu, kvm) {
index 08978d0..7fe8ba1 100644 (file)
@@ -47,7 +47,7 @@ static void flush_context(void)
        int cpu;
        u64 vmid;
 
-       bitmap_clear(vmid_map, 0, NUM_USER_VMIDS);
+       bitmap_zero(vmid_map, NUM_USER_VMIDS);
 
        for_each_possible_cpu(cpu) {
                vmid = atomic64_xchg_relaxed(&per_cpu(active_vmids, cpu), 0);
@@ -182,8 +182,7 @@ int __init kvm_arm_vmid_alloc_init(void)
         */
        WARN_ON(NUM_USER_VMIDS - 1 <= num_possible_cpus());
        atomic64_set(&vmid_generation, VMID_FIRST_VERSION);
-       vmid_map = kcalloc(BITS_TO_LONGS(NUM_USER_VMIDS),
-                          sizeof(*vmid_map), GFP_KERNEL);
+       vmid_map = bitmap_zalloc(NUM_USER_VMIDS, GFP_KERNEL);
        if (!vmid_map)
                return -ENOMEM;
 
@@ -192,5 +191,5 @@ int __init kvm_arm_vmid_alloc_init(void)
 
 void __init kvm_arm_vmid_alloc_free(void)
 {
-       kfree(vmid_map);
+       bitmap_free(vmid_map);
 }
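
bitmap_zalloc()/bitmap_free() are drop-in replacements for the open-coded kcalloc()/kfree() pair, and bitmap_zero() replaces the full-range bitmap_clear(). The size arithmetic the helper hides, as a userspace model:

    /* bitmap_model.c - the allocation bitmap_zalloc() performs */
    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static unsigned long *bitmap_zalloc_model(size_t nbits)
    {
            /* zeroed array of just enough longs to hold nbits bits */
            return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
    }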
index 4aadcfb..a7bb200 100644 (file)
@@ -21,9 +21,10 @@ void copy_highpage(struct page *to, struct page *from)
 
        copy_page(kto, kfrom);
 
+       if (kasan_hw_tags_enabled())
+               page_kasan_tag_reset(to);
+
        if (system_supports_mte() && page_mte_tagged(from)) {
-               if (kasan_hw_tags_enabled())
-                       page_kasan_tag_reset(to);
                /* It's a new page, shouldn't have been tagged yet */
                WARN_ON_ONCE(!try_page_mte_tagging(to));
                mte_copy_page_tags(kto, kfrom);
index 9e0db5c..6045a51 100644 (file)
@@ -480,8 +480,8 @@ static void do_bad_area(unsigned long far, unsigned long esr,
        }
 }
 
-#define VM_FAULT_BADMAP                0x010000
-#define VM_FAULT_BADACCESS     0x020000
+#define VM_FAULT_BADMAP                ((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS     ((__force vm_fault_t)0x020000)
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
                                  unsigned int mm_flags, unsigned long vm_flags,
@@ -600,8 +600,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
                vma_end_read(vma);
                goto lock_mmap;
        }
-       fault = handle_mm_fault(vma, addr & PAGE_MASK,
-                               mm_flags | FAULT_FLAG_VMA_LOCK, regs);
+       fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
        vma_end_read(vma);
 
        if (!(fault & VM_FAULT_RETRY)) {
index b9f6908..ba468b5 100644 (file)
@@ -858,11 +858,17 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
 }
 
 static inline void __user *
-get_sigframe(struct ksignal *ksig, size_t frame_size)
+get_sigframe(struct ksignal *ksig, struct pt_regs *tregs, size_t frame_size)
 {
        unsigned long usp = sigsp(rdusp(), ksig);
+       unsigned long gap = 0;
 
-       return (void __user *)((usp - frame_size) & -8UL);
+       if (CPU_IS_020_OR_030 && tregs->format == 0xb) {
+               /* USP is unreliable so use worst-case value */
+               gap = 256;
+       }
+
+       return (void __user *)((usp - gap - frame_size) & -8UL);
 }
 
 static int setup_frame(struct ksignal *ksig, sigset_t *set,
@@ -880,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
                return -EFAULT;
        }
 
-       frame = get_sigframe(ksig, sizeof(*frame) + fsize);
+       frame = get_sigframe(ksig, tregs, sizeof(*frame) + fsize);
 
        if (fsize)
                err |= copy_to_user (frame + 1, regs + 1, fsize);
@@ -952,7 +958,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                return -EFAULT;
        }
 
-       frame = get_sigframe(ksig, sizeof(*frame));
+       frame = get_sigframe(ksig, tregs, sizeof(*frame));
 
        if (fsize)
                err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
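
For concreteness, the expression reserves the worst-case gap and then rounds down to an 8-byte boundary: with illustrative values usp = 0x1000, gap = 256 and frame_size = 92 (0x5c), (0x1000 - 0x100 - 0x5c) & -8UL = 0xea4 & ~7 = 0xea0.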
index c2f5498..675a866 100644 (file)
@@ -79,6 +79,7 @@ config MIPS
        select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
        select HAVE_MOD_ARCH_SPECIFIC
        select HAVE_NMI
+       select HAVE_PATA_PLATFORM
        select HAVE_PERF_EVENTS
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
index 5ab0430..6a3c890 100644 (file)
@@ -30,6 +30,7 @@
  *
  */
 
+#include <linux/dma-map-ops.h> /* for dma_default_coherent */
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -623,17 +624,18 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
                dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 
        /*
-        * There is an errata on the Au1200/Au1550 parts that could result
-        * in "stale" data being DMA'ed. It has to do with the snoop logic on
-        * the cache eviction buffer.  DMA_NONCOHERENT is on by default for
-        * these parts. If it is fixed in the future, these dma_cache_inv will
-        * just be nothing more than empty macros. See io.h.
+        * There is an erratum on certain Au1200/Au1550 revisions that could
+        * result in "stale" data being DMA'ed. It has to do with the snoop
+        * logic on the cache eviction buffer.  dma_default_coherent is set
+        * to false on these parts.
         */
-       dma_cache_wback_inv((unsigned long)buf, nbytes);
+       if (!dma_default_coherent)
+               dma_cache_wback_inv(KSEG0ADDR(buf), nbytes);
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
        wmb(); /* drain writebuffer */
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;
+       wmb(); /* force doorbell write out to dma engine */
 
        /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
@@ -685,17 +687,18 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
                          dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
 #endif
        /*
-        * There is an errata on the Au1200/Au1550 parts that could result in
-        * "stale" data being DMA'ed. It has to do with the snoop logic on the
-        * cache eviction buffer.  DMA_NONCOHERENT is on by default for these
-        * parts. If it is fixed in the future, these dma_cache_inv will just
-        * be nothing more than empty macros. See io.h.
+        * There is an erratum on certain Au1200/Au1550 revisions that could
+        * result in "stale" data being DMA'ed. It has to do with the snoop
+        * logic on the cache eviction buffer.  dma_default_coherent is set
+        * to false on these parts.
         */
-       dma_cache_inv((unsigned long)buf, nbytes);
+       if (!dma_default_coherent)
+               dma_cache_inv(KSEG0ADDR(buf), nbytes);
        dp->dscr_cmd0 |= DSCR_CMD0_V;   /* Let it rip */
        wmb(); /* drain writebuffer */
        dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
        ctp->chan_ptr->ddma_dbell = 0;
+       wmb(); /* force doorbell write out to dma engine */
 
        /* Get next descriptor pointer. */
        ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
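
The added wmb() closes a window where the doorbell store could linger in a write buffer while the CPU runs ahead. The publish-then-ring pattern, modeled with C11 fences standing in for the kernel's wmb() (the real code writes MMIO):

    /* doorbell.c - descriptor must be visible before the ring */
    #include <stdatomic.h>

    struct desc { unsigned int cmd0; };

    static struct desc dscr;
    static _Atomic unsigned int ddma_dbell;

    static void ring_doorbell(void)
    {
            dscr.cmd0 |= 1u << 31;                      /* mark valid, like DSCR_CMD0_V */
            atomic_thread_fence(memory_order_release);  /* first wmb(): drain descriptor */
            atomic_store_explicit(&ddma_dbell, 0, memory_order_relaxed);
            atomic_thread_fence(memory_order_seq_cst);  /* second wmb(): push doorbell */
    }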
index 6d15a39..e79adcb 100644 (file)
@@ -1502,6 +1502,10 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
                        break;
                }
                break;
+       case PRID_IMP_NETLOGIC_AU13XX:
+               c->cputype = CPU_ALCHEMY;
+               __cpu_name[cpu] = "Au1300";
+               break;
        }
 }
 
@@ -1863,6 +1867,7 @@ void cpu_probe(void)
                cpu_probe_mips(c, cpu);
                break;
        case PRID_COMP_ALCHEMY:
+       case PRID_COMP_NETLOGIC:
                cpu_probe_alchemy(c, cpu);
                break;
        case PRID_COMP_SIBYTE:
index febdc55..c0e6513 100644 (file)
@@ -158,10 +158,6 @@ static unsigned long __init init_initrd(void)
                pr_err("initrd start must be page aligned\n");
                goto disable;
        }
-       if (initrd_start < PAGE_OFFSET) {
-               pr_err("initrd start < PAGE_OFFSET\n");
-               goto disable;
-       }
 
        /*
         * Sanitize initrd addresses. For example firmware
@@ -174,6 +170,11 @@ static unsigned long __init init_initrd(void)
        initrd_end = (unsigned long)__va(end);
        initrd_start = (unsigned long)__va(__pa(initrd_start));
 
+       if (initrd_start < PAGE_OFFSET) {
+               pr_err("initrd start < PAGE_OFFSET\n");
+               goto disable;
+       }
+
        ROOT_DEV = Root_RAM0;
        return PFN_UP(end);
 disable:
index 466a255..967bde6 100644 (file)
@@ -130,6 +130,10 @@ config PM
 config STACKTRACE_SUPPORT
        def_bool y
 
+config LOCKDEP_SUPPORT
+       bool
+       default y
+
 config ISA_DMA_API
        bool
 
index f66554c..3a059cb 100644 (file)
@@ -1 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
+#
+config LIGHTWEIGHT_SPINLOCK_CHECK
+       bool "Enable lightweight spinlock checks"
+       depends on SMP && !DEBUG_SPINLOCK
+       default y
+       help
+         Add checks with low performance impact to the spinlock functions
+         to catch memory overwrites at runtime. For more advanced
+         spinlock debugging you should choose the DEBUG_SPINLOCK option
+         which will detect uninitialized spinlocks too.
+         If unsure, say Y here.
index 0bdee67..c8b6928 100644 (file)
@@ -48,6 +48,10 @@ void flush_dcache_page(struct page *page);
 
 #define flush_dcache_mmap_lock(mapping)                xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)      xa_unlock_irq(&mapping->i_pages)
+#define flush_dcache_mmap_lock_irqsave(mapping, flags)         \
+               xa_lock_irqsave(&mapping->i_pages, flags)
+#define flush_dcache_mmap_unlock_irqrestore(mapping, flags)    \
+               xa_unlock_irqrestore(&mapping->i_pages, flags)
 
 #define flush_icache_page(vma,page)    do {            \
        flush_kernel_dcache_page_addr(page_address(page)); \
index a6e5d66..edfcb98 100644 (file)
@@ -7,10 +7,26 @@
 #include <asm/processor.h>
 #include <asm/spinlock_types.h>
 
+#define SPINLOCK_BREAK_INSN    0x0000c006      /* break 6,6 */
+
+static inline void arch_spin_val_check(int lock_val)
+{
+       if (IS_ENABLED(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK))
+               asm volatile(   "andcm,= %0,%1,%%r0\n"
+                               ".word %2\n"
+               : : "r" (lock_val), "r" (__ARCH_SPIN_LOCK_UNLOCKED_VAL),
+                       "i" (SPINLOCK_BREAK_INSN));
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
-       volatile unsigned int *a = __ldcw_align(x);
-       return READ_ONCE(*a) == 0;
+       volatile unsigned int *a;
+       int lock_val;
+
+       a = __ldcw_align(x);
+       lock_val = READ_ONCE(*a);
+       arch_spin_val_check(lock_val);
+       return (lock_val == 0);
 }
 
 static inline void arch_spin_lock(arch_spinlock_t *x)
@@ -18,9 +34,18 @@ static inline void arch_spin_lock(arch_spinlock_t *x)
        volatile unsigned int *a;
 
        a = __ldcw_align(x);
-       while (__ldcw(a) == 0)
+       do {
+               int lock_val_old;
+
+               lock_val_old = __ldcw(a);
+               arch_spin_val_check(lock_val_old);
+               if (lock_val_old)
+                       return; /* got lock */
+
+               /* wait until we should try to get the lock again */
                while (*a == 0)
                        continue;
+       } while (1);
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *x)
@@ -29,15 +54,19 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
 
        a = __ldcw_align(x);
        /* Release with ordered store. */
-       __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+       __asm__ __volatile__("stw,ma %0,0(%1)"
+               : : "r"(__ARCH_SPIN_LOCK_UNLOCKED_VAL), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
 {
        volatile unsigned int *a;
+       int lock_val;
 
        a = __ldcw_align(x);
-       return __ldcw(a) != 0;
+       lock_val = __ldcw(a);
+       arch_spin_val_check(lock_val);
+       return lock_val != 0;
 }
 
 /*
index ca39ee3..d659340 100644 (file)
@@ -2,13 +2,17 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
+#define __ARCH_SPIN_LOCK_UNLOCKED_VAL  0x1a46
+
 typedef struct {
 #ifdef CONFIG_PA20
        volatile unsigned int slock;
-# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED_VAL }
 #else
        volatile unsigned int lock[4];
-# define __ARCH_SPIN_LOCK_UNLOCKED     { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED     \
+       { { __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL, \
+           __ARCH_SPIN_LOCK_UNLOCKED_VAL, __ARCH_SPIN_LOCK_UNLOCKED_VAL } }
 #endif
 } arch_spinlock_t;
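
Taken together, the Kconfig option, arch_spin_val_check() and the new magic unlocked value implement a cheap poison check: a healthy parisc lock word is either 0 (held, since ldcw writes 0) or 0x1a46 (free), so any bits outside the magic pattern mean the lock was overwritten, and the "andcm,=" + break sequence traps into the handler added in traps.c further down. A plain-C model of the check, with assert() standing in for the break trap:

    #include <assert.h>

    #define UNLOCKED_MAGIC 0x1a46  /* __ARCH_SPIN_LOCK_UNLOCKED_VAL above */

    static void spin_val_check(unsigned int lock_val)
    {
        /* "andcm,= lock_val,magic,%r0" nullifies the break when
         * (lock_val & ~magic) == 0, i.e. the word is 0 or a subset
         * of the magic bits; anything else is treated as corruption. */
        assert((lock_val & ~UNLOCKED_MAGIC) == 0);
    }

Like the kernel check, this is deliberately lightweight: a corrupt value that happens to be a strict subset of the magic bits slips through, which is the trade-off versus full DEBUG_SPINLOCK.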
 
index 66f5672..25c4d6c 100644 (file)
@@ -25,7 +25,7 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 {
        struct alt_instr *entry;
        int index = 0, applied = 0;
-       int num_cpus = num_online_cpus();
+       int num_cpus = num_present_cpus();
        u16 cond_check;
 
        cond_check = ALT_COND_ALWAYS |
index 1d3b8bc..ca4a302 100644 (file)
@@ -399,6 +399,7 @@ void flush_dcache_page(struct page *page)
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        unsigned long count = 0;
+       unsigned long flags;
        pgoff_t pgoff;
 
        if (mapping && !mapping_mapped(mapping)) {
@@ -420,7 +421,7 @@ void flush_dcache_page(struct page *page)
         * to flush one address here for them all to become coherent
         * on machines that support equivalent aliasing
         */
-       flush_dcache_mmap_lock(mapping);
+       flush_dcache_mmap_lock_irqsave(mapping, flags);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;
@@ -460,7 +461,7 @@ void flush_dcache_page(struct page *page)
                }
                WARN_ON(++count == 4096);
        }
-       flush_dcache_mmap_unlock(mapping);
+       flush_dcache_mmap_unlock_irqrestore(mapping, flags);
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
index ba87f79..71ed539 100644 (file)
@@ -446,11 +446,27 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
 {
+       /*
+        * fdc: the data cache line is written back to memory if and only
+        * if it is dirty, and is then invalidated from the data cache.
+        */
        flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
 }
 
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                enum dma_data_direction dir)
 {
-       flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
+       unsigned long addr = (unsigned long) phys_to_virt(paddr);
+
+       switch (dir) {
+       case DMA_TO_DEVICE:
+       case DMA_BIDIRECTIONAL:
+               flush_kernel_dcache_range(addr, size);
+               return;
+       case DMA_FROM_DEVICE:
+               purge_kernel_dcache_range_asm(addr, addr + size);
+               return;
+       default:
+               BUG();
+       }
 }
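
These hooks sit underneath the streaming DMA API, so the observable effect is on drivers that bounce buffer ownership between CPU and device: the DMA_FROM_DEVICE leg of the sync can now discard (purge) stale lines instead of writing them back, which is both cheaper and safer for receive buffers. A hedged driver-side sketch; rx_start(), rx_wait(), process() and priv are hypothetical device helpers:

    #include <linux/dma-mapping.h>

    void rx_start(void *priv);
    void rx_wait(void *priv);
    void process(void *buf, size_t len);

    static void rx_once(struct device *dev, void *priv, dma_addr_t handle,
                        void *buf, size_t len)
    {
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
        rx_start(priv);          /* device DMAs into the buffer */
        rx_wait(priv);
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* on parisc this leg now purges the lines, so the CPU re-reads
         * device-written data rather than stale cache contents */
        process(buf, len);
    }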
index 97c6f87..24411ab 100644 (file)
@@ -122,13 +122,18 @@ void machine_power_off(void)
        /* It seems we have no way to power the system off via
         * software. The user has to press the button himself. */
 
-       printk(KERN_EMERG "System shut down completed.\n"
-              "Please power this system off now.");
+       printk("Power off or press RETURN to reboot.\n");
 
        /* prevent soft lockup/stalled CPU messages for endless loop. */
        rcu_sysrq_start();
        lockup_detector_soft_poweroff();
-       for (;;);
+       while (1) {
+               /* reboot if user presses RETURN key */
+               if (pdc_iodc_getc() == 13) {
+                       printk("Rebooting...\n");
+                       machine_restart(NULL);
+               }
+       }
 }
 
 void (*pm_power_off)(void);
index f9696fb..304eebd 100644 (file)
 #include <linux/kgdb.h>
 #include <linux/kprobes.h>
 
+#if defined(CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK)
+#include <asm/spinlock.h>
+#endif
+
 #include "../math-emu/math-emu.h"      /* for handle_fpe() */
 
 static void parisc_show_stack(struct task_struct *task,
@@ -291,24 +295,30 @@ static void handle_break(struct pt_regs *regs)
        }
 
 #ifdef CONFIG_KPROBES
-       if (unlikely(iir == PARISC_KPROBES_BREAK_INSN)) {
+       if (unlikely(iir == PARISC_KPROBES_BREAK_INSN && !user_mode(regs))) {
                parisc_kprobe_break_handler(regs);
                return;
        }
-       if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2)) {
+       if (unlikely(iir == PARISC_KPROBES_BREAK_INSN2 && !user_mode(regs))) {
                parisc_kprobe_ss_handler(regs);
                return;
        }
 #endif
 
 #ifdef CONFIG_KGDB
-       if (unlikely(iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
-               iir == PARISC_KGDB_BREAK_INSN)) {
+       if (unlikely((iir == PARISC_KGDB_COMPILED_BREAK_INSN ||
+               iir == PARISC_KGDB_BREAK_INSN)) && !user_mode(regs)) {
                kgdb_handle_exception(9, SIGTRAP, 0, regs);
                return;
        }
 #endif
 
+#ifdef CONFIG_LIGHTWEIGHT_SPINLOCK_CHECK
+       if ((iir == SPINLOCK_BREAK_INSN) && !user_mode(regs)) {
+               die_if_kernel("Spinlock was trashed", regs, 1);
+       }
+#endif
+
        if (unlikely(iir != GDB_BREAK_INSN))
                parisc_printk_ratelimited(0, regs,
                        KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
index 539d1f0..bff5820 100644 (file)
@@ -906,11 +906,17 @@ config DATA_SHIFT
 
 config ARCH_FORCE_MAX_ORDER
        int "Order of maximal physically contiguous allocations"
+       range 7 8 if PPC64 && PPC_64K_PAGES
        default "8" if PPC64 && PPC_64K_PAGES
+       range 12 12 if PPC64 && !PPC_64K_PAGES
        default "12" if PPC64 && !PPC_64K_PAGES
+       range 8 10 if PPC32 && PPC_16K_PAGES
        default "8" if PPC32 && PPC_16K_PAGES
+       range 6 10 if PPC32 && PPC_64K_PAGES
        default "6" if PPC32 && PPC_64K_PAGES
+       range 4 10 if PPC32 && PPC_256K_PAGES
        default "4" if PPC32 && PPC_256K_PAGES
+       range 10 10
        default "10"
        help
          The kernel page allocator limits the size of maximal physically
index 85cde5b..771b794 100644 (file)
@@ -34,8 +34,6 @@ endif
 
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
                 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-                $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
-                $(call cc-option,-mno-mma) \
                 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
                 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
                 $(LINUXINCLUDE)
@@ -71,6 +69,10 @@ BOOTAFLAGS   := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
 
 BOOTARFLAGS    := -crD
 
+BOOTCFLAGS     += $(call cc-option,-mno-prefixed) \
+                  $(call cc-option,-mno-pcrel) \
+                  $(call cc-option,-mno-mma)
+
 ifdef CONFIG_CC_IS_CLANG
 BOOTCFLAGS += $(CLANG_FLAGS)
 BOOTAFLAGS += $(CLANG_FLAGS)
index 7113f93..ad18725 100644 (file)
@@ -96,7 +96,7 @@ config CRYPTO_AES_PPC_SPE
 
 config CRYPTO_AES_GCM_P10
        tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
-       depends on PPC64 && CPU_LITTLE_ENDIAN
+       depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
        select CRYPTO_LIB_AES
        select CRYPTO_ALGAPI
        select CRYPTO_AEAD
index 05c7486..7b4f516 100644 (file)
@@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
-aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
+aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
 
 quiet_cmd_perl = PERL    $@
       cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
 
-targets += aesp8-ppc.S ghashp8-ppc.S
+targets += aesp10-ppc.S ghashp10-ppc.S
 
-$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
        $(call if_changed,perl)
 
-OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
-OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
index bd3475f..4b6e899 100644 (file)
@@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("aes");
 
-asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
                                      void *key);
-asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
+asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
 asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
                                    void *rkey, u8 *iv, void *Xi);
 asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
                                    void *rkey, u8 *iv, void *Xi);
 asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
-asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
+asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
                unsigned char *aad, unsigned int alen);
 
 struct aes_key {
@@ -93,7 +93,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
        gctx->aadLen = alen;
        i = alen & ~0xf;
        if (i) {
-               gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
+               gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
                aad += i;
                alen -= i;
        }
@@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
                        nXi[i] ^= aad[i];
 
                memset(gctx->aad_hash, 0, 16);
-               gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
+               gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
        } else {
                memcpy(gctx->aad_hash, nXi, 16);
        }
@@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
 {
        __be32 counter = cpu_to_be32(1);
 
-       aes_p8_encrypt(hash->H, hash->H, rdkey);
+       aes_p10_encrypt(hash->H, hash->H, rdkey);
        set_subkey(hash->H);
        gcm_init_htable(hash->Htable+32, hash->H);
 
@@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
        /*
         * Encrypt counter vector as iv tag and increment counter.
         */
-       aes_p8_encrypt(iv, gctx->ivtag, rdkey);
+       aes_p10_encrypt(iv, gctx->ivtag, rdkey);
 
        counter = cpu_to_be32(2);
        *((__be32 *)(iv+12)) = counter;
@@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
        /*
         * hash (AAD len and len)
         */
-       gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
+       gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);
 
        for (i = 0; i < 16; i++)
                hash->Htable[i] ^= gctx->ivtag[i];
@@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
        int ret;
 
        vsx_begin();
-       ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+       ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
        vsx_end();
 
        return ret ? -EINVAL : 0;
similarity index 99%
rename from arch/powerpc/crypto/aesp8-ppc.pl
rename to arch/powerpc/crypto/aesp10-ppc.pl
index 1f22aec..2c06ce2 100644 (file)
@@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl";
 open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
 
 $FRAME=8*$SIZE_T;
-$prefix="aes_p8";
+$prefix="aes_p10";
 
 $sp="r1";
 $vrsave="r12";
similarity index 97%
rename from arch/powerpc/crypto/ghashp8-ppc.pl
rename to arch/powerpc/crypto/ghashp10-ppc.pl
index b56603b..27a6b0b 100644 (file)
@@ -64,7 +64,7 @@ $code=<<___;
 
 .text
 
-.globl .gcm_init_p8
+.globl .gcm_init_p10
        lis             r0,0xfff0
        li              r8,0x10
        mfspr           $vrsave,256
@@ -110,7 +110,7 @@ $code=<<___;
        .long           0
        .byte           0,12,0x14,0,0,0,2,0
        .long           0
-.size  .gcm_init_p8,.-.gcm_init_p8
+.size  .gcm_init_p10,.-.gcm_init_p10
 
 .globl .gcm_init_htable
        lis             r0,0xfff0
@@ -237,7 +237,7 @@ $code=<<___;
        .long           0
 .size  .gcm_init_htable,.-.gcm_init_htable
 
-.globl .gcm_gmult_p8
+.globl .gcm_gmult_p10
        lis             r0,0xfff8
        li              r8,0x10
        mfspr           $vrsave,256
@@ -283,9 +283,9 @@ $code=<<___;
        .long           0
        .byte           0,12,0x14,0,0,0,2,0
        .long           0
-.size  .gcm_gmult_p8,.-.gcm_gmult_p8
+.size  .gcm_gmult_p10,.-.gcm_gmult_p10
 
-.globl .gcm_ghash_p8
+.globl .gcm_ghash_p10
        lis             r0,0xfff8
        li              r8,0x10
        mfspr           $vrsave,256
@@ -350,7 +350,7 @@ Loop:
        .long           0
        .byte           0,12,0x14,0,0,0,4,0
        .long           0
-.size  .gcm_ghash_p8,.-.gcm_ghash_p8
+.size  .gcm_ghash_p10,.-.gcm_ghash_p10
 
 .asciz  "GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
 .align  2
index 678b5bd..34e14df 100644 (file)
@@ -205,7 +205,6 @@ extern void iommu_register_group(struct iommu_table_group *table_group,
                                 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct iommu_table_group *table_group,
                struct device *dev);
-extern void iommu_del_device(struct device *dev);
 extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
                unsigned long entry, unsigned long *hpa,
                enum dma_data_direction *direction);
@@ -229,10 +228,6 @@ static inline int iommu_add_device(struct iommu_table_group *table_group,
 {
        return 0;
 }
-
-static inline void iommu_del_device(struct device *dev)
-{
-}
 #endif /* !CONFIG_IOMMU_API */
 
 u64 dma_iommu_get_required_mask(struct device *dev);
index 038ce8d..8920862 100644 (file)
@@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 /* We support DMA to/from any memory page via the iommu */
 int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-       struct iommu_table *tbl = get_iommu_table_base(dev);
+       struct iommu_table *tbl;
 
        if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
                /*
@@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
                return 1;
        }
 
+       tbl = get_iommu_table_base(dev);
+
        if (!tbl) {
                dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
                return 0;
index 0089dd4..67f0b01 100644 (file)
@@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << tbl->it_page_shift;
-               dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
+               dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
 
                DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
                            npages, entry, dma_addr);
@@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        unsigned int order;
        unsigned int nio_pages, io_order;
        struct page *page;
+       int tcesize = (1 << tbl->it_page_shift);
 
        size = PAGE_ALIGN(size);
        order = get_order(size);
@@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
        memset(ret, 0, size);
 
        /* Set up tces to cover the allocated range */
-       nio_pages = size >> tbl->it_page_shift;
+       nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
+
        io_order = get_iommu_order(size, tbl);
        mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
                              mask >> tbl->it_page_shift, io_order, 0);
@@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
                free_pages((unsigned long)ret, order);
                return NULL;
        }
-       *dma_handle = mapping;
+
+       *dma_handle = mapping | ((u64)ret & (tcesize - 1));
        return ret;
 }
 
@@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                unsigned int nio_pages;
 
                size = PAGE_ALIGN(size);
-               nio_pages = size >> tbl->it_page_shift;
+               nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
                iommu_free(tbl, dma_handle, nio_pages);
                size = PAGE_ALIGN(size);
                free_pages((unsigned long)vaddr, get_order(size));
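
Both changes address IOMMUs whose TCE page size exceeds PAGE_SIZE: the number of IOMMU pages must be rounded up from the (page-aligned) allocation size, and since the buffer is then not necessarily TCE-aligned, the low bits of the kernel address have to be folded into the returned DMA handle. The shape of the calculation, with iommu_map_pages() as a hypothetical stand-in for the iommu_alloc() call above:

    dma_addr_t iommu_map_pages(void *va, unsigned int npages);  /* hypothetical */

    static dma_addr_t map_coherent(void *ret, size_t size, unsigned int page_shift)
    {
        unsigned long tcesize = 1UL << page_shift;       /* IOMMU (TCE) page size */
        unsigned int nio_pages = ALIGN(size, tcesize) >> page_shift; /* round up */
        dma_addr_t base = iommu_map_pages(ret, nio_pages);

        /* carry the sub-TCE-page offset of the buffer in the handle */
        return base | ((unsigned long)ret & (tcesize - 1));
    }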
@@ -1168,23 +1171,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_add_device);
 
-void iommu_del_device(struct device *dev)
-{
-       /*
-        * Some devices might not have IOMMU table and group
-        * and we needn't detach them from the associated
-        * IOMMU groups
-        */
-       if (!device_iommu_mapped(dev)) {
-               pr_debug("iommu_tce: skipping device %s with no tbl\n",
-                        dev_name(dev));
-               return;
-       }
-
-       iommu_group_remove_device(dev);
-}
-EXPORT_SYMBOL_GPL(iommu_del_device);
-
 /*
  * A simple iommu_table_group_ops which only allows reusing the existing
  * iommu_table. This handles VFIO for POWER7 or the nested KVM.
index 85bdd7d..48e0eaf 100644 (file)
@@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node,
        }
 
 inval_range:
-       if (!phb_io_base_phys) {
+       if (phb_io_base_phys) {
                pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n");
                remap_isa_base(phb_io_base_phys, 0x10000);
+               return 0;
        }
-       return 0;
+       return -EINVAL;
 }
 
 
index 26245aa..2297aa7 100644 (file)
@@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
                                  pte_t entry, unsigned long address, int psize)
 {
        struct mm_struct *mm = vma->vm_mm;
-       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
-                                             _PAGE_RW | _PAGE_EXEC);
+       unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
+                                             _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
        unsigned long change = pte_val(entry) ^ pte_val(*ptep);
        /*
index e93aefc..37043df 100644 (file)
@@ -101,6 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                bpf_hdr = jit_data->header;
                proglen = jit_data->proglen;
                extra_pass = true;
+               /* During extra pass, ensure index is reset before repopulating extable entries */
+               cgctx.exentry_idx = 0;
                goto skip_init_ctx;
        }
 
index 0d9b760..3e2e252 100644 (file)
@@ -265,6 +265,7 @@ config CPM2
 config FSL_ULI1575
        bool "ULI1575 PCIe south bridge support"
        depends on FSL_SOC_BOOKE || PPC_86xx
+       depends on PCI
        select FSL_PCI
        select GENERIC_ISA_DMA
        help
index 233a50e..7725492 100644 (file)
@@ -865,28 +865,3 @@ void __init pnv_pci_init(void)
        /* Configure IOMMU DMA hooks */
        set_pci_dma_ops(&dma_iommu_ops);
 }
-
-static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_DEL_DEVICE:
-               iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block pnv_tce_iommu_bus_nb = {
-       .notifier_call = pnv_tce_iommu_bus_notifier,
-};
-
-static int __init pnv_tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
-       return 0;
-}
-machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
index 7464fa6..d59e8a9 100644 (file)
@@ -91,19 +91,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
 static void iommu_pseries_free_group(struct iommu_table_group *table_group,
                const char *node_name)
 {
-       struct iommu_table *tbl;
-
        if (!table_group)
                return;
 
-       tbl = table_group->tables[0];
 #ifdef CONFIG_IOMMU_API
        if (table_group->group) {
                iommu_group_put(table_group->group);
                BUG_ON(table_group->group);
        }
 #endif
-       iommu_tce_table_put(tbl);
+
+       /* Default DMA window table is at index 0, while the DDW table is
+        * at index 1. SR-IOV adapters only have a table at index 1.
+        */
+       if (table_group->tables[0])
+               iommu_tce_table_put(table_group->tables[0]);
+
+       if (table_group->tables[1])
+               iommu_tce_table_put(table_group->tables[1]);
 
        kfree(table_group);
 }
@@ -312,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
 {
        u64 rc;
+       long rpages = npages;
+       unsigned long limit;
 
        if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
                return tce_free_pSeriesLP(tbl->it_index, tcenum,
                                          tbl->it_page_shift, npages);
 
-       rc = plpar_tce_stuff((u64)tbl->it_index,
-                            (u64)tcenum << tbl->it_page_shift, 0, npages);
+       do {
+               limit = min_t(unsigned long, rpages, 512);
+
+               rc = plpar_tce_stuff((u64)tbl->it_index,
+                                    (u64)tcenum << tbl->it_page_shift, 0, limit);
+
+               rpages -= limit;
+               tcenum += limit;
+       } while (rpages > 0 && !rc);
 
        if (rc && printk_ratelimit()) {
                printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
@@ -1695,31 +1709,6 @@ static int __init disable_multitce(char *str)
 
 __setup("multitce=", disable_multitce);
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-               unsigned long action, void *data)
-{
-       struct device *dev = data;
-
-       switch (action) {
-       case BUS_NOTIFY_DEL_DEVICE:
-               iommu_del_device(dev);
-               return 0;
-       default:
-               return 0;
-       }
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-       .notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-       bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-       return 0;
-}
-machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
-
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
                                             struct pci_dev *pdev)
index 728d3c2..70c4c59 100644 (file)
@@ -88,7 +88,7 @@ static unsigned long ndump = 64;
 static unsigned long nidump = 16;
 static unsigned long ncsum = 4096;
 static int termch;
-static char tmpstr[128];
+static char tmpstr[KSYM_NAME_LEN];
 static int tracing_enabled;
 
 static long bus_error_jmp[JMP_BUF_LEN];
index 348c0fa..2bb0c38 100644 (file)
@@ -799,8 +799,11 @@ menu "Power management options"
 
 source "kernel/power/Kconfig"
 
+# Hibernation is only possible on systems where the SBI implementation
+# either marks its reserved memory as inaccessible to Linux or does not
+# run from the same memory as Linux
 config ARCH_HIBERNATION_POSSIBLE
-       def_bool y
+       def_bool NONPORTABLE
 
 config ARCH_HIBERNATION_HEADER
        def_bool HIBERNATION
index a105596..7b2637c 100644 (file)
@@ -1,2 +1,6 @@
+ifdef CONFIG_RELOCATABLE
+KBUILD_CFLAGS += -fno-pie
+endif
+
 obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
 obj-$(CONFIG_ERRATA_THEAD) += thead/
index fe6f230..ce1ebda 100644 (file)
@@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                               unsigned long addr, pte_t *ptep,
                               pte_t pte, int dirty);
 
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(pte_t *ptep);
+
 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
 #define arch_make_huge_pte arch_make_huge_pte
 
index d42c901..665bbc9 100644 (file)
 
 #include <linux/perf_event.h>
 #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+       (regs)->epc = (__ip); \
+       (regs)->s0 = (unsigned long) __builtin_frame_address(0); \
+       (regs)->sp = current_stack_pointer; \
+       (regs)->status = SR_PP; \
+}
 #endif /* _ASM_RISCV_PERF_EVENT_H */
index fbdccc2..153864e 100644 (file)
@@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
 endif
+ifdef CONFIG_RELOCATABLE
+CFLAGS_alternative.o += -fno-pie
+CFLAGS_cpufeature.o += -fno-pie
+endif
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_alternative.o := n
 KASAN_SANITIZE_cpufeature.o := n
index c40139e..8265ff4 100644 (file)
@@ -4,3 +4,5 @@ obj-$(CONFIG_RETHOOK)           += rethook.o rethook_trampoline.o
 obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
 obj-$(CONFIG_UPROBES)          += uprobes.o decode-insn.o simulate-insn.o
 CFLAGS_REMOVE_simulate-insn.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
index a163a3e..e0ef56d 100644 (file)
@@ -3,6 +3,30 @@
 #include <linux/err.h>
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
+pte_t huge_ptep_get(pte_t *ptep)
+{
+       unsigned long pte_num;
+       int i;
+       pte_t orig_pte = ptep_get(ptep);
+
+       if (!pte_present(orig_pte) || !pte_napot(orig_pte))
+               return orig_pte;
+
+       pte_num = napot_pte_num(napot_cont_order(orig_pte));
+
+       for (i = 0; i < pte_num; i++, ptep++) {
+               pte_t pte = ptep_get(ptep);
+
+               if (pte_dirty(pte))
+                       orig_pte = pte_mkdirty(orig_pte);
+
+               if (pte_young(pte))
+                       orig_pte = pte_mkyoung(orig_pte);
+       }
+
+       return orig_pte;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm,
                      struct vm_area_struct *vma,
                      unsigned long addr,
@@ -218,6 +242,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 {
        pte_t pte = ptep_get(ptep);
        unsigned long order;
+       pte_t orig_pte;
        int i, pte_num;
 
        if (!pte_napot(pte)) {
@@ -228,9 +253,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
        order = napot_cont_order(pte);
        pte_num = napot_pte_num(order);
        ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
+       orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
+
+       orig_pte = pte_wrprotect(orig_pte);
 
        for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
-               ptep_set_wrprotect(mm, addr, ptep);
+               set_pte_at(mm, addr, ptep, orig_pte);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
index 747e5b1..c6bb966 100644 (file)
@@ -922,9 +922,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
 static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
                                               uintptr_t dtb_pa)
 {
+#ifndef CONFIG_BUILTIN_DTB
        uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
 
-#ifndef CONFIG_BUILTIN_DTB
        /* Make sure the fdt fixmap address is always aligned on PMD size */
        BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
 
index 8983837..6b2a051 100644 (file)
@@ -10,6 +10,7 @@ CFLAGS_REMOVE_ftrace.o                = $(CC_FLAGS_FTRACE)
 
 # Do not trace early setup code
 CFLAGS_REMOVE_early.o          = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_rethook.o                = $(CC_FLAGS_FTRACE)
 
 endif
 
index dee6f66..a461a95 100644 (file)
@@ -16,7 +16,8 @@ mconsole-objs := mconsole_kern.o mconsole_user.o
 hostaudio-objs := hostaudio_kern.o
 ubd-objs := ubd_kern.o ubd_user.o
 port-objs := port_kern.o port_user.o
-harddog-objs := harddog_kern.o harddog_user.o
+harddog-objs := harddog_kern.o
+harddog-builtin-$(CONFIG_UML_WATCHDOG) := harddog_user.o harddog_user_exp.o
 rtc-objs := rtc_kern.o rtc_user.o
 
 LDFLAGS_pcap.o = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libpcap.a)
@@ -60,6 +61,7 @@ obj-$(CONFIG_PTY_CHAN) += pty.o
 obj-$(CONFIG_TTY_CHAN) += tty.o 
 obj-$(CONFIG_XTERM_CHAN) += xterm.o xterm_kern.o
 obj-$(CONFIG_UML_WATCHDOG) += harddog.o
+obj-y += $(harddog-builtin-y) $(harddog-builtin-m)
 obj-$(CONFIG_BLK_DEV_COW_COMMON) += cow_user.o
 obj-$(CONFIG_UML_RANDOM) += random.o
 obj-$(CONFIG_VIRTIO_UML) += virtio_uml.o
diff --git a/arch/um/drivers/harddog.h b/arch/um/drivers/harddog.h
new file mode 100644 (file)
index 0000000..6d9ea60
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef UM_WATCHDOG_H
+#define UM_WATCHDOG_H
+
+int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
+void stop_watchdog(int in_fd, int out_fd);
+int ping_watchdog(int fd);
+
+#endif /* UM_WATCHDOG_H */
index e6d4f43..60d1c6c 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include "mconsole.h"
+#include "harddog.h"
 
 MODULE_LICENSE("GPL");
 
@@ -60,8 +61,6 @@ static int harddog_out_fd = -1;
  *     Allow only one person to hold it open
  */
 
-extern int start_watchdog(int *in_fd_ret, int *out_fd_ret, char *sock);
-
 static int harddog_open(struct inode *inode, struct file *file)
 {
        int err = -EBUSY;
@@ -92,8 +91,6 @@ err:
        return err;
 }
 
-extern void stop_watchdog(int in_fd, int out_fd);
-
 static int harddog_release(struct inode *inode, struct file *file)
 {
        /*
@@ -112,8 +109,6 @@ static int harddog_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-extern int ping_watchdog(int fd);
-
 static ssize_t harddog_write(struct file *file, const char __user *data, size_t len,
                             loff_t *ppos)
 {
index 070468d..9ed8930 100644 (file)
@@ -7,6 +7,7 @@
 #include <unistd.h>
 #include <errno.h>
 #include <os.h>
+#include "harddog.h"
 
 struct dog_data {
        int stdin_fd;
diff --git a/arch/um/drivers/harddog_user_exp.c b/arch/um/drivers/harddog_user_exp.c
new file mode 100644 (file)
index 0000000..c74d4b8
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
+#include "harddog.h"
+
+#if IS_MODULE(CONFIG_UML_WATCHDOG)
+EXPORT_SYMBOL(start_watchdog);
+EXPORT_SYMBOL(stop_watchdog);
+EXPORT_SYMBOL(ping_watchdog);
+#endif
index 7c1abc5..9556dac 100644 (file)
        .octa 0x3F893781E95FE1576CDA64D2BA0CB204
 
 #ifdef CONFIG_AS_GFNI
-.section       .rodata.cst8, "aM", @progbits, 8
-.align 8
 /* AES affine: */
 #define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
 .Ltf_aff_bitmatrix:
index 070cc4e..89b9c1c 100644 (file)
@@ -4074,7 +4074,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
        if (x86_pmu.intel_cap.pebs_baseline) {
                arr[(*nr)++] = (struct perf_guest_switch_msr){
                        .msr = MSR_PEBS_DATA_CFG,
-                       .host = cpuc->pebs_data_cfg,
+                       .host = cpuc->active_pebs_data_cfg,
                        .guest = kvm_pmu->pebs_data_cfg,
                };
        }
index fa9b209..d49e90d 100644 (file)
@@ -6150,6 +6150,7 @@ static struct intel_uncore_type spr_uncore_mdf = {
 };
 
 #define UNCORE_SPR_NUM_UNCORE_TYPES            12
+#define UNCORE_SPR_CHA                         0
 #define UNCORE_SPR_IIO                         1
 #define UNCORE_SPR_IMC                         6
 #define UNCORE_SPR_UPI                         8
@@ -6460,12 +6461,22 @@ static int uncore_type_max_boxes(struct intel_uncore_type **types,
        return max + 1;
 }
 
+#define SPR_MSR_UNC_CBO_CONFIG         0x2FFE
+
 void spr_uncore_cpu_init(void)
 {
+       struct intel_uncore_type *type;
+       u64 num_cbo;
+
        uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR,
                                                UNCORE_SPR_MSR_EXTRA_UNCORES,
                                                spr_msr_uncores);
 
+       type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA);
+       if (type) {
+               rdmsrl(SPR_MSR_UNC_CBO_CONFIG, num_cbo);
+               type->num_boxes = num_cbo;
+       }
        spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO);
 }
 
index c2d6cd7..78fcde7 100644 (file)
@@ -39,7 +39,7 @@ extern void fpu_flush_thread(void);
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
        if (cpu_feature_enabled(X86_FEATURE_FPU) &&
-           !(current->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+           !(current->flags & (PF_KTHREAD | PF_USER_WORKER))) {
                save_fpregs_to_fpstate(old_fpu);
                /*
                 * The save operation preserved register state, so the
index 498dc60..0d02c4a 100644 (file)
@@ -13,7 +13,9 @@
 
 
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/types.h>
+
 #include <uapi/asm/vmx.h>
 #include <asm/vmxfeatures.h>
 
index dd61752..4070a01 100644 (file)
@@ -17,6 +17,7 @@ CFLAGS_REMOVE_ftrace.o = -pg
 CFLAGS_REMOVE_early_printk.o = -pg
 CFLAGS_REMOVE_head64.o = -pg
 CFLAGS_REMOVE_sev.o = -pg
+CFLAGS_REMOVE_rethook.o = -pg
 endif
 
 KASAN_SANITIZE_head$(BITS).o                           := n
index 5e868b6..0270925 100644 (file)
@@ -79,7 +79,7 @@ int detect_extended_topology_early(struct cpuinfo_x86 *c)
         * initial apic id, which also represents 32-bit extended x2apic id.
         */
        c->initial_apicid = edx;
-       smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+       smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
 #endif
        return 0;
 }
@@ -109,7 +109,8 @@ int detect_extended_topology(struct cpuinfo_x86 *c)
         */
        cpuid_count(leaf, SMT_LEVEL, &eax, &ebx, &ecx, &edx);
        c->initial_apicid = edx;
-       core_level_siblings = smp_num_siblings = LEVEL_MAX_SIBLINGS(ebx);
+       core_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
+       smp_num_siblings = max_t(int, smp_num_siblings, LEVEL_MAX_SIBLINGS(ebx));
        core_plus_mask_width = ht_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
        die_level_siblings = LEVEL_MAX_SIBLINGS(ebx);
        pkg_mask_width = die_plus_mask_width = BITS_SHIFT_NEXT_LEVEL(eax);
index 0bf6779..f18ca44 100644 (file)
@@ -195,7 +195,6 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
        printk("%sCall Trace:\n", log_lvl);
 
        unwind_start(&state, task, regs, stack);
-       stack = stack ? : get_stack_pointer(task, regs);
        regs = unwind_get_entry_regs(&state, &partial);
 
        /*
@@ -214,9 +213,13 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
         * - hardirq stack
         * - entry stack
         */
-       for ( ; stack; stack = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
+       for (stack = stack ?: get_stack_pointer(task, regs);
+            stack;
+            stack = stack_info.next_sp) {
                const char *stack_name;
 
+               stack = PTR_ALIGN(stack, sizeof(long));
+
                if (get_stack_info(stack, task, &stack_info, &visit_mask)) {
                        /*
                         * We weren't on a valid stack.  It's possible that
index 9fcfa5c..af5cbdd 100644 (file)
@@ -57,7 +57,7 @@ static inline void fpregs_restore_userregs(void)
        struct fpu *fpu = &current->thread.fpu;
        int cpu = smp_processor_id();
 
-       if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_IO_WORKER)))
+       if (WARN_ON_ONCE(current->flags & (PF_KTHREAD | PF_USER_WORKER)))
                return;
 
        if (!fpregs_state_valid(fpu, cpu)) {
index caf3348..1015af1 100644 (file)
@@ -426,7 +426,7 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 
        this_cpu_write(in_kernel_fpu, true);
 
-       if (!(current->flags & (PF_KTHREAD | PF_IO_WORKER)) &&
+       if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
            !test_thread_flag(TIF_NEED_FPU_LOAD)) {
                set_thread_flag(TIF_NEED_FPU_LOAD);
                save_fpregs_to_fpstate(&current->thread.fpu);
index 123bf8b..0c9660a 100644 (file)
@@ -253,7 +253,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                                       int nent)
 {
        struct kvm_cpuid_entry2 *best;
-       u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
 
        best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
        if (best) {
@@ -292,21 +291,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
                                           vcpu->arch.ia32_misc_enable_msr &
                                           MSR_IA32_MISC_ENABLE_MWAIT);
        }
-
-       /*
-        * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
-        * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
-        * requested XCR0 value.  The enclave's XFRM must be a subset of XCRO
-        * at the time of EENTER, thus adjust the allowed XFRM by the guest's
-        * supported XCR0.  Similar to XCR0 handling, FP and SSE are forced to
-        * '1' even on CPUs that don't support XSAVE.
-        */
-       best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
-       if (best) {
-               best->ecx &= guest_supported_xcr0 & 0xffffffff;
-               best->edx &= guest_supported_xcr0 >> 32;
-               best->ecx |= XFEATURE_MASK_FPSSE;
-       }
 }
 
 void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
index e542cf2..3c300a1 100644 (file)
@@ -229,6 +229,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
        u32 physical_id;
 
        /*
+        * For simplicity, KVM always allocates enough space for all possible
+        * xAPIC IDs.  Yell, but don't kill the VM, as KVM can continue on
+        * without the optimized map.
+        */
+       if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
+               return -EINVAL;
+
+       /*
+        * Bail if a vCPU was added and/or enabled its APIC between allocating
+        * the map and doing the actual calculations for the map.  Note, KVM
+        * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
+        * the compiler decides to reload x2apic_id after this check.
+        */
+       if (x2apic_id > new->max_apic_id)
+               return -E2BIG;
+
+       /*
         * Deliberately truncate the vCPU ID when detecting a mismatched APIC
         * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
         * 32-bit value.  Any unwanted aliasing due to truncation results will
@@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
         */
        if (vcpu->kvm->arch.x2apic_format) {
                /* See also kvm_apic_match_physical_addr(). */
-               if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
-                       x2apic_id <= new->max_apic_id)
+               if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
                        new->phys_map[x2apic_id] = apic;
 
                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
index c8961f4..6eaa3d6 100644 (file)
@@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
                 */
                slot = NULL;
                if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
-                       slot = gfn_to_memslot(kvm, sp->gfn);
+                       struct kvm_memslots *slots;
+
+                       slots = kvm_memslots_for_spte_role(kvm, sp->role);
+                       slot = __gfn_to_memslot(slots, sp->gfn);
                        WARN_ON_ONCE(!slot);
                }
 
index ca32389..54089f9 100644 (file)
@@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
        if (!is_vnmi_enabled(svm))
                return false;
 
-       return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
+       return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
 }
 
 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
index 0574030..2261b68 100644 (file)
@@ -170,12 +170,19 @@ static int __handle_encls_ecreate(struct kvm_vcpu *vcpu,
                return 1;
        }
 
-       /* Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM. */
+       /*
+        * Enforce CPUID restrictions on MISCSELECT, ATTRIBUTES and XFRM.  Note
+        * that the allowed XFRM (XFeature Request Mask) isn't strictly bound
+        * by the supported XCR0.  FP+SSE *must* be set in XFRM, even if XSAVE
+        * is unsupported, i.e. even if XCR0 itself is completely unsupported.
+        */
        if ((u32)miscselect & ~sgx_12_0->ebx ||
            (u32)attributes & ~sgx_12_1->eax ||
            (u32)(attributes >> 32) & ~sgx_12_1->ebx ||
            (u32)xfrm & ~sgx_12_1->ecx ||
-           (u32)(xfrm >> 32) & ~sgx_12_1->edx) {
+           (u32)(xfrm >> 32) & ~sgx_12_1->edx ||
+           xfrm & ~(vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE) ||
+           (xfrm & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
index ceb7c5e..04b57a3 100644 (file)
@@ -1446,7 +1446,7 @@ static const u32 msrs_to_save_base[] = {
 #endif
        MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
        MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
-       MSR_IA32_SPEC_CTRL,
+       MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
        MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
        MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
        MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
@@ -7155,6 +7155,10 @@ static void kvm_probe_msr_to_save(u32 msr_index)
                if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
                        return;
                break;
+       case MSR_IA32_TSX_CTRL:
+               if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
+                       return;
+               break;
        default:
                break;
        }
@@ -10754,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                        exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
                        break;
                }
+
+               /* Note, VM-Exits that go down the "slow" path are accounted below. */
+               ++vcpu->stat.exits;
        }
 
        /*
index 4fc5c2d..01c5de4 100644 (file)
@@ -7,6 +7,8 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/cpufeatures.h>
+#include <asm/alternative.h>
 #include <asm/asm.h>
 #include <asm/export.h>
 
@@ -29,7 +31,7 @@
  */
 SYM_FUNC_START(rep_movs_alternative)
        cmpq $64,%rcx
-       jae .Lunrolled
+       jae .Llarge
 
        cmp $8,%ecx
        jae .Lword
@@ -65,6 +67,12 @@ SYM_FUNC_START(rep_movs_alternative)
        _ASM_EXTABLE_UA( 2b, .Lcopy_user_tail)
        _ASM_EXTABLE_UA( 3b, .Lcopy_user_tail)
 
+.Llarge:
+0:     ALTERNATIVE "jmp .Lunrolled", "rep movsb", X86_FEATURE_ERMS
+1:     RET
+
+        _ASM_EXTABLE_UA( 0b, 1b)
+
        .p2align 4
 .Lunrolled:
 10:    movq (%rsi),%r8
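
The new .Llarge path uses runtime patching: on CPUs with ERMS (Enhanced REP MOVSB), the ALTERNATIVE replaces the jump to the unrolled loop with a bare "rep movsb", so no branch survives once alternatives are applied at boot. A C-level model of the same dispatch, with has_erms() and copy_unrolled() as hypothetical stand-ins (x86-64 only, since it relies on rdi/rsi/rcx for rep movsb):

    #include <stdbool.h>
    #include <stddef.h>

    bool has_erms(void);                                    /* CPUID feature probe */
    void copy_unrolled(void *dst, const void *src, size_t n);

    static void copy_large(void *dst, const void *src, size_t n)
    {
        if (has_erms()) {       /* the kernel patches this choice in at boot */
            asm volatile("rep movsb"
                         : "+D"(dst), "+S"(src), "+c"(n) : : "memory");
            return;
        }
        copy_unrolled(dst, src, n);
    }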
index 3cdac0f..8192452 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/sched/task.h>
 
 #include <asm/set_memory.h>
+#include <asm/cpu_device_id.h>
 #include <asm/e820/api.h>
 #include <asm/init.h>
 #include <asm/page.h>
@@ -261,6 +262,24 @@ static void __init probe_page_size_mask(void)
        }
 }
 
+#define INTEL_MATCH(_model) { .vendor  = X86_VENDOR_INTEL,     \
+                             .family  = 6,                     \
+                             .model = _model,                  \
+                           }
+/*
+ * INVLPG may not properly flush Global entries
+ * on these CPUs when PCIDs are enabled.
+ */
+static const struct x86_cpu_id invlpg_miss_ids[] = {
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE   ),
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+       INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE  ),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+       INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
+       {}
+};
+
 static void setup_pcid(void)
 {
        if (!IS_ENABLED(CONFIG_X86_64))
@@ -269,6 +288,12 @@ static void setup_pcid(void)
        if (!boot_cpu_has(X86_FEATURE_PCID))
                return;
 
+       if (x86_match_cpu(invlpg_miss_ids)) {
+               pr_info("Incomplete global flushes, disabling PCID");
+               setup_clear_cpu_cap(X86_FEATURE_PCID);
+               return;
+       }
+
        if (boot_cpu_has(X86_FEATURE_PGE)) {
                /*
                 * This can't be cr4_set_bits_and_update_boot() -- the
index 8babce7..014c508 100644 (file)
@@ -198,7 +198,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                i++;
        }
        kfree(v);
-       return 0;
+       return msi_device_populate_sysfs(&dev->dev);
 
 error:
        if (ret == -ENOSYS)
@@ -254,7 +254,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                dev_dbg(&dev->dev,
                        "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
        }
-       return 0;
+       return msi_device_populate_sysfs(&dev->dev);
 
 error:
        dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
@@ -346,7 +346,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
                if (ret < 0)
                        goto out;
        }
-       ret = 0;
+       ret = msi_device_populate_sysfs(&dev->dev);
 out:
        return ret;
 }
@@ -394,6 +394,8 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
                        xen_destroy_irq(msidesc->irq + i);
                msidesc->irq = 0;
        }
+
+       msi_device_destroy_sysfs(&dev->dev);
 }
 
 static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
index 876d5df..5c01d7e 100644 (file)
@@ -343,7 +343,19 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        struct rt_sigframe *frame;
        int err = 0, sig = ksig->sig;
        unsigned long sp, ra, tp, ps;
+       unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+       unsigned long handler_fdpic_GOT = 0;
        unsigned int base;
+       bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
+               (current->personality & FDPIC_FUNCPTRS);
+
+       if (fdpic) {
+               unsigned long __user *fdpic_func_desc =
+                       (unsigned long __user *)handler;
+               if (__get_user(handler, &fdpic_func_desc[0]) ||
+                   __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
+                       return -EFAULT;
+       }
 
        sp = regs->areg[1];
 
@@ -373,20 +385,26 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
 
        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-               ra = (unsigned long)ksig->ka.sa.sa_restorer;
+               if (fdpic) {
+                       unsigned long __user *fdpic_func_desc =
+                               (unsigned long __user *)ksig->ka.sa.sa_restorer;
+
+                       err |= __get_user(ra, fdpic_func_desc);
+               } else {
+                       ra = (unsigned long)ksig->ka.sa.sa_restorer;
+               }
        } else {
 
                /* Create sys_rt_sigreturn syscall in stack frame */
 
                err |= gen_return_code(frame->retcode);
-
-               if (err) {
-                       return -EFAULT;
-               }
                ra = (unsigned long) frame->retcode;
        }
 
-       /* 
+       if (err)
+               return -EFAULT;
+
+       /*
         * Create signal handler execution context.
         * Return context not modified until this point.
         */
@@ -394,8 +412,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        /* Set up registers for signal handler; preserve the threadptr */
        tp = regs->threadptr;
        ps = regs->ps;
-       start_thread(regs, (unsigned long) ksig->ka.sa.sa_handler,
-                    (unsigned long) frame);
+       start_thread(regs, handler, (unsigned long)frame);
 
        /* Set up a stack frame for a call4 if userspace uses windowed ABI */
        if (ps & PS_WOE_MASK) {
@@ -413,6 +430,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
        regs->areg[base + 4] = (unsigned long) &frame->uc;
        regs->threadptr = tp;
        regs->ps = ps;
+       if (fdpic)
+               regs->areg[base + 11] = handler_fdpic_GOT;
 
        pr_debug("SIG rt deliver (%s:%d): signal=%d sp=%p pc=%08lx\n",
                 current->comm, current->pid, sig, frame, regs->pc);
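
Under CONFIG_BINFMT_ELF_FDPIC a signal handler "pointer" is really the address of a two-word function descriptor, so the kernel must load the real entry point from word 0 and seed the callee's GOT register (areg[base + 11] here) from word 1. A user-space model of the descriptor, field names illustrative:

    struct fdpic_desc {
        unsigned long entry;   /* code address actually jumped to */
        unsigned long got;     /* value the callee expects in its GOT register */
    };

    static void resolve_handler(const struct fdpic_desc *d,
                                unsigned long *entry, unsigned long *got)
    {
        *entry = d->entry;     /* kernel: __get_user(handler, &desc[0]) */
        *got   = d->got;       /* kernel: __get_user(handler_fdpic_GOT, &desc[1]) */
    }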
index 2a31b1a..17a7ef8 100644 (file)
@@ -56,6 +56,8 @@ EXPORT_SYMBOL(empty_zero_page);
  */
 extern long long __ashrdi3(long long, int);
 extern long long __ashldi3(long long, int);
+extern long long __bswapdi2(long long);
+extern int __bswapsi2(int);
 extern long long __lshrdi3(long long, int);
 extern int __divsi3(int, int);
 extern int __modsi3(int, int);
@@ -66,6 +68,8 @@ extern unsigned long long __umulsidi3(unsigned int, unsigned int);
 
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__bswapdi2);
+EXPORT_SYMBOL(__bswapsi2);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__divsi3);
 EXPORT_SYMBOL(__modsi3);
index 7ecef05..c9c2614 100644 (file)
@@ -4,7 +4,7 @@
 #
 
 lib-y  += memcopy.o memset.o checksum.o \
-          ashldi3.o ashrdi3.o lshrdi3.o \
+          ashldi3.o ashrdi3.o bswapdi2.o bswapsi2.o lshrdi3.o \
           divsi3.o udivsi3.o modsi3.o umodsi3.o mulsi3.o umulsidi3.o \
           usercopy.o strncpy_user.o strnlen_user.o
 lib-$(CONFIG_PCI) += pci-auto.o
diff --git a/arch/xtensa/lib/bswapdi2.S b/arch/xtensa/lib/bswapdi2.S
new file mode 100644 (file)
index 0000000..d8e52e0
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__bswapdi2)
+
+       abi_entry_default
+       ssai    8
+       srli    a4, a2, 16
+       src     a4, a4, a2
+       src     a4, a4, a4
+       src     a4, a2, a4
+       srli    a2, a3, 16
+       src     a2, a2, a3
+       src     a2, a2, a2
+       src     a2, a3, a2
+       mov     a3, a4
+       abi_ret_default
+
+ENDPROC(__bswapdi2)
diff --git a/arch/xtensa/lib/bswapsi2.S b/arch/xtensa/lib/bswapsi2.S
new file mode 100644 (file)
index 0000000..9c1de13
--- /dev/null
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH GCC-exception-2.0 */
+#include <linux/linkage.h>
+#include <asm/asmmacro.h>
+#include <asm/core.h>
+
+ENTRY(__bswapsi2)
+
+       abi_entry_default
+       ssai    8
+       srli    a3, a2, 16
+       src     a3, a3, a2
+       src     a3, a3, a3
+       src     a2, a2, a3
+       abi_ret_default
+
+ENDPROC(__bswapsi2)
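
These helpers exist because GCC can emit libcalls to __bswapsi2/__bswapdi2 instead of open-coding byte swaps, and the kernel links without libgcc. What the assembly computes, expressed in C:

    #include <stdint.h>

    static uint32_t bswapsi2(uint32_t v)
    {
        return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
               ((v << 8) & 0x00ff0000u) | (v << 24);
    }

    static uint64_t bswapdi2(uint64_t v)
    {
        /* swap each 32-bit half, then exchange the halves */
        return ((uint64_t)bswapsi2((uint32_t)v) << 32) |
               bswapsi2((uint32_t)(v >> 32));
    }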
index 00c7433..1da77e7 100644 (file)
@@ -520,7 +520,7 @@ static inline int bio_check_eod(struct bio *bio)
        sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
        unsigned int nr_sectors = bio_sectors(bio);
 
-       if (nr_sectors && maxsector &&
+       if (nr_sectors &&
            (nr_sectors > maxsector ||
             bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
                pr_info_ratelimited("%s: attempt to access beyond end of device\n"
index 04c55f1..46eed2e 100644 (file)
@@ -248,7 +248,7 @@ static struct bio *blk_rq_map_bio_alloc(struct request *rq,
 {
        struct bio *bio;
 
-       if (rq->cmd_flags & REQ_ALLOC_CACHE) {
+       if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
                bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
                                        &fs_bio_set);
                if (!bio)
index d6af9d4..dfd81ca 100644 (file)
@@ -39,16 +39,20 @@ void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
 {
        unsigned int users;
 
+       /*
+        * Calling test_bit() prior to test_and_set_bit() is intentional;
+        * it avoids dirtying the cacheline if the queue is already active.
+        */
        if (blk_mq_is_shared_tags(hctx->flags)) {
                struct request_queue *q = hctx->queue;
 
-               if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
+               if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+                   test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
                        return;
-               set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags);
        } else {
-               if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
+               if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+                   test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
                        return;
-               set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state);
        }
 
        users = atomic_inc_return(&hctx->tags->active_queues);
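
This is the common "test before test-and-set" optimization: a plain read filters out the already-set fast path without ever writing the cacheline, and the atomic read-modify-write runs only when the bit still looks clear. A minimal sketch with a hypothetical helper name:

    /* Return true only for the caller that makes the 0 -> 1
     * transition; the atomic RMW resolves races between setters.
     */
    static inline bool set_active_once(unsigned long *flags, int bit)
    {
            if (test_bit(bit, flags) || test_and_set_bit(bit, flags))
                    return false;   /* someone else already set it */
            return true;            /* we performed the transition */
    }
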
index 896b465..4dd5905 100644 (file)
@@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk)
 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
        struct request_queue *q = disk->queue;
+       unsigned int old_model = q->limits.zoned;
 
        switch (model) {
        case BLK_ZONED_HM:
@@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
                 */
                blk_queue_zone_write_granularity(q,
                                                queue_logical_block_size(q));
-       } else {
+       } else if (old_model != BLK_ZONED_NONE) {
                disk_clear_zone_settings(disk);
        }
 }
index e49a486..9ec2a2f 100644 (file)
@@ -730,14 +730,16 @@ void wbt_enable_default(struct gendisk *disk)
 {
        struct request_queue *q = disk->queue;
        struct rq_qos *rqos;
-       bool disable_flag = q->elevator &&
-                   test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
+       bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
+
+       if (q->elevator &&
+           test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
+               enable = false;
 
        /* Throttling already enabled? */
        rqos = wbt_rq_qos(q);
        if (rqos) {
-               if (!disable_flag &&
-                   RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
+               if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
                        RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
                return;
        }
@@ -746,7 +748,7 @@ void wbt_enable_default(struct gendisk *disk)
        if (!blk_queue_registered(q))
                return;
 
-       if (queue_is_mq(q) && !disable_flag)
+       if (queue_is_mq(q) && enable)
                wbt_init(disk);
 }
 EXPORT_SYMBOL_GPL(wbt_enable_default);
index d2e6be4..58d0aeb 100644 (file)
@@ -678,6 +678,16 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
        return error;
 }
 
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       struct inode *bd_inode = bdev_file_inode(file);
+
+       if (bdev_read_only(I_BDEV(bd_inode)))
+               return generic_file_readonly_mmap(file, vma);
+
+       return generic_file_mmap(file, vma);
+}
+
 const struct file_operations def_blk_fops = {
        .open           = blkdev_open,
        .release        = blkdev_close,
@@ -685,7 +695,7 @@ const struct file_operations def_blk_fops = {
        .read_iter      = blkdev_read_iter,
        .write_iter     = blkdev_write_iter,
        .iopoll         = iocb_bio_iopoll,
-       .mmap           = generic_file_mmap,
+       .mmap           = blkdev_mmap,
        .fsync          = blkdev_fsync,
        .unlocked_ioctl = blkdev_ioctl,
 #ifdef CONFIG_COMPAT
index eca5671..50c933f 100644 (file)
@@ -380,9 +380,10 @@ int public_key_verify_signature(const struct public_key *pkey,
        struct crypto_wait cwait;
        struct crypto_akcipher *tfm;
        struct akcipher_request *req;
-       struct scatterlist src_sg[2];
+       struct scatterlist src_sg;
        char alg_name[CRYPTO_MAX_ALG_NAME];
-       char *key, *ptr;
+       char *buf, *ptr;
+       size_t buf_len;
        int ret;
 
        pr_devel("==>%s()\n", __func__);
@@ -420,34 +421,37 @@ int public_key_verify_signature(const struct public_key *pkey,
        if (!req)
                goto error_free_tfm;
 
-       key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
-                     GFP_KERNEL);
-       if (!key)
+       buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+                       sig->s_size + sig->digest_size);
+
+       buf = kmalloc(buf_len, GFP_KERNEL);
+       if (!buf)
                goto error_free_req;
 
-       memcpy(key, pkey->key, pkey->keylen);
-       ptr = key + pkey->keylen;
+       memcpy(buf, pkey->key, pkey->keylen);
+       ptr = buf + pkey->keylen;
        ptr = pkey_pack_u32(ptr, pkey->algo);
        ptr = pkey_pack_u32(ptr, pkey->paramlen);
        memcpy(ptr, pkey->params, pkey->paramlen);
 
        if (pkey->key_is_private)
-               ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+               ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen);
        else
-               ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+               ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen);
        if (ret)
-               goto error_free_key;
+               goto error_free_buf;
 
        if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
                ret = cert_sig_digest_update(sig, tfm);
                if (ret)
-                       goto error_free_key;
+                       goto error_free_buf;
        }
 
-       sg_init_table(src_sg, 2);
-       sg_set_buf(&src_sg[0], sig->s, sig->s_size);
-       sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
-       akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+       memcpy(buf, sig->s, sig->s_size);
+       memcpy(buf + sig->s_size, sig->digest, sig->digest_size);
+
+       sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size);
+       akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size,
                                   sig->digest_size);
        crypto_init_wait(&cwait);
        akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
@@ -455,8 +459,8 @@ int public_key_verify_signature(const struct public_key *pkey,
                                      crypto_req_done, &cwait);
        ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
 
-error_free_key:
-       kfree(key);
+error_free_buf:
+       kfree(buf);
 error_free_req:
        akcipher_request_free(req);
 error_free_tfm:
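
The rework replaces a two-entry scatterlist over caller-provided buffers with one kmalloc'd, physically contiguous buffer described by a single sg entry; the signature and digest buffers handed in are not guaranteed to be safe to map in a scatterlist. A hedged sketch of the packing pattern, with a hypothetical helper name:

    /* Pack two logically separate inputs into one DMA-safe kmalloc
     * buffer and describe it with a single scatterlist entry, as the
     * hunk above does for sig->s and sig->digest.
     */
    static void *pack_for_sg(struct scatterlist *sg,
                             const void *a, size_t len_a,
                             const void *b, size_t len_b)
    {
            char *buf = kmalloc(len_a + len_b, GFP_KERNEL);

            if (!buf)
                    return NULL;
            memcpy(buf, a, len_a);
            memcpy(buf + len_a, b, len_b);
            sg_init_one(sg, buf, len_a + len_b);
            return buf;     /* caller kfree()s when the request is done */
    }
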
index 9bdf168..1a4c4ed 100644 (file)
@@ -7,6 +7,7 @@ config DRM_ACCEL_IVPU
        depends on PCI && PCI_MSI
        select FW_LOADER
        select SHMEM
+       select GENERIC_ALLOCATOR
        help
          Choose this option if you have a system that has a 14th generation Intel CPU
          or newer. VPU stands for Versatile Processing Unit and it's a CPU-integrated
index 382ec12..fef3542 100644 (file)
@@ -197,6 +197,11 @@ static void ivpu_pll_init_frequency_ratios(struct ivpu_device *vdev)
        hw->pll.pn_ratio = clamp_t(u8, fuse_pn_ratio, hw->pll.min_ratio, hw->pll.max_ratio);
 }
 
+static int ivpu_hw_mtl_wait_for_vpuip_bar(struct ivpu_device *vdev)
+{
+       return REGV_POLL_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, AON, 0, 100);
+}
+
 static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
 {
        struct ivpu_hw_info *hw = vdev->hw;
@@ -239,6 +244,12 @@ static int ivpu_pll_drive(struct ivpu_device *vdev, bool enable)
                        ivpu_err(vdev, "Timed out waiting for PLL ready status\n");
                        return ret;
                }
+
+               ret = ivpu_hw_mtl_wait_for_vpuip_bar(vdev);
+               if (ret) {
+                       ivpu_err(vdev, "Timed out waiting for VPUIP bar\n");
+                       return ret;
+               }
        }
 
        return 0;
@@ -256,7 +267,7 @@ static int ivpu_pll_disable(struct ivpu_device *vdev)
 
 static void ivpu_boot_host_ss_rst_clr_assert(struct ivpu_device *vdev)
 {
-       u32 val = REGV_RD32(MTL_VPU_HOST_SS_CPR_RST_CLR);
+       u32 val = 0;
 
        val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
        val = REG_SET_FLD(MTL_VPU_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
@@ -754,9 +765,8 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
 {
        int ret = 0;
 
-       if (ivpu_hw_mtl_reset(vdev)) {
+       if (!ivpu_hw_mtl_is_idle(vdev) && ivpu_hw_mtl_reset(vdev)) {
                ivpu_err(vdev, "Failed to reset the VPU\n");
-               ret = -EIO;
        }
 
        if (ivpu_pll_disable(vdev)) {
@@ -764,8 +774,10 @@ static int ivpu_hw_mtl_power_down(struct ivpu_device *vdev)
                ret = -EIO;
        }
 
-       if (ivpu_hw_mtl_d0i3_enable(vdev))
-               ivpu_warn(vdev, "Failed to enable D0I3\n");
+       if (ivpu_hw_mtl_d0i3_enable(vdev)) {
+               ivpu_err(vdev, "Failed to enter D0I3\n");
+               ret = -EIO;
+       }
 
        return ret;
 }
index d83ccfd..593b8ff 100644 (file)
@@ -91,6 +91,7 @@
 #define MTL_VPU_HOST_SS_CPR_RST_SET_MSS_MAS_MASK                       BIT_MASK(11)
 
 #define MTL_VPU_HOST_SS_CPR_RST_CLR                                    0x00000098u
+#define MTL_VPU_HOST_SS_CPR_RST_CLR_AON_MASK                           BIT_MASK(0)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_TOP_NOC_MASK                       BIT_MASK(1)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_DSS_MAS_MASK                       BIT_MASK(10)
 #define MTL_VPU_HOST_SS_CPR_RST_CLR_MSS_MAS_MASK                       BIT_MASK(11)
index 3adcfa8..fa0af59 100644 (file)
@@ -183,9 +183,7 @@ ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct v
        struct ivpu_ipc_info *ipc = vdev->ipc;
        int ret;
 
-       ret = mutex_lock_interruptible(&ipc->lock);
-       if (ret)
-               return ret;
+       mutex_lock(&ipc->lock);
 
        if (!ipc->on) {
                ret = -EAGAIN;
index 3c6f1e1..d45be06 100644 (file)
@@ -431,6 +431,7 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct ww_acquire_ctx acquire_ctx;
+       enum dma_resv_usage usage;
        struct ivpu_bo *bo;
        int ret;
        u32 i;
@@ -461,22 +462,28 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32
 
        job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;
 
-       ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+       ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
+                                       &acquire_ctx);
        if (ret) {
                ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
                return ret;
        }
 
-       ret = dma_resv_reserve_fences(bo->base.resv, 1);
-       if (ret) {
-               ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
-               goto unlock_reservations;
+       for (i = 0; i < buf_count; i++) {
+               ret = dma_resv_reserve_fences(job->bos[i]->base.resv, 1);
+               if (ret) {
+                       ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
+                       goto unlock_reservations;
+               }
        }
 
-       dma_resv_add_fence(bo->base.resv, job->done_fence, DMA_RESV_USAGE_WRITE);
+       for (i = 0; i < buf_count; i++) {
+               usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
+               dma_resv_add_fence(job->bos[i]->base.resv, job->done_fence, usage);
+       }
 
 unlock_reservations:
-       drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, 1, &acquire_ctx);
+       drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);
 
        wmb(); /* Flush write combining buffers */
 
index 694e978..b8b259b 100644 (file)
@@ -587,16 +587,11 @@ static int ivpu_mmu_strtab_init(struct ivpu_device *vdev)
 int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid)
 {
        struct ivpu_mmu_info *mmu = vdev->mmu;
-       int ret;
-
-       ret = mutex_lock_interruptible(&mmu->lock);
-       if (ret)
-               return ret;
+       int ret = 0;
 
-       if (!mmu->on) {
-               ret = 0;
+       mutex_lock(&mmu->lock);
+       if (!mmu->on)
                goto unlock;
-       }
 
        ret = ivpu_mmu_cmdq_write_tlbi_nh_asid(vdev, ssid);
        if (ret)
@@ -614,7 +609,7 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
        struct ivpu_mmu_cdtab *cdtab = &mmu->cdtab;
        u64 *entry;
        u64 cd[4];
-       int ret;
+       int ret = 0;
 
        if (ssid > IVPU_MMU_CDTAB_ENT_COUNT)
                return -EINVAL;
@@ -655,14 +650,9 @@ static int ivpu_mmu_cd_add(struct ivpu_device *vdev, u32 ssid, u64 cd_dma)
        ivpu_dbg(vdev, MMU, "CDTAB %s entry (SSID=%u, dma=%pad): 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
                 cd_dma ? "write" : "clear", ssid, &cd_dma, cd[0], cd[1], cd[2], cd[3]);
 
-       ret = mutex_lock_interruptible(&mmu->lock);
-       if (ret)
-               return ret;
-
-       if (!mmu->on) {
-               ret = 0;
+       mutex_lock(&mmu->lock);
+       if (!mmu->on)
                goto unlock;
-       }
 
        ret = ivpu_mmu_cmdq_write_cfgi_all(vdev);
        if (ret)
index 9f216eb..5c57f7b 100644 (file)
@@ -997,14 +997,34 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
        struct xfer_queue_elem elem;
        struct wire_msg *out_buf;
        struct wrapper_msg *w;
+       long ret = -EAGAIN;
+       int xfer_count = 0;
        int retry_count;
-       long ret;
 
        if (qdev->in_reset) {
                mutex_unlock(&qdev->cntl_mutex);
                return ERR_PTR(-ENODEV);
        }
 
+       /* Attempt to avoid a partial commit of a message */
+       list_for_each_entry(w, &wrappers->list, list)
+               xfer_count++;
+
+       for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) {
+               if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
+                       ret = 0;
+                       break;
+               }
+               msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
+               if (signal_pending(current))
+                       break;
+       }
+
+       if (ret) {
+               mutex_unlock(&qdev->cntl_mutex);
+               return ERR_PTR(ret);
+       }
+
        elem.seq_num = seq_num;
        elem.buf = NULL;
        init_completion(&elem.xfer_done);
@@ -1038,16 +1058,9 @@ static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u
        list_for_each_entry(w, &wrappers->list, list) {
                kref_get(&w->ref_count);
                retry_count = 0;
-retry:
                ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
                                    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
                if (ret) {
-                       if (ret == -EAGAIN && retry_count++ < QAIC_MHI_RETRY_MAX) {
-                               msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS);
-                               if (!signal_pending(current))
-                                       goto retry;
-                       }
-
                        qdev->cntl_lost_buf = true;
                        kref_put(&w->ref_count, free_wrapper);
                        mutex_unlock(&qdev->cntl_mutex);
@@ -1249,7 +1262,7 @@ dma_cont_failed:
 
 int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
-       struct qaic_manage_msg *user_msg;
+       struct qaic_manage_msg *user_msg = data;
        struct qaic_device *qdev;
        struct manage_msg *msg;
        struct qaic_user *usr;
@@ -1258,6 +1271,9 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
        int usr_rcu_id;
        int ret;
 
+       if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH)
+               return -EINVAL;
+
        usr = file_priv->driver_priv;
 
        usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
@@ -1275,13 +1291,6 @@ int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
                return -ENODEV;
        }
 
-       user_msg = data;
-
-       if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
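
The retry logic moves from per-buffer requeue attempts to a single up-front capacity check, so a multi-wrapper message is either queued whole or not at all. A hedged sketch of that pre-check, with a hypothetical helper name and wait interval:

    /* Poll until the MHI channel has room for every descriptor the
     * message needs, so the message is never partially committed.
     */
    static int wait_for_desc_room(struct mhi_device *ch, int needed,
                                  int tries)
    {
            while (tries--) {
                    if (needed <= mhi_get_free_desc_count(ch, DMA_TO_DEVICE))
                            return 0;
                    msleep_interruptible(100);      /* illustrative wait */
                    if (signal_pending(current))
                            break;
            }
            return -EAGAIN;
    }
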
index c0a574c..e42c1f9 100644 (file)
@@ -591,7 +591,7 @@ static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struc
        struct qaic_bo *bo = to_qaic_bo(obj);
        unsigned long offset = 0;
        struct scatterlist *sg;
-       int ret;
+       int ret = 0;
 
        if (obj->import_attach)
                return -EINVAL;
@@ -663,6 +663,10 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
        if (args->pad)
                return -EINVAL;
 
+       size = PAGE_ALIGN(args->size);
+       if (size == 0)
+               return -EINVAL;
+
        usr = file_priv->driver_priv;
        usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
        if (!usr->qddev) {
@@ -677,12 +681,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
                goto unlock_dev_srcu;
        }
 
-       size = PAGE_ALIGN(args->size);
-       if (size == 0) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
        bo = qaic_alloc_init_bo();
        if (IS_ERR(bo)) {
                ret = PTR_ERR(bo);
@@ -926,8 +924,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 {
        struct qaic_attach_slice_entry *slice_ent;
        struct qaic_attach_slice *args = data;
+       int rcu_id, usr_rcu_id, qdev_rcu_id;
        struct dma_bridge_chan  *dbc;
-       int usr_rcu_id, qdev_rcu_id;
        struct drm_gem_object *obj;
        struct qaic_device *qdev;
        unsigned long arg_size;
@@ -936,6 +934,22 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
        struct qaic_bo *bo;
        int ret;
 
+       if (args->hdr.count == 0)
+               return -EINVAL;
+
+       arg_size = args->hdr.count * sizeof(*slice_ent);
+       if (arg_size / args->hdr.count != sizeof(*slice_ent))
+               return -EINVAL;
+
+       if (args->hdr.size == 0)
+               return -EINVAL;
+
+       if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
+               return -EINVAL;
+
+       if (args->data == 0)
+               return -EINVAL;
+
        usr = file_priv->driver_priv;
        usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
        if (!usr->qddev) {
@@ -950,43 +964,11 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
                goto unlock_dev_srcu;
        }
 
-       if (args->hdr.count == 0) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
-       arg_size = args->hdr.count * sizeof(*slice_ent);
-       if (arg_size / args->hdr.count != sizeof(*slice_ent)) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
        if (args->hdr.dbc_id >= qdev->num_dbc) {
                ret = -EINVAL;
                goto unlock_dev_srcu;
        }
 
-       if (args->hdr.size == 0) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
-       if (!(args->hdr.dir == DMA_TO_DEVICE  || args->hdr.dir == DMA_FROM_DEVICE)) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
-       dbc = &qdev->dbc[args->hdr.dbc_id];
-       if (dbc->usr != usr) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
-       if (args->data == 0) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
        user_data = u64_to_user_ptr(args->data);
 
        slice_ent = kzalloc(arg_size, GFP_KERNEL);
@@ -1013,9 +995,21 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 
        bo = to_qaic_bo(obj);
 
+       if (bo->sliced) {
+               ret = -EINVAL;
+               goto put_bo;
+       }
+
+       dbc = &qdev->dbc[args->hdr.dbc_id];
+       rcu_id = srcu_read_lock(&dbc->ch_lock);
+       if (dbc->usr != usr) {
+               ret = -EINVAL;
+               goto unlock_ch_srcu;
+       }
+
        ret = qaic_prepare_bo(qdev, bo, &args->hdr);
        if (ret)
-               goto put_bo;
+               goto unlock_ch_srcu;
 
        ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
        if (ret)
@@ -1025,6 +1019,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
                dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
 
        bo->dbc = dbc;
+       srcu_read_unlock(&dbc->ch_lock, rcu_id);
        drm_gem_object_put(obj);
        srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
        srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -1033,6 +1028,8 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
 
 unprepare_bo:
        qaic_unprepare_bo(qdev, bo);
+unlock_ch_srcu:
+       srcu_read_unlock(&dbc->ch_lock, rcu_id);
 put_bo:
        drm_gem_object_put(obj);
 free_slice_ent:
@@ -1316,7 +1313,6 @@ static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct dr
        received_ts = ktime_get_ns();
 
        size = is_partial ? sizeof(*pexec) : sizeof(*exec);
-
        n = (unsigned long)size * args->hdr.count;
        if (args->hdr.count == 0 || n / args->hdr.count != size)
                return -EINVAL;
@@ -1665,6 +1661,9 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
        int rcu_id;
        int ret;
 
+       if (args->pad != 0)
+               return -EINVAL;
+
        usr = file_priv->driver_priv;
        usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
        if (!usr->qddev) {
@@ -1679,11 +1678,6 @@ int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file
                goto unlock_dev_srcu;
        }
 
-       if (args->pad != 0) {
-               ret = -EINVAL;
-               goto unlock_dev_srcu;
-       }
-
        if (args->dbc_id >= qdev->num_dbc) {
                ret = -EINVAL;
                goto unlock_dev_srcu;
@@ -1855,6 +1849,11 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
        dbc->usr = NULL;
        empty_xfer_list(qdev, dbc);
        synchronize_srcu(&dbc->ch_lock);
+       /*
+        * Threads still holding the channel lock may add more elements to
+        * the xfer_list; flush those elements out as well.
+        */
+       empty_xfer_list(qdev, dbc);
 }
 
 void release_dbc(struct qaic_device *qdev, u32 dbc_id)
index ff80eb5..2d0828d 100644 (file)
@@ -262,8 +262,8 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
 
 static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
 {
+       u16 major = -1, minor = -1;
        struct qaic_device *qdev;
-       u16 major, minor;
        int ret;
 
        /*
index 1d6ef96..67c2c3b 100644 (file)
@@ -7,7 +7,6 @@
 #ifndef APEI_INTERNAL_H
 #define APEI_INTERNAL_H
 
-#include <linux/cper.h>
 #include <linux/acpi.h>
 
 struct apei_exec_context;
@@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
                return sizeof(*estatus) + estatus->data_length;
 }
 
-void cper_estatus_print(const char *pfx,
-                       const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
-
 int apei_osc_setup(void);
 #endif
index c23eb75..7514e38 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/cper.h>
 #include <linux/io.h>
 
 #include "apei-internal.h"
index e8492b3..0800a9d 100644 (file)
@@ -516,6 +516,17 @@ static const struct dmi_system_id maingear_laptop[] = {
        { }
 };
 
+static const struct dmi_system_id lg_laptop[] = {
+       {
+               .ident = "LG Electronics 17U70P",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
+                       DMI_MATCH(DMI_BOARD_NAME, "17U70P"),
+               },
+       },
+       { }
+};
+
 struct irq_override_cmp {
        const struct dmi_system_id *system;
        unsigned char irq;
@@ -532,6 +543,7 @@ static const struct irq_override_cmp override_table[] = {
        { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
        { tongfang_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
        { maingear_laptop, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
+       { lg_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
 };
 
 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
index fb56bfc..8fb7672 100644 (file)
@@ -1934,24 +1934,23 @@ static void binder_deferred_fd_close(int fd)
 static void binder_transaction_buffer_release(struct binder_proc *proc,
                                              struct binder_thread *thread,
                                              struct binder_buffer *buffer,
-                                             binder_size_t failed_at,
+                                             binder_size_t off_end_offset,
                                              bool is_failure)
 {
        int debug_id = buffer->debug_id;
-       binder_size_t off_start_offset, buffer_offset, off_end_offset;
+       binder_size_t off_start_offset, buffer_offset;
 
        binder_debug(BINDER_DEBUG_TRANSACTION,
                     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
                     proc->pid, buffer->debug_id,
                     buffer->data_size, buffer->offsets_size,
-                    (unsigned long long)failed_at);
+                    (unsigned long long)off_end_offset);
 
        if (buffer->target_node)
                binder_dec_node(buffer->target_node, 1, 0);
 
        off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
-       off_end_offset = is_failure && failed_at ? failed_at :
-                               off_start_offset + buffer->offsets_size;
+
        for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
             buffer_offset += sizeof(binder_size_t)) {
                struct binder_object_header *hdr;
@@ -2111,6 +2110,21 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
        }
 }
 
+/* Clean up all the objects in the buffer */
+static inline void binder_release_entire_buffer(struct binder_proc *proc,
+                                               struct binder_thread *thread,
+                                               struct binder_buffer *buffer,
+                                               bool is_failure)
+{
+       binder_size_t off_end_offset;
+
+       off_end_offset = ALIGN(buffer->data_size, sizeof(void *));
+       off_end_offset += buffer->offsets_size;
+
+       binder_transaction_buffer_release(proc, thread, buffer,
+                                         off_end_offset, is_failure);
+}
+
 static int binder_translate_binder(struct flat_binder_object *fp,
                                   struct binder_transaction *t,
                                   struct binder_thread *thread)
@@ -2806,7 +2820,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
                t_outdated->buffer = NULL;
                buffer->transaction = NULL;
                trace_binder_transaction_update_buffer_release(buffer);
-               binder_transaction_buffer_release(proc, NULL, buffer, 0, 0);
+               binder_release_entire_buffer(proc, NULL, buffer, false);
                binder_alloc_free_buf(&proc->alloc, buffer);
                kfree(t_outdated);
                binder_stats_deleted(BINDER_STAT_TRANSACTION);
@@ -3775,7 +3789,7 @@ binder_free_buf(struct binder_proc *proc,
                binder_node_inner_unlock(buf_node);
        }
        trace_binder_transaction_buffer_release(buffer);
-       binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
+       binder_release_entire_buffer(proc, thread, buffer, is_failure);
        binder_alloc_free_buf(&proc->alloc, buffer);
 }
 
index 55a3c3c..662a2a2 100644 (file)
@@ -212,8 +212,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                mm = alloc->mm;
 
        if (mm) {
-               mmap_read_lock(mm);
-               vma = vma_lookup(mm, alloc->vma_addr);
+               mmap_write_lock(mm);
+               vma = alloc->vma;
        }
 
        if (!vma && need_mm) {
@@ -270,7 +270,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                trace_binder_alloc_page_end(alloc, index);
        }
        if (mm) {
-               mmap_read_unlock(mm);
+               mmap_write_unlock(mm);
                mmput(mm);
        }
        return 0;
@@ -303,21 +303,24 @@ err_page_ptr_cleared:
        }
 err_no_vma:
        if (mm) {
-               mmap_read_unlock(mm);
+               mmap_write_unlock(mm);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
 }
 
+static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
+               struct vm_area_struct *vma)
+{
+       /* pairs with smp_load_acquire in binder_alloc_get_vma() */
+       smp_store_release(&alloc->vma, vma);
+}
+
 static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
 {
-       struct vm_area_struct *vma = NULL;
-
-       if (alloc->vma_addr)
-               vma = vma_lookup(alloc->mm, alloc->vma_addr);
-
-       return vma;
+       /* pairs with smp_store_release in binder_alloc_set_vma() */
+       return smp_load_acquire(&alloc->vma);
 }
 
 static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
@@ -380,15 +383,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
        size_t size, data_offsets_size;
        int ret;
 
-       mmap_read_lock(alloc->mm);
+       /* Check binder_alloc is fully initialized */
        if (!binder_alloc_get_vma(alloc)) {
-               mmap_read_unlock(alloc->mm);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }
-       mmap_read_unlock(alloc->mm);
 
        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));
@@ -778,7 +779,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
-       alloc->vma_addr = vma->vm_start;
+
+       /* Signal binder_alloc is fully initialized */
+       binder_alloc_set_vma(alloc, vma);
 
        return 0;
 
@@ -808,8 +811,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
        buffers = 0;
        mutex_lock(&alloc->mutex);
-       BUG_ON(alloc->vma_addr &&
-              vma_lookup(alloc->mm, alloc->vma_addr));
+       BUG_ON(alloc->vma);
 
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -916,25 +918,17 @@ void binder_alloc_print_pages(struct seq_file *m,
         * Make sure the binder_alloc is fully initialized, otherwise we might
         * read inconsistent state.
         */
-
-       mmap_read_lock(alloc->mm);
-       if (binder_alloc_get_vma(alloc) == NULL) {
-               mmap_read_unlock(alloc->mm);
-               goto uninitialized;
-       }
-
-       mmap_read_unlock(alloc->mm);
-       for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-               page = &alloc->pages[i];
-               if (!page->page_ptr)
-                       free++;
-               else if (list_empty(&page->lru))
-                       active++;
-               else
-                       lru++;
+       if (binder_alloc_get_vma(alloc) != NULL) {
+               for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+                       page = &alloc->pages[i];
+                       if (!page->page_ptr)
+                               free++;
+                       else if (list_empty(&page->lru))
+                               active++;
+                       else
+                               lru++;
+               }
        }
-
-uninitialized:
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
@@ -969,7 +963,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
  */
 void binder_alloc_vma_close(struct binder_alloc *alloc)
 {
-       alloc->vma_addr = 0;
+       binder_alloc_set_vma(alloc, NULL);
 }
 
 /**
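
Replacing vma_addr with a pointer published via smp_store_release() and read via smp_load_acquire() gives lockless readers a guarantee: observing a non-NULL vma implies observing a fully initialized binder_alloc. A minimal sketch of the publish/consume pairing, using a hypothetical type:

    struct foo { bool ready; };

    /* The writer finishes all initialization, then publishes the
     * pointer with release semantics; a reader that sees the pointer
     * through the acquire load also sees the initialization.
     */
    static void publish(struct foo **slot, struct foo *obj)
    {
            obj->ready = true;              /* init first...   */
            smp_store_release(slot, obj);   /* ...then publish */
    }

    static struct foo *consume(struct foo **slot)
    {
            return smp_load_acquire(slot);  /* NULL until published */
    }
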
index 0f811ac..138d1d5 100644 (file)
@@ -75,7 +75,7 @@ struct binder_lru_page {
 /**
  * struct binder_alloc - per-binder proc state for binder allocator
  * @mutex:              protects binder_alloc fields
- * @vma_addr:           vm_area_struct->vm_start passed to mmap_handler
+ * @vma:                vm_area_struct passed to mmap_handler
  *                      (invariant after mmap)
  * @mm:                 copy of task->mm (invariant after open)
  * @buffer:             base of per-proc address space mapped via mmap
@@ -99,7 +99,7 @@ struct binder_lru_page {
  */
 struct binder_alloc {
        struct mutex mutex;
-       unsigned long vma_addr;
+       struct vm_area_struct *vma;
        struct mm_struct *mm;
        void __user *buffer;
        struct list_head buffers;
index 43a8810..c2b323b 100644 (file)
@@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
        if (!binder_selftest_run)
                return;
        mutex_lock(&binder_selftest_lock);
-       if (!binder_selftest_run || !alloc->vma_addr)
+       if (!binder_selftest_run || !alloc->vma)
                goto done;
        pr_info("STARTED\n");
        binder_selftest_alloc_offset(alloc, end_offset, 0);
index 7bb12de..8ce9028 100644 (file)
@@ -2694,18 +2694,36 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
        return 0;
 }
 
-static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
+static struct ata_device *ata_find_dev(struct ata_port *ap, unsigned int devno)
 {
-       if (!sata_pmp_attached(ap)) {
-               if (likely(devno >= 0 &&
-                          devno < ata_link_max_devices(&ap->link)))
+       /*
+        * For the non-PMP case, ata_link_max_devices() returns 1 (SATA case),
+        * or 2 (IDE master + slave case). However, the former case includes
+        * libsas hosted devices which are numbered per scsi host, leading
+        * to devno potentially being larger than 0 but with each struct
+        * ata_device having its own struct ata_port and struct ata_link.
+        * To accommodate these, ignore devno and always use device number 0.
+        */
+       if (likely(!sata_pmp_attached(ap))) {
+               int link_max_devices = ata_link_max_devices(&ap->link);
+
+               if (link_max_devices == 1)
+                       return &ap->link.device[0];
+
+               if (devno < link_max_devices)
                        return &ap->link.device[devno];
-       } else {
-               if (likely(devno >= 0 &&
-                          devno < ap->nr_pmp_links))
-                       return &ap->pmp_link[devno].device[0];
+
+               return NULL;
        }
 
+       /*
+        * For PMP-attached devices, the device number corresponds to C
+        * (channel) of SCSI [H:C:I:L], indicating the PMP link (port)
+        * for the device.
+        */
+       if (devno < ap->nr_pmp_links)
+               return &ap->pmp_link[devno].device[0];
+
        return NULL;
 }
 
index bba3482..cbae8be 100644 (file)
@@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
                                continue;/* skip if itself or no cacheinfo */
                        for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
                                sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+
+                               /*
+                                * Comparing cache IDs only makes sense if the leaves
+                                * belong to the same cache level of same type. Skip
+                                * the check if level and type do not match.
+                                */
+                               if (sib_leaf->level != this_leaf->level ||
+                                   sib_leaf->type != this_leaf->type)
+                                       continue;
+
                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                        cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                        cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -400,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
                        coherency_max_size = this_leaf->coherency_line_size;
        }
 
+       /* shared_cpu_map is now populated for the cpu */
+       this_cpu_ci->cpu_map_populated = true;
        return 0;
 }
 
 static void cache_shared_cpu_map_remove(unsigned int cpu)
 {
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index, sib_index;
 
@@ -419,6 +432,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 
                        for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
                                sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+
+                               /*
+                                * Comparing cache IDs only makes sense if the leaves
+                                * belong to the same cache level of same type. Skip
+                                * the check if level and type do not match.
+                                */
+                               if (sib_leaf->level != this_leaf->level ||
+                                   sib_leaf->type != this_leaf->type)
+                                       continue;
+
                                if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -427,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
                        }
                }
        }
+
+       /* cpu is no longer populated in the shared map */
+       this_cpu_ci->cpu_map_populated = false;
 }
 
 static void free_cache_attributes(unsigned int cpu)
index ac1808d..05d9df9 100644 (file)
@@ -320,6 +320,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, const struct class *class,
                start_knode = &start->p->knode_class;
        klist_iter_init_node(&sp->klist_devices, &iter->ki, start_knode);
        iter->type = type;
+       iter->sp = sp;
 }
 EXPORT_SYMBOL_GPL(class_dev_iter_init);
 
@@ -361,6 +362,7 @@ EXPORT_SYMBOL_GPL(class_dev_iter_next);
 void class_dev_iter_exit(struct class_dev_iter *iter)
 {
        klist_iter_exit(&iter->ki);
+       subsys_put(iter->sp);
 }
 EXPORT_SYMBOL_GPL(class_dev_iter_exit);
 
index 9d79d5a..b58c42f 100644 (file)
@@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
        char *outbuf;
 
        alg = crypto_alloc_shash("sha256", 0, 0);
-       if (!alg)
+       if (IS_ERR(alg))
                return;
 
        sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
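
crypto_alloc_shash() follows the kernel's ERR_PTR convention: on failure it returns an encoded errno, never NULL, so the old "!alg" test could never fire. A short sketch of the convention:

    /* Allocators using ERR_PTR must be checked with IS_ERR(). */
    static int shash_setup(struct crypto_shash **out)
    {
            struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);

            if (IS_ERR(tfm))                /* never NULL on failure */
                    return PTR_ERR(tfm);    /* e.g. -ENOMEM, -ENOENT */
            *out = tfm;
            return 0;
    }
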
index 33a8366..0db2021 100644 (file)
@@ -4,16 +4,23 @@
 # subsystems should select the appropriate symbols.
 
 config REGMAP
+       bool "Register Map support" if KUNIT_ALL_TESTS
        default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SOUNDWIRE_MBQ || REGMAP_SCCB || REGMAP_I3C || REGMAP_SPI_AVMM || REGMAP_MDIO || REGMAP_FSI)
        select IRQ_DOMAIN if REGMAP_IRQ
        select MDIO_BUS if REGMAP_MDIO
-       bool
+       help
+         Enable support for the Register Map (regmap) access API.
+
+         Usually, this option is automatically selected when needed.
+         However, you may want to enable it manually for running the regmap
+         KUnit tests.
+
+         If unsure, say N.
 
 config REGMAP_KUNIT
        tristate "KUnit tests for regmap"
-       depends on KUNIT
+       depends on KUNIT && REGMAP
        default KUNIT_ALL_TESTS
-       select REGMAP
        select REGMAP_RAM
 
 config REGMAP_AC97
index 9b1b559..c2e3a0f 100644 (file)
@@ -203,15 +203,18 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
 
        mas_for_each(&mas, entry, max) {
                for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
+                       mas_pause(&mas);
+                       rcu_read_unlock();
                        ret = regcache_sync_val(map, r, entry[r - mas.index]);
                        if (ret != 0)
                                goto out;
+                       rcu_read_lock();
                }
        }
 
-out:
        rcu_read_unlock();
 
+out:
        map->cache_bypass = false;
 
        return ret;
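
regcache_sync_val() may sleep, which is not allowed under rcu_read_lock(); mas_pause() makes the maple-tree iterator safe to resume after the read lock is dropped and retaken. A hedged sketch of the shape, where do_sync() is a hypothetical worker that may sleep:

    static int sync_entries(struct ma_state *mas, unsigned long max)
    {
            void *entry;
            int ret = 0;

            rcu_read_lock();
            mas_for_each(mas, entry, max) {
                    mas_pause(mas);         /* iterator can restart */
                    rcu_read_unlock();
                    ret = do_sync(entry);   /* sleeping work here */
                    if (ret)
                            return ret;     /* lock already dropped */
                    rcu_read_lock();
            }
            rcu_read_unlock();
            return ret;
    }
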
index 09899ae..159c0b7 100644 (file)
@@ -59,6 +59,10 @@ static int regmap_sdw_config_check(const struct regmap_config *config)
        if (config->pad_bits != 0)
                return -ENOTSUPP;
 
+       /* Only bulk writes are supported, not multi-register writes */
+       if (config->can_multi_write)
+               return -ENOTSUPP;
+
        return 0;
 }
 
index db7851f..fa2d3fb 100644 (file)
@@ -2082,6 +2082,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
        size_t val_count = val_len / val_bytes;
        size_t chunk_count, chunk_bytes;
        size_t chunk_regs = val_count;
+       size_t max_data = map->max_raw_write - map->format.reg_bytes -
+                       map->format.pad_bytes;
        int ret, i;
 
        if (!val_count)
@@ -2089,8 +2091,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
 
        if (map->use_single_write)
                chunk_regs = 1;
-       else if (map->max_raw_write && val_len > map->max_raw_write)
-               chunk_regs = map->max_raw_write / val_bytes;
+       else if (map->max_raw_write && val_len > max_data)
+               chunk_regs = max_data / val_bytes;
 
        chunk_count = val_count / chunk_regs;
        chunk_bytes = chunk_regs * val_bytes;
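
max_raw_write is the whole transfer budget, but each write also carries the register address and any padding; the fix subtracts those before computing how many values fit per chunk. A worked example with hypothetical bus limits:

    /* Example: 256-byte transfer cap, 2-byte register addresses,
     * no padding, 2-byte values.
     *
     *   max_data   = 256 - 2 - 0 = 254 bytes of payload
     *   chunk_regs = 254 / 2     = 127 values per chunk
     *
     * The old code computed 256 / 2 = 128 and overran the cap once
     * the register address was prepended.
     */
    static size_t chunk_regs(size_t max_raw_write, size_t reg_bytes,
                             size_t pad_bytes, size_t val_bytes)
    {
            return (max_raw_write - reg_bytes - pad_bytes) / val_bytes;
    }
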
index c7ed5d6..33d3298 100644 (file)
@@ -1120,6 +1120,11 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
        return ubq->nr_io_ready == ubq->q_depth;
 }
 
+static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+{
+       io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+}
+
 static void ublk_cancel_queue(struct ublk_queue *ubq)
 {
        int i;
@@ -1131,8 +1136,8 @@ static void ublk_cancel_queue(struct ublk_queue *ubq)
                struct ublk_io *io = &ubq->ios[i];
 
                if (io->flags & UBLK_IO_FLAG_ACTIVE)
-                       io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
-                                               IO_URING_F_UNLOCKED);
+                       io_uring_cmd_complete_in_task(io->cmd,
+                                                     ublk_cmd_cancel_cb);
        }
 
        /* all io commands are canceled */
index 23ed258..c1890c8 100644 (file)
@@ -780,7 +780,8 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
                ring_req->u.rw.handle = info->handle;
                ring_req->operation = rq_data_dir(req) ?
                        BLKIF_OP_WRITE : BLKIF_OP_READ;
-               if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
+               if (req_op(req) == REQ_OP_FLUSH ||
+                   (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {
                        /*
                         * Ideally we can do an unordered flush-to-disk.
                         * In case the backend only supports barriers, use that.
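
A request's op lives in the low bits of cmd_flags and REQ_FUA is a separate flag bit, so testing REQ_FUA alone would also turn a non-write request carrying that bit into a write barrier; the fix restricts the barrier path to flushes and FUA writes. The corrected predicate as a standalone sketch:

    static bool needs_barrier(struct request *req)
    {
            return req_op(req) == REQ_OP_FLUSH ||
                   (req_op(req) == REQ_OP_WRITE &&
                    (req->cmd_flags & REQ_FUA));
    }
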
index 3a34d7c..52ef446 100644 (file)
@@ -1319,17 +1319,17 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
        hci_free_dev(hdev);
 }
 
-static struct btnxpuart_data w8987_data = {
+static struct btnxpuart_data w8987_data __maybe_unused = {
        .helper_fw_name = NULL,
        .fw_name = FIRMWARE_W8987,
 };
 
-static struct btnxpuart_data w8997_data = {
+static struct btnxpuart_data w8997_data __maybe_unused = {
        .helper_fw_name = FIRMWARE_HELPER,
        .fw_name = FIRMWARE_W8997,
 };
 
-static const struct of_device_id nxpuart_of_match_table[] = {
+static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = {
        { .compatible = "nxp,88w8987-bt", .data = &w8987_data },
        { .compatible = "nxp,88w8997-bt", .data = &w8997_data },
        { }
index 1b06450..e30c979 100644 (file)
@@ -78,7 +78,8 @@ enum qca_flags {
        QCA_HW_ERROR_EVENT,
        QCA_SSR_TRIGGERED,
        QCA_BT_OFF,
-       QCA_ROM_FW
+       QCA_ROM_FW,
+       QCA_DEBUGFS_CREATED,
 };
 
 enum qca_capabilities {
@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
        if (!hdev->debugfs)
                return;
 
+       if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
+               return;
+
        ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
 
        /* read only */
index d68d05d..514f9f2 100644 (file)
@@ -90,6 +90,9 @@ parisc_agp_tlbflush(struct agp_memory *mem)
 {
        struct _parisc_agp_info *info = &parisc_agp_info;
 
+       /* force fdc ops to be visible to IOMMU */
+       asm_io_sync();
+
        writeq(info->gart_base | ilog2(info->gart_size), info->ioc_regs+IOC_PCOM);
        readq(info->ioc_regs+IOC_PCOM); /* flush */
 }
@@ -158,6 +161,7 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
                        info->gatt[j] =
                                parisc_agp_mask_memory(agp_bridge,
                                        paddr, type);
+                       asm_io_fdc(&info->gatt[j]);
                }
        }
 
@@ -191,7 +195,16 @@ static unsigned long
 parisc_agp_mask_memory(struct agp_bridge_data *bridge, dma_addr_t addr,
                       int type)
 {
-       return SBA_PDIR_VALID_BIT | addr;
+       unsigned ci;                    /* coherent index */
+       dma_addr_t pa;
+
+       pa = addr & IOVP_MASK;
+       asm("lci 0(%1), %0" : "=r" (ci) : "r" (phys_to_virt(pa)));
+
+       pa |= (ci >> PAGE_SHIFT) & 0xff;        /* move CI (8 bits) into lowest byte */
+       pa |= SBA_PDIR_VALID_BIT;       /* set "valid" bit */
+
+       return cpu_to_le64(pa);
 }
 
 static void
index c10a4aa..cd48033 100644 (file)
@@ -571,6 +571,10 @@ static int tpm_hwrng_read(struct hwrng *rng, void *data, size_t max, bool wait)
 {
        struct tpm_chip *chip = container_of(rng, struct tpm_chip, hwrng);
 
+       /* Give back zero bytes, as the TPM chip has not yet fully resumed */
+       if (chip->flags & TPM_CHIP_FLAG_SUSPENDED)
+               return 0;
+
        return tpm_get_random(chip, data, max);
 }
 
index 4463d00..586ca10 100644 (file)
@@ -412,6 +412,8 @@ int tpm_pm_suspend(struct device *dev)
        }
 
 suspended:
+       chip->flags |= TPM_CHIP_FLAG_SUSPENDED;
+
        if (rc)
                dev_err(dev, "Ignoring error %d while suspending\n", rc);
        return 0;
@@ -429,6 +431,14 @@ int tpm_pm_resume(struct device *dev)
        if (chip == NULL)
                return -ENODEV;
 
+       chip->flags &= ~TPM_CHIP_FLAG_SUSPENDED;
+
+       /*
+        * Guarantee that SUSPENDED is written last, so that hwrng does not
+        * activate before the chip has been fully resumed.
+        */
+       wmb();
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_resume);
index 7af3898..7db3593 100644 (file)
@@ -122,6 +122,29 @@ static const struct dmi_system_id tpm_tis_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"),
                },
        },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "ThinkStation P360 Tiny",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkStation P360 Tiny"),
+               },
+       },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "ThinkPad L490",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L490"),
+               },
+       },
+       {
+               .callback = tpm_tis_disable_irq,
+               .ident = "UPX-TGL",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "AAEON"),
+               },
+       },
        {}
 };
 
index 02945d5..558144f 100644 (file)
@@ -1209,25 +1209,20 @@ static void tpm_tis_reenable_interrupts(struct tpm_chip *chip)
        u32 intmask;
        int rc;
 
-       if (chip->ops->clk_enable != NULL)
-               chip->ops->clk_enable(chip, true);
-
-       /* reenable interrupts that device may have lost or
-        * BIOS/firmware may have disabled
+       /*
+        * Re-enable interrupts that device may have lost or BIOS/firmware may
+        * have disabled.
         */
        rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), priv->irq);
-       if (rc < 0)
-               goto out;
+       if (rc < 0) {
+               dev_err(&chip->dev, "Setting IRQ failed.\n");
+               return;
+       }
 
        intmask = priv->int_mask | TPM_GLOBAL_INT_ENABLE;
-
-       tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
-
-out:
-       if (chip->ops->clk_enable != NULL)
-               chip->ops->clk_enable(chip, false);
-
-       return;
+       rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
+       if (rc < 0)
+               dev_err(&chip->dev, "Enabling interrupts failed.\n");
 }
 
 int tpm_tis_resume(struct device *dev)
@@ -1235,27 +1230,27 @@ int tpm_tis_resume(struct device *dev)
        struct tpm_chip *chip = dev_get_drvdata(dev);
        int ret;
 
-       ret = tpm_tis_request_locality(chip, 0);
-       if (ret < 0)
+       ret = tpm_chip_start(chip);
+       if (ret)
                return ret;
 
        if (chip->flags & TPM_CHIP_FLAG_IRQ)
                tpm_tis_reenable_interrupts(chip);
 
-       ret = tpm_pm_resume(dev);
-       if (ret)
-               goto out;
-
        /*
         * TPM 1.2 requires self-test on resume. This function actually returns
         * an error code but for unknown reason it isn't handled.
         */
        if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
                tpm1_do_selftest(chip);
-out:
-       tpm_tis_relinquish_locality(chip, 0);
 
-       return ret;
+       tpm_chip_stop(chip);
+
+       ret = tpm_pm_resume(dev);
+       if (ret)
+               return ret;
+
+       return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_tis_resume);
 #endif
index e978f45..610bfad 100644 (file)
@@ -84,10 +84,10 @@ enum tis_defaults {
 #define ILB_REMAP_SIZE                 0x100
 
 enum tpm_tis_flags {
-       TPM_TIS_ITPM_WORKAROUND         = BIT(0),
-       TPM_TIS_INVALID_STATUS          = BIT(1),
-       TPM_TIS_DEFAULT_CANCELLATION    = BIT(2),
-       TPM_TIS_IRQ_TESTED              = BIT(3),
+       TPM_TIS_ITPM_WORKAROUND         = 0,
+       TPM_TIS_INVALID_STATUS          = 1,
+       TPM_TIS_DEFAULT_CANCELLATION    = 2,
+       TPM_TIS_IRQ_TESTED              = 3,
 };
 
 struct tpm_tis_data {
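
These enum values feed set_bit()/test_bit()-style atomic bitops, which take a bit number, not a mask; with the previous BIT(n) initializers, an operation on "bit BIT(3)" actually touched bit 8. A small sketch:

    /* Atomic bitops address bits by number, not by mask. */
    static void flags_example(unsigned long *flags)
    {
            set_bit(TPM_TIS_IRQ_TESTED, flags);     /* sets bit 3 */
            WARN_ON(!test_bit(TPM_TIS_IRQ_TESTED, flags));
            /* set_bit(BIT(3), flags) would have set bit 8 instead. */
    }
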
index 2990439..b2f05d2 100644 (file)
@@ -975,7 +975,7 @@ static int __init acpi_cpufreq_probe(struct platform_device *pdev)
 
        /* don't keep reloading if cpufreq_driver exists */
        if (cpufreq_get_current_driver())
-               return -EEXIST;
+               return -ENODEV;
 
        pr_debug("%s\n", __func__);
 
index 5a3d4aa..ddd346a 100644 (file)
@@ -444,9 +444,8 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
        return 0;
 }
 
-static int amd_pstate_target(struct cpufreq_policy *policy,
-                            unsigned int target_freq,
-                            unsigned int relation)
+static int amd_pstate_update_freq(struct cpufreq_policy *policy,
+                                 unsigned int target_freq, bool fast_switch)
 {
        struct cpufreq_freqs freqs;
        struct amd_cpudata *cpudata = policy->driver_data;
@@ -465,26 +464,51 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
        des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
                                     cpudata->max_freq);
 
-       cpufreq_freq_transition_begin(policy, &freqs);
+       WARN_ON(fast_switch && !policy->fast_switch_enabled);
+       /*
+        * If fast_switch is desired, then there aren't any registered
+        * transition notifiers. See comment for
+        * cpufreq_enable_fast_switch().
+        */
+       if (!fast_switch)
+               cpufreq_freq_transition_begin(policy, &freqs);
+
        amd_pstate_update(cpudata, min_perf, des_perf,
-                         max_perf, false, policy->governor->flags);
-       cpufreq_freq_transition_end(policy, &freqs, false);
+                       max_perf, fast_switch, policy->governor->flags);
+
+       if (!fast_switch)
+               cpufreq_freq_transition_end(policy, &freqs, false);
 
        return 0;
 }
 
+static int amd_pstate_target(struct cpufreq_policy *policy,
+                            unsigned int target_freq,
+                            unsigned int relation)
+{
+       return amd_pstate_update_freq(policy, target_freq, false);
+}
+
+static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
+                                 unsigned int target_freq)
+{
+       return amd_pstate_update_freq(policy, target_freq, true);
+}
+
 static void amd_pstate_adjust_perf(unsigned int cpu,
                                   unsigned long _min_perf,
                                   unsigned long target_perf,
                                   unsigned long capacity)
 {
        unsigned long max_perf, min_perf, des_perf,
-                     cap_perf, lowest_nonlinear_perf;
+                     cap_perf, lowest_nonlinear_perf, max_freq;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct amd_cpudata *cpudata = policy->driver_data;
+       unsigned int target_freq;
 
        cap_perf = READ_ONCE(cpudata->highest_perf);
        lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
+       max_freq = READ_ONCE(cpudata->max_freq);
 
        des_perf = cap_perf;
        if (target_perf < capacity)
@@ -501,6 +525,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
        if (max_perf < min_perf)
                max_perf = min_perf;
 
+       des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
+       target_freq = div_u64(des_perf * max_freq, max_perf);
+       policy->cur = target_freq;
+
        amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true,
                        policy->governor->flags);
        cpufreq_cpu_put(policy);
@@ -715,6 +743,7 @@ static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
 
        freq_qos_remove_request(&cpudata->req[1]);
        freq_qos_remove_request(&cpudata->req[0]);
+       policy->fast_switch_possible = false;
        kfree(cpudata);
 
        return 0;
@@ -1079,7 +1108,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
        policy->policy = CPUFREQ_POLICY_POWERSAVE;
 
        if (boot_cpu_has(X86_FEATURE_CPPC)) {
-               policy->fast_switch_possible = true;
                ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
                if (ret)
                        return ret;
@@ -1102,7 +1130,6 @@ free_cpudata1:
 static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
 {
        pr_debug("CPU %d exiting\n", policy->cpu);
-       policy->fast_switch_possible = false;
        return 0;
 }
 
@@ -1309,6 +1336,7 @@ static struct cpufreq_driver amd_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
        .verify         = amd_pstate_verify,
        .target         = amd_pstate_target,
+       .fast_switch    = amd_pstate_fast_switch,
        .init           = amd_pstate_cpu_init,
        .exit           = amd_pstate_cpu_exit,
        .suspend        = amd_pstate_cpu_suspend,
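
The amd-pstate hunks above share one update helper between the governor's slow ->target() path and the lock-free ->fast_switch() path, wrapping only the slow path in transition notifiers (fast switching runs in scheduler context, where the notifier chain cannot be called, per the comment referencing cpufreq_enable_fast_switch()). A minimal user-space sketch of that shape; all names below are illustrative stand-ins, not the kernel API:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the begin/end transition notifiers. */
    static void transition_begin(unsigned int cpu) { printf("cpu%u: notify begin\n", cpu); }
    static void transition_end(unsigned int cpu)   { printf("cpu%u: notify end\n", cpu); }

    /* Shared update helper: notifiers only on the slow path, mirroring
     * the amd_pstate_update_freq(policy, target_freq, fast_switch) shape. */
    static int update_freq(unsigned int cpu, unsigned int khz, bool fast_switch)
    {
            if (!fast_switch)
                    transition_begin(cpu);

            printf("cpu%u: program %u kHz (fast=%d)\n", cpu, khz, fast_switch);

            if (!fast_switch)
                    transition_end(cpu);
            return 0;
    }

    int main(void)
    {
            update_freq(0, 1800000, false); /* ->target(): with notifiers */
            update_freq(0, 2400000, true);  /* ->fast_switch(): lock-free path */
            return 0;
    }
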
index 1d2cfea..73efbcf 100644 (file)
@@ -583,7 +583,7 @@ static int __init pcc_cpufreq_probe(struct platform_device *pdev)
 
        /* Skip initialization if another cpufreq driver is there. */
        if (cpufreq_get_current_driver())
-               return -EEXIST;
+               return -ENODEV;
 
        if (acpi_disabled)
                return -ENODEV;
index 23b9ff9..bea9cf3 100644 (file)
@@ -1028,7 +1028,7 @@ static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
  * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
  * @cxlds: The device data for the operation
  *
- * Return: 0 if identify was executed successfully.
+ * Return: 0 if identify was executed successfully or the media is not ready.
  *
  * This will dispatch the identify command to the device and on success populate
  * structures to be exported to sysfs.
@@ -1041,6 +1041,9 @@ int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
        u32 val;
        int rc;
 
+       if (!cxlds->media_ready)
+               return 0;
+
        mbox_cmd = (struct cxl_mbox_cmd) {
                .opcode = CXL_MBOX_OP_IDENTIFY,
                .size_out = sizeof(id),
@@ -1102,6 +1105,13 @@ int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
        struct device *dev = cxlds->dev;
        int rc;
 
+       if (!cxlds->media_ready) {
+               cxlds->dpa_res = DEFINE_RES_MEM(0, 0);
+               cxlds->ram_res = DEFINE_RES_MEM(0, 0);
+               cxlds->pmem_res = DEFINE_RES_MEM(0, 0);
+               return 0;
+       }
+
        cxlds->dpa_res =
                (struct resource)DEFINE_RES_MEM(0, cxlds->total_bytes);
 
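The two mailbox hunks above gate on the new cxlds->media_ready flag: identify becomes a successful no-op and the DPA ranges are published as zero-sized resources, so the rest of the stack sees "no capacity" rather than a probe failure. A small sketch of that degrade-gracefully pattern, with an invented struct in place of the CXL types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct res { uint64_t start, size; };

    struct dev_state {
            bool media_ready;
            uint64_t total_bytes;
            struct res dpa;
    };

    /* Mirrors the hunk above: when the media never became ready, publish
     * a zero-sized range and return success, so consumers see "no
     * capacity" instead of an error. */
    static int create_range_info(struct dev_state *d)
    {
            if (!d->media_ready) {
                    d->dpa = (struct res){ 0, 0 };
                    return 0;
            }
            d->dpa = (struct res){ 0, d->total_bytes };
            return 0;
    }

    int main(void)
    {
            struct dev_state d = { .media_ready = false, .total_bytes = 1ULL << 30 };
            create_range_info(&d);
            printf("dpa: start=%llu size=%llu\n",
                   (unsigned long long)d.dpa.start, (unsigned long long)d.dpa.size);
            return 0;
    }
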
index f332fe7..67f4ab6 100644 (file)
@@ -101,23 +101,57 @@ int devm_cxl_port_enumerate_dports(struct cxl_port *port)
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_port_enumerate_dports, CXL);
 
-/*
- * Wait up to @media_ready_timeout for the device to report memory
- * active.
- */
-int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+static int cxl_dvsec_mem_range_valid(struct cxl_dev_state *cxlds, int id)
+{
+       struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+       int d = cxlds->cxl_dvsec;
+       bool valid = false;
+       int rc, i;
+       u32 temp;
+
+       if (id > CXL_DVSEC_RANGE_MAX)
+               return -EINVAL;
+
+       /* Check the MEM INFO VALID bit first; give up after 1s */
+       i = 1;
+       do {
+               rc = pci_read_config_dword(pdev,
+                                          d + CXL_DVSEC_RANGE_SIZE_LOW(id),
+                                          &temp);
+               if (rc)
+                       return rc;
+
+               valid = FIELD_GET(CXL_DVSEC_MEM_INFO_VALID, temp);
+               if (valid)
+                       break;
+               msleep(1000);
+       } while (i--);
+
+       if (!valid) {
+               dev_err(&pdev->dev,
+                       "Timeout awaiting memory range %d valid after 1s.\n",
+                       id);
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static int cxl_dvsec_mem_range_active(struct cxl_dev_state *cxlds, int id)
 {
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        int d = cxlds->cxl_dvsec;
        bool active = false;
-       u64 md_status;
        int rc, i;
+       u32 temp;
 
-       for (i = media_ready_timeout; i; i--) {
-               u32 temp;
+       if (id > CXL_DVSEC_RANGE_MAX)
+               return -EINVAL;
 
+       /* Check the MEM ACTIVE bit; up to 60s timeout by default */
+       for (i = media_ready_timeout; i; i--) {
                rc = pci_read_config_dword(
-                       pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(0), &temp);
+                       pdev, d + CXL_DVSEC_RANGE_SIZE_LOW(id), &temp);
                if (rc)
                        return rc;
 
@@ -134,6 +168,39 @@ int cxl_await_media_ready(struct cxl_dev_state *cxlds)
                return -ETIMEDOUT;
        }
 
+       return 0;
+}
+
+/*
+ * Wait up to @media_ready_timeout for the device to report memory
+ * active.
+ */
+int cxl_await_media_ready(struct cxl_dev_state *cxlds)
+{
+       struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+       int d = cxlds->cxl_dvsec;
+       int rc, i, hdm_count;
+       u64 md_status;
+       u16 cap;
+
+       rc = pci_read_config_word(pdev,
+                                 d + CXL_DVSEC_CAP_OFFSET, &cap);
+       if (rc)
+               return rc;
+
+       hdm_count = FIELD_GET(CXL_DVSEC_HDM_COUNT_MASK, cap);
+       for (i = 0; i < hdm_count; i++) {
+               rc = cxl_dvsec_mem_range_valid(cxlds, i);
+               if (rc)
+                       return rc;
+       }
+
+       for (i = 0; i < hdm_count; i++) {
+               rc = cxl_dvsec_mem_range_active(cxlds, i);
+               if (rc)
+                       return rc;
+       }
+
        md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
        if (!CXLMDEV_READY(md_status))
                return -EIO;
@@ -241,17 +308,36 @@ static void disable_hdm(void *_cxlhdm)
               hdm + CXL_HDM_DECODER_CTRL_OFFSET);
 }
 
-static int devm_cxl_enable_hdm(struct device *host, struct cxl_hdm *cxlhdm)
+int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
 {
-       void __iomem *hdm = cxlhdm->regs.hdm_decoder;
+       void __iomem *hdm;
        u32 global_ctrl;
 
+       /*
+        * If the hdm capability was not mapped there is nothing to enable and
+        * the caller is responsible for what happens next.  For example,
+        * emulate a passthrough decoder.
+        */
+       if (IS_ERR(cxlhdm))
+               return 0;
+
+       hdm = cxlhdm->regs.hdm_decoder;
        global_ctrl = readl(hdm + CXL_HDM_DECODER_CTRL_OFFSET);
+
+       /*
+        * If the HDM decoder capability was enabled on entry, skip
+        * registering disable_hdm() since this decode capability may be
+        * owned by platform firmware.
+        */
+       if (global_ctrl & CXL_HDM_DECODER_ENABLE)
+               return 0;
+
        writel(global_ctrl | CXL_HDM_DECODER_ENABLE,
               hdm + CXL_HDM_DECODER_CTRL_OFFSET);
 
-       return devm_add_action_or_reset(host, disable_hdm, cxlhdm);
+       return devm_add_action_or_reset(&port->dev, disable_hdm, cxlhdm);
 }
+EXPORT_SYMBOL_NS_GPL(devm_cxl_enable_hdm, CXL);
 
 int cxl_dvsec_rr_decode(struct device *dev, int d,
                        struct cxl_endpoint_dvsec_info *info)
@@ -425,7 +511,7 @@ int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,
        if (info->mem_enabled)
                return 0;
 
-       rc = devm_cxl_enable_hdm(&port->dev, cxlhdm);
+       rc = devm_cxl_enable_hdm(port, cxlhdm);
        if (rc)
                return rc;
 
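cxl_dvsec_mem_range_valid() and cxl_dvsec_mem_range_active() above poll one status bit per HDM range until it sets or a timeout expires, pulling the bit out with FIELD_GET(). A self-contained sketch of that poll-with-timeout idiom; GENMASK()/FIELD_GET() are reimplemented locally since linux/bitfield.h is kernel-only, the bit position is invented, and POSIX sleep() stands in for msleep():

    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Local stand-ins for the kernel's GENMASK()/FIELD_GET() helpers;
     * FIELD_GET divides by the mask's lowest set bit to shift the field. */
    #define GENMASK(h, l)   (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_GET(m, v) (((v) & (m)) / ((m) & ~((m) << 1)))

    #define MEM_INFO_VALID  GENMASK(1, 1)   /* invented bit position */

    static volatile uint32_t fake_reg;      /* pretend device register */

    /* Poll @reg until MEM_INFO_VALID sets, sleeping 1s between reads and
     * giving up after @timeout_s seconds, the shape used above. */
    static int wait_range_valid(volatile uint32_t *reg, int timeout_s)
    {
            for (int i = timeout_s; i; i--) {
                    if (FIELD_GET(MEM_INFO_VALID, *reg))
                            return 0;
                    sleep(1);
            }
            return -1;      /* -ETIMEDOUT in the kernel version */
    }

    int main(void)
    {
            fake_reg = MEM_INFO_VALID;      /* device reports ready at once */
            printf("valid: %s\n", wait_range_valid(&fake_reg, 60) ? "timeout" : "ok");
            return 0;
    }
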
index da20684..e7c284c 100644 (file)
@@ -750,11 +750,10 @@ struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
 
        parent_port = parent_dport ? parent_dport->port : NULL;
        if (IS_ERR(port)) {
-               dev_dbg(uport, "Failed to add %s%s%s%s: %ld\n",
-                       dev_name(&port->dev),
-                       parent_port ? " to " : "",
+               dev_dbg(uport, "Failed to add%s%s%s: %ld\n",
+                       parent_port ? " port to " : "",
                        parent_port ? dev_name(&parent_port->dev) : "",
-                       parent_port ? "" : " (root port)",
+                       parent_port ? "" : " root port",
                        PTR_ERR(port));
        } else {
                dev_dbg(uport, "%s added%s%s%s\n",
index 044a92d..f93a285 100644 (file)
@@ -710,6 +710,7 @@ struct cxl_endpoint_dvsec_info {
 struct cxl_hdm;
 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info);
+int devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm);
 int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm,
                                struct cxl_endpoint_dvsec_info *info);
 int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
index db12b63..a2845a7 100644 (file)
@@ -266,6 +266,7 @@ struct cxl_poison_state {
  * @regs: Parsed register blocks
  * @cxl_dvsec: Offset to the PCIe device DVSEC
  * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
+ * @media_ready: Indicates whether the device media is usable
  * @payload_size: Size of space for payload
  *                (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
  * @lsa_size: Size of Label Storage Area
@@ -303,6 +304,7 @@ struct cxl_dev_state {
        int cxl_dvsec;
 
        bool rcd;
+       bool media_ready;
        size_t payload_size;
        size_t lsa_size;
        struct mutex mbox_mutex; /* Protects device mailbox and firmware */
index 0465ef9..7c02e55 100644 (file)
@@ -31,6 +31,8 @@
 #define   CXL_DVSEC_RANGE_BASE_LOW(i)  (0x24 + (i * 0x10))
 #define     CXL_DVSEC_MEM_BASE_LOW_MASK        GENMASK(31, 28)
 
+#define CXL_DVSEC_RANGE_MAX            2
+
 /* CXL 2.0 8.1.4: Non-CXL Function Map DVSEC */
 #define CXL_DVSEC_FUNCTION_MAP                                 2
 
index 10caf18..519edd0 100644 (file)
@@ -124,6 +124,9 @@ static int cxl_mem_probe(struct device *dev)
        struct dentry *dentry;
        int rc;
 
+       if (!cxlds->media_ready)
+               return -EBUSY;
+
        /*
         * Someone is trying to reattach this device after it lost its port
         * connection (an endpoint port previously registered by this memdev was
index f7a5b8e..0872f22 100644 (file)
@@ -708,6 +708,12 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (rc)
                dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
 
+       rc = cxl_await_media_ready(cxlds);
+       if (rc == 0)
+               cxlds->media_ready = true;
+       else
+               dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
+
        rc = cxl_pci_setup_mailbox(cxlds);
        if (rc)
                return rc;
index eb57324..c23b616 100644 (file)
@@ -60,13 +60,17 @@ static int discover_region(struct device *dev, void *root)
 static int cxl_switch_port_probe(struct cxl_port *port)
 {
        struct cxl_hdm *cxlhdm;
-       int rc;
+       int rc, nr_dports;
 
-       rc = devm_cxl_port_enumerate_dports(port);
-       if (rc < 0)
-               return rc;
+       nr_dports = devm_cxl_port_enumerate_dports(port);
+       if (nr_dports < 0)
+               return nr_dports;
 
        cxlhdm = devm_cxl_setup_hdm(port, NULL);
+       rc = devm_cxl_enable_hdm(port, cxlhdm);
+       if (rc)
+               return rc;
+
        if (!IS_ERR(cxlhdm))
                return devm_cxl_enumerate_decoders(cxlhdm, NULL);
 
@@ -75,7 +79,7 @@ static int cxl_switch_port_probe(struct cxl_port *port)
                return PTR_ERR(cxlhdm);
        }
 
-       if (rc == 1) {
+       if (nr_dports == 1) {
                dev_dbg(&port->dev, "Fallback to passthrough decoder\n");
                return devm_cxl_add_passthrough_decoder(port);
        }
@@ -113,12 +117,6 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
        if (rc)
                return rc;
 
-       rc = cxl_await_media_ready(cxlds);
-       if (rc) {
-               dev_err(&port->dev, "Media not active (%d)\n", rc);
-               return rc;
-       }
-
        rc = devm_cxl_enumerate_decoders(cxlhdm, &info);
        if (rc)
                return rc;
index 8858470..ee3a219 100644 (file)
 #define ATC_DST_PIP            BIT(12)         /* Destination Picture-in-Picture enabled */
 #define ATC_SRC_DSCR_DIS       BIT(16)         /* Src Descriptor fetch disable */
 #define ATC_DST_DSCR_DIS       BIT(20)         /* Dst Descriptor fetch disable */
-#define ATC_FC                 GENMASK(22, 21) /* Choose Flow Controller */
+#define ATC_FC                 GENMASK(23, 21) /* Choose Flow Controller */
 #define ATC_FC_MEM2MEM         0x0             /* Mem-to-Mem (DMA) */
 #define ATC_FC_MEM2PER         0x1             /* Mem-to-Periph (DMA) */
 #define ATC_FC_PER2MEM         0x2             /* Periph-to-Mem (DMA) */
 #define ATC_AUTO               BIT(31)         /* Auto multiple buffer tx enable */
 
 /* Bitfields in CFG */
-#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4)    /* Extract most significant bits of a handshaking identifier */
-
 #define ATC_SRC_PER            GENMASK(3, 0)   /* Channel src rq associated with periph handshaking ifc h */
 #define ATC_DST_PER            GENMASK(7, 4)   /* Channel dst rq associated with periph handshaking ifc h */
 #define ATC_SRC_REP            BIT(8)          /* Source Replay Mod */
 #define ATC_DPIP_HOLE          GENMASK(15, 0)
 #define ATC_DPIP_BOUNDARY      GENMASK(25, 16)
 
-#define ATC_SRC_PER_ID(id)     (FIELD_PREP(ATC_SRC_PER_MSB, (id)) |    \
-                                FIELD_PREP(ATC_SRC_PER, (id)))
-#define ATC_DST_PER_ID(id)     (FIELD_PREP(ATC_DST_PER_MSB, (id)) |    \
-                                FIELD_PREP(ATC_DST_PER, (id)))
+#define ATC_PER_MSB            GENMASK(5, 4)   /* Extract MSBs of a handshaking identifier */
+#define ATC_SRC_PER_ID(id)                                            \
+       ({ typeof(id) _id = (id);                                      \
+          FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) |  \
+          FIELD_PREP(ATC_SRC_PER, _id); })
+#define ATC_DST_PER_ID(id)                                            \
+       ({ typeof(id) _id = (id);                                      \
+          FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) |  \
+          FIELD_PREP(ATC_DST_PER, _id); })
 
 
 
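The reworked ATC_SRC_PER_ID()/ATC_DST_PER_ID() macros use a statement expression, ({ typeof(id) _id = (id); ... }), so the argument is evaluated once even though it feeds two FIELD_PREP()s. A sketch of the double-evaluation hazard the idiom avoids; statement expressions and typeof are the GCC/Clang extensions the kernel relies on, and the bit layout here is made up:

    #include <stdio.h>

    static int calls;
    static int next_id(void) { calls++; return 0x12; }

    /* Unsafe: the argument appears (and is evaluated) twice. */
    #define SPLIT_BAD(id)  (((((id) >> 4) & 0x3) << 8) | ((id) & 0xf))

    /* Safe: a statement expression captures the argument exactly once. */
    #define SPLIT_GOOD(id)                                  \
            ({ typeof(id) _id = (id);                       \
               ((((_id) >> 4) & 0x3) << 8) | (_id & 0xf); })

    int main(void)
    {
            calls = 0;
            int bad = SPLIT_BAD(next_id());
            printf("bad:  %#x, next_id() ran %d times\n", bad, calls);

            calls = 0;
            int good = SPLIT_GOOD(next_id());
            printf("good: %#x, next_id() ran %d times\n", good, calls);
            return 0;
    }
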
index 7da6d9b..c3b3716 100644 (file)
@@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
                                                        NULL,
                                                        src_addr, dst_addr,
                                                        xt, xt->sgl);
+               if (!first)
+                       return NULL;
 
                /* Length of the block is (BLEN+1) microblocks. */
                for (i = 0; i < xt->numf - 1; i++)
@@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
                                                               src_addr, dst_addr,
                                                               xt, chunk);
                        if (!desc) {
-                               list_splice_tail_init(&first->descs_list,
-                                                     &atchan->free_descs_list);
+                               if (first)
+                                       list_splice_tail_init(&first->descs_list,
+                                                             &atchan->free_descs_list);
                                return NULL;
                        }
 
index ecbf67c..d32deb9 100644 (file)
@@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
                if (wq_dedicated(wq)) {
                        rc = idxd_wq_set_pasid(wq, pasid);
                        if (rc < 0) {
-                               iommu_sva_unbind_device(sva);
                                dev_err(dev, "wq set pasid failed: %d\n", rc);
                                goto failed_set_pasid;
                        }
index 0d9257f..b4731fe 100644 (file)
@@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd)
        return true;
 }
 
-static bool _start(struct pl330_thread *thrd)
+static bool pl330_start_thread(struct pl330_thread *thrd)
 {
        switch (_state(thrd)) {
        case PL330_STATE_FAULT_COMPLETING:
@@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
                        thrd->req_running = -1;
 
                        /* Get going again ASAP */
-                       _start(thrd);
+                       pl330_start_thread(thrd);
 
                        /* For now, just make a list of callbacks to be done */
                        list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
        } else {
                /* Make sure the PL330 Channel thread is active */
                spin_lock(&pch->thread->dmac->lock);
-               _start(pch->thread);
+               pl330_start_thread(pch->thread);
                spin_unlock(&pch->thread->dmac->lock);
        }
 
@@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
                        if (power_down) {
                                pch->active = true;
                                spin_lock(&pch->thread->dmac->lock);
-                               _start(pch->thread);
+                               pl330_start_thread(pch->thread);
                                spin_unlock(&pch->thread->dmac->lock);
                                power_down = false;
                        }
index fc3a2a0..b8329a2 100644 (file)
@@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int udma_pm_suspend(struct device *dev)
+static int __maybe_unused udma_pm_suspend(struct device *dev)
 {
        struct udma_dev *ud = dev_get_drvdata(dev);
        struct dma_device *dma_dev = &ud->ddev;
@@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev)
        return 0;
 }
 
-static int udma_pm_resume(struct device *dev)
+static int __maybe_unused udma_pm_resume(struct device *dev)
 {
        struct udma_dev *ud = dev_get_drvdata(dev);
        struct dma_device *dma_dev = &ud->ddev;
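
Tagging udma_pm_suspend()/udma_pm_resume() __maybe_unused silences -Wunused-function in configurations where the PM-ops reference to them is compiled out, without wrapping the functions themselves in #ifdefs. A tiny sketch, spelling the attribute the way the kernel macro expands; FEATURE_PM is a made-up switch:

    #include <stdio.h>

    #define __maybe_unused __attribute__((unused))

    /* Referenced only when FEATURE_PM is defined; the attribute keeps
     * -Wunused-function quiet in configurations that compile it out. */
    static int __maybe_unused pm_suspend_cb(void)
    {
            puts("suspending");
            return 0;
    }

    int main(void)
    {
    #ifdef FEATURE_PM
            return pm_suspend_cb();
    #else
            return 0;
    #endif
    }

Building with -Wunused-function, with and without -DFEATURE_PM, shows the warning stays suppressed either way.
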
index f29d77e..2b8bfcd 100644 (file)
@@ -15,6 +15,8 @@
 
 #include "common.h"
 
+static DEFINE_IDA(ffa_bus_id);
+
 static int ffa_device_match(struct device *dev, struct device_driver *drv)
 {
        const struct ffa_device_id *id_table;
@@ -53,7 +55,8 @@ static void ffa_device_remove(struct device *dev)
 {
        struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver);
 
-       ffa_drv->remove(to_ffa_dev(dev));
+       if (ffa_drv->remove)
+               ffa_drv->remove(to_ffa_dev(dev));
 }
 
 static int ffa_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
@@ -130,6 +133,7 @@ static void ffa_release_device(struct device *dev)
 {
        struct ffa_device *ffa_dev = to_ffa_dev(dev);
 
+       ida_free(&ffa_bus_id, ffa_dev->id);
        kfree(ffa_dev);
 }
 
@@ -170,18 +174,24 @@ bool ffa_device_is_valid(struct ffa_device *ffa_dev)
 struct ffa_device *ffa_device_register(const uuid_t *uuid, int vm_id,
                                       const struct ffa_ops *ops)
 {
-       int ret;
+       int id, ret;
        struct device *dev;
        struct ffa_device *ffa_dev;
 
+       id = ida_alloc_min(&ffa_bus_id, 1, GFP_KERNEL);
+       if (id < 0)
+               return NULL;
+
        ffa_dev = kzalloc(sizeof(*ffa_dev), GFP_KERNEL);
-       if (!ffa_dev)
+       if (!ffa_dev) {
+               ida_free(&ffa_bus_id, id);
                return NULL;
+       }
 
        dev = &ffa_dev->dev;
        dev->bus = &ffa_bus_type;
        dev->release = ffa_release_device;
-       dev_set_name(&ffa_dev->dev, "arm-ffa-%04x", vm_id);
+       dev_set_name(&ffa_dev->dev, "arm-ffa-%d", id);
 
        ffa_dev->vm_id = vm_id;
        ffa_dev->ops = ops;
@@ -217,4 +227,5 @@ void arm_ffa_bus_exit(void)
 {
        ffa_devices_unregister();
        bus_unregister(&ffa_bus_type);
+       ida_destroy(&ffa_bus_id);
 }
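
The FF-A bus hunks above allocate a unique device id with ida_alloc_min() at registration, free it both in the device release callback and on every registration error path, and destroy the IDA at bus exit. A user-space sketch of that allocate/free discipline, with a toy 64-entry bitmap standing in for the kernel IDA:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in for the kernel IDA: 64 ids, allocated from @min upward. */
    static unsigned long long ida_bits;

    static int ida_alloc_min(int min)
    {
            for (int id = min; id < 64; id++) {
                    if (!(ida_bits & (1ULL << id))) {
                            ida_bits |= 1ULL << id;
                            return id;
                    }
            }
            return -1;      /* the kernel returns -ENOSPC here */
    }

    static void ida_free(int id) { ida_bits &= ~(1ULL << id); }

    static int register_device(void)
    {
            int id = ida_alloc_min(1);      /* id 0 stays reserved, as above */
            if (id < 0)
                    return -1;

            char *dev = malloc(32);
            if (!dev) {
                    ida_free(id);           /* every error path releases the id */
                    return -1;
            }
            printf("arm-ffa-%d registered\n", id);

            free(dev);                      /* release callback... */
            ida_free(id);                   /* ...frees the id, as in the hunk */
            return 0;
    }

    int main(void) { return register_device() ? 1 : 0; }
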
index fa85c64..e234091 100644 (file)
@@ -193,7 +193,8 @@ __ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
        int idx, count, flags = 0, sz, buf_sz;
        ffa_value_t partition_info;
 
-       if (!buffer || !num_partitions) /* Just get the count for now */
+       if (drv_info->version > FFA_VERSION_1_0 &&
+           (!buffer || !num_partitions)) /* Just get the count for now */
                flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;
 
        mutex_lock(&drv_info->rx_lock);
@@ -420,12 +421,17 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
                ep_mem_access->receiver = args->attrs[idx].receiver;
                ep_mem_access->attrs = args->attrs[idx].attrs;
                ep_mem_access->composite_off = COMPOSITE_OFFSET(args->nattrs);
+               ep_mem_access->flag = 0;
+               ep_mem_access->reserved = 0;
        }
+       mem_region->reserved_0 = 0;
+       mem_region->reserved_1 = 0;
        mem_region->ep_count = args->nattrs;
 
        composite = buffer + COMPOSITE_OFFSET(args->nattrs);
        composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
        composite->addr_range_cnt = num_entries;
+       composite->reserved = 0;
 
        length = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, num_entries);
        frag_len = COMPOSITE_CONSTITUENTS_OFFSET(args->nattrs, 0);
@@ -460,6 +466,7 @@ ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
 
                constituents->address = sg_phys(args->sg);
                constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
+               constituents->reserved = 0;
                constituents++;
                frag_len += sizeof(struct ffa_mem_region_addr_range);
        } while ((args->sg = sg_next(args->sg)));
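
ffa_setup_and_transmit() now assigns every reserved field of the message it builds: the transmit buffer is reused across calls, so stale bytes from a previous message must not reach the firmware, and the spec requires the fields to be zero. A sketch of the same discipline on an invented wire struct:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Invented wire layout with mandatory-zero reserved fields. */
    struct mem_region_msg {
            uint32_t ep_count;
            uint32_t reserved_0;    /* must be 0 on the wire */
            uint64_t reserved_1;    /* must be 0 on the wire */
    };

    /* The shared buffer is reused across transmissions, so every field,
     * including the reserved ones, is assigned; stale bytes must not leak. */
    static void build_msg(struct mem_region_msg *msg, uint32_t ep_count)
    {
            msg->ep_count = ep_count;
            msg->reserved_0 = 0;
            msg->reserved_1 = 0;
    }

    int main(void)
    {
            struct mem_region_msg msg;

            memset(&msg, 0xff, sizeof(msg));        /* simulate stale contents */
            build_msg(&msg, 2);
            printf("ep_count=%u reserved_0=%u\n", msg.ep_count, msg.reserved_0);
            return 0;
    }
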
index d40df09..6971dcf 100644 (file)
@@ -1066,7 +1066,7 @@ static int scmi_xfer_raw_worker_init(struct scmi_raw_mode_info *raw)
 
        raw->wait_wq = alloc_workqueue("scmi-raw-wait-wq-%d",
                                       WQ_UNBOUND | WQ_FREEZABLE |
-                                      WQ_HIGHPRI, WQ_SYSFS, raw->id);
+                                      WQ_HIGHPRI | WQ_SYSFS, 0, raw->id);
        if (!raw->wait_wq)
                return -ENOMEM;
 
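The SCMI fix above is an argument slip: WQ_SYSFS had been passed in alloc_workqueue()'s max_active slot instead of being OR-ed into the flags, which compiles cleanly because both parameters are integers. A sketch of how that happens; the signature below imitates, but is not, the kernel's:

    #include <stdio.h>

    #define WQ_HIGHPRI  (1 << 4)
    #define WQ_SYSFS    (1 << 6)

    /* Imitation of the alloc_workqueue(fmt, flags, max_active) shape. */
    static void alloc_wq(const char *name, unsigned int flags, int max_active)
    {
            printf("%s: flags=%#x max_active=%d\n", name, flags, max_active);
    }

    int main(void)
    {
            /* Bug: WQ_SYSFS lands in the max_active slot; it still compiles. */
            alloc_wq("buggy", WQ_HIGHPRI, WQ_SYSFS);

            /* Fix: OR the flags together; 0 selects the default max_active. */
            alloc_wq("fixed", WQ_HIGHPRI | WQ_SYSFS, 0);
            return 0;
    }
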
index 89ef820..2c48962 100644 (file)
@@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP)   := 0
 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,$(zboot-method-y))
 
-OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
+# avoid eager evaluation to prevent references to non-existent build artifacts
+OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
                          --rename-section .data=.gzdata,load,alloc,readonly,contents
 $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
        $(call if_changed,objcopy)
index 67d5a20..54a2822 100644 (file)
@@ -1133,4 +1133,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
 void efi_remap_image(unsigned long image_base, unsigned alloc_size,
                     unsigned long code_size);
 
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
+
 #endif
index 5521f06..f45c6a3 100644 (file)
@@ -897,7 +897,7 @@ config GPIO_F7188X
        help
          This option enables support for GPIOs found on Fintek Super-I/O
          chips F71869, F71869A, F71882FG, F71889F and F81866.
-         As well as Nuvoton Super-I/O chip NCT6116D.
+         As well as Nuvoton Super-I/O chip NCT6126D.
 
          To compile this driver as a module, choose M here: the module will
          be called f7188x-gpio.
index 9effa77..f54ca5a 100644 (file)
@@ -48,7 +48,7 @@
 /*
  * Nuvoton devices.
  */
-#define SIO_NCT6116D_ID                0xD283  /* NCT6116D chipset ID */
+#define SIO_NCT6126D_ID                0xD283  /* NCT6126D chipset ID */
 
 #define SIO_LD_GPIO_NUVOTON    0x07    /* GPIO logical device */
 
@@ -62,7 +62,7 @@ enum chips {
        f81866,
        f81804,
        f81865,
-       nct6116d,
+       nct6126d,
 };
 
 static const char * const f7188x_names[] = {
@@ -74,7 +74,7 @@ static const char * const f7188x_names[] = {
        "f81866",
        "f81804",
        "f81865",
-       "nct6116d",
+       "nct6126d",
 };
 
 struct f7188x_sio {
@@ -187,8 +187,8 @@ static int f7188x_gpio_set_config(struct gpio_chip *chip, unsigned offset,
 /* Output mode register (0:open drain 1:push-pull). */
 #define f7188x_gpio_out_mode(base) ((base) + 3)
 
-#define f7188x_gpio_dir_invert(type)   ((type) == nct6116d)
-#define f7188x_gpio_data_single(type)  ((type) == nct6116d)
+#define f7188x_gpio_dir_invert(type)   ((type) == nct6126d)
+#define f7188x_gpio_data_single(type)  ((type) == nct6126d)
 
 static struct f7188x_gpio_bank f71869_gpio_bank[] = {
        F7188X_GPIO_BANK(0, 6, 0xF0, DRVNAME "-0"),
@@ -274,7 +274,7 @@ static struct f7188x_gpio_bank f81865_gpio_bank[] = {
        F7188X_GPIO_BANK(60, 5, 0x90, DRVNAME "-6"),
 };
 
-static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
+static struct f7188x_gpio_bank nct6126d_gpio_bank[] = {
        F7188X_GPIO_BANK(0, 8, 0xE0, DRVNAME "-0"),
        F7188X_GPIO_BANK(10, 8, 0xE4, DRVNAME "-1"),
        F7188X_GPIO_BANK(20, 8, 0xE8, DRVNAME "-2"),
@@ -282,7 +282,7 @@ static struct f7188x_gpio_bank nct6116d_gpio_bank[] = {
        F7188X_GPIO_BANK(40, 8, 0xF0, DRVNAME "-4"),
        F7188X_GPIO_BANK(50, 8, 0xF4, DRVNAME "-5"),
        F7188X_GPIO_BANK(60, 8, 0xF8, DRVNAME "-6"),
-       F7188X_GPIO_BANK(70, 1, 0xFC, DRVNAME "-7"),
+       F7188X_GPIO_BANK(70, 8, 0xFC, DRVNAME "-7"),
 };
 
 static int f7188x_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -490,9 +490,9 @@ static int f7188x_gpio_probe(struct platform_device *pdev)
                data->nr_bank = ARRAY_SIZE(f81865_gpio_bank);
                data->bank = f81865_gpio_bank;
                break;
-       case nct6116d:
-               data->nr_bank = ARRAY_SIZE(nct6116d_gpio_bank);
-               data->bank = nct6116d_gpio_bank;
+       case nct6126d:
+               data->nr_bank = ARRAY_SIZE(nct6126d_gpio_bank);
+               data->bank = nct6126d_gpio_bank;
                break;
        default:
                return -ENODEV;
@@ -559,9 +559,9 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
        case SIO_F81865_ID:
                sio->type = f81865;
                break;
-       case SIO_NCT6116D_ID:
+       case SIO_NCT6126D_ID:
                sio->device = SIO_LD_GPIO_NUVOTON;
-               sio->type = nct6116d;
+               sio->type = nct6126d;
                break;
        default:
                pr_info("Unsupported Fintek device 0x%04x\n", devid);
@@ -569,7 +569,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
        }
 
        /* double check manufacturer where possible */
-       if (sio->type != nct6116d) {
+       if (sio->type != nct6126d) {
                manid = superio_inw(addr, SIO_FINTEK_MANID);
                if (manid != SIO_FINTEK_ID) {
                        pr_debug("Not a Fintek device at 0x%08x\n", addr);
@@ -581,7 +581,7 @@ static int __init f7188x_find(int addr, struct f7188x_sio *sio)
        err = 0;
 
        pr_info("Found %s at %#x\n", f7188x_names[sio->type], (unsigned int)addr);
-       if (sio->type != nct6116d)
+       if (sio->type != nct6126d)
                pr_info("   revision %d\n", superio_inb(addr, SIO_FINTEK_DEVREV));
 
 err:
index e6a7049..b32063a 100644 (file)
@@ -369,7 +369,7 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
                priv->offset = i;
                priv->desc = gpiochip_get_desc(gc, i);
 
-               debugfs_create_file(name, 0200, chip->dbg_dir, priv,
+               debugfs_create_file(name, 0600, chip->dbg_dir, priv,
                                    &gpio_mockup_debugfs_ops);
        }
 }
index a1c8702..8b49b0a 100644 (file)
@@ -696,6 +696,9 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
        char **line_names;
 
        list_for_each_entry(line, &bank->line_list, siblings) {
+               if (line->offset >= bank->num_lines)
+                       continue;
+
                if (line->name) {
                        if (line->offset > max_offset)
                                max_offset = line->offset;
@@ -721,8 +724,13 @@ static char **gpio_sim_make_line_names(struct gpio_sim_bank *bank,
        if (!line_names)
                return ERR_PTR(-ENOMEM);
 
-       list_for_each_entry(line, &bank->line_list, siblings)
-               line_names[line->offset] = line->name;
+       list_for_each_entry(line, &bank->line_list, siblings) {
+               if (line->offset >= bank->num_lines)
+                       continue;
+
+               if (line->name && (line->offset <= max_offset))
+                       line_names[line->offset] = line->name;
+       }
 
        return line_names;
 }
@@ -754,6 +762,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
 
        list_for_each_entry(bank, &dev->bank_list, siblings) {
                list_for_each_entry(line, &bank->line_list, siblings) {
+                       if (line->offset >= bank->num_lines)
+                               continue;
+
                        if (line->hog)
                                num_hogs++;
                }
@@ -769,6 +780,9 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
 
        list_for_each_entry(bank, &dev->bank_list, siblings) {
                list_for_each_entry(line, &bank->line_list, siblings) {
+                       if (line->offset >= bank->num_lines)
+                               continue;
+
                        if (!line->hog)
                                continue;
 
index 04fb05d..a7220e0 100644 (file)
@@ -209,6 +209,8 @@ static int gpiochip_find_base(int ngpio)
                        break;
                /* nope, check the space right after the chip */
                base = gdev->base + gdev->ngpio;
+               if (base < GPIO_DYNAMIC_BASE)
+                       base = GPIO_DYNAMIC_BASE;
        }
 
        if (gpio_is_valid(base)) {
index aeeec21..fd6e837 100644 (file)
@@ -1092,16 +1092,20 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
         * S0ix even though the system is suspending to idle, so return false
         * in that case.
         */
-       if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
-               dev_warn_once(adev->dev,
+       if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+               dev_err_once(adev->dev,
                              "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
                              "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+               return false;
+       }
 
 #if !IS_ENABLED(CONFIG_AMD_PMC)
-       dev_warn_once(adev->dev,
+       dev_err_once(adev->dev,
                      "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
-#endif /* CONFIG_AMD_PMC */
+       return false;
+#else
        return true;
+#endif /* CONFIG_AMD_PMC */
 }
 
 #endif /* CONFIG_SUSPEND */
index f52d0ba..a7d2508 100644 (file)
@@ -582,7 +582,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
                if (r)
                        amdgpu_fence_driver_force_completion(ring);
 
-               if (ring->fence_drv.irq_src)
+               if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+                   ring->fence_drv.irq_src)
                        amdgpu_irq_put(adev, ring->fence_drv.irq_src,
                                       ring->fence_drv.irq_type);
 
index 4e25317..95b0f98 100644 (file)
@@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        case IP_VERSION(9, 3, 0):
        /* GC 10.3.7 */
        case IP_VERSION(10, 3, 7):
+       /* GC 11.0.1 */
+       case IP_VERSION(11, 0, 1):
                if (amdgpu_tmz == 0) {
                        adev->gmc.tmz_enabled = false;
                        dev_info(adev->dev,
@@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        case IP_VERSION(10, 3, 1):
        /* YELLOW_CARP*/
        case IP_VERSION(10, 3, 3):
-       case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 4):
                /* Don't enable it by default yet.
                 */
index b07c000..4fa019c 100644 (file)
@@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r, i;
+
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+               for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+                       if (adev->jpeg.harvest_config & (1 << i))
+                               continue;
+
+                       r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+                       if (r)
+                               goto late_fini;
+               }
+       }
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+       return r;
+}
+
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
 {
        int err;
@@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
        adev->jpeg.ras_if = &ras->ras_block.ras_comm;
 
        if (!ras->ras_block.ras_late_init)
-               ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+               ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
 
        return 0;
 }
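
amdgpu_jpeg_ras_late_init() above (and its VCN twin further down) follows the kernel's goto-unwind convention: a failure after the first successful step jumps to a label that undoes that step, so partially completed setup is never leaked. A minimal sketch of the shape with invented steps:

    #include <stdio.h>
    #include <stdlib.h>

    static int step_a(void **p) { return (*p = malloc(16)) ? 0 : -1; }
    static int step_b(void)     { return 0; /* pretend this can fail */ }

    static int late_init(void)
    {
            void *res;
            int r;

            r = step_a(&res);
            if (r)
                    return r;               /* nothing to undo yet */

            r = step_b();
            if (r)
                    goto late_fini;         /* unwind step_a on failure */

            return 0;

    late_fini:
            free(res);                      /* mirrors the *_late_fini() call */
            return r;
    }

    int main(void) { return late_init() ? 1 : 0; }
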
index 0ca76f0..1471a1e 100644 (file)
@@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{
 struct amdgpu_jpeg_inst {
        struct amdgpu_ring ring_dec;
        struct amdgpu_irq_src irq;
+       struct amdgpu_irq_src ras_poison_irq;
        struct amdgpu_jpeg_reg external;
 };
 
@@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
                                struct amdgpu_irq_src *source,
                                struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+                               struct ras_common_if *ras_block);
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
 
 #endif /*__AMDGPU_JPEG_H__*/
index 2bd1a54..3b225be 100644 (file)
@@ -79,9 +79,10 @@ static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
 static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
-       struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+       struct amdgpu_bo *shadow_bo = ttm_to_amdgpu_bo(tbo), *bo;
        struct amdgpu_bo_vm *vmbo;
 
+       bo = shadow_bo->parent;
        vmbo = to_amdgpu_bo_vm(bo);
        /* in case amdgpu_device_recover_vram got NULL of bo->parent */
        if (!list_empty(&vmbo->shadow_list)) {
@@ -694,11 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
                return r;
 
        *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
-       INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
-       /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
-        * is initialized.
-        */
-       bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
        return r;
 }
 
@@ -715,6 +711,8 @@ void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
 
        mutex_lock(&adev->shadow_list_lock);
        list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+       vmbo->shadow->parent = amdgpu_bo_ref(&vmbo->bo);
+       vmbo->shadow->tbo.destroy = &amdgpu_bo_vm_destroy;
        mutex_unlock(&adev->shadow_list_lock);
 }
 
index e63fcc5..2d94f1b 100644 (file)
@@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
        return 0;
 }
 
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+       int r, i;
+
+       r = amdgpu_ras_block_late_init(adev, ras_block);
+       if (r)
+               return r;
+
+       if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+               for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+                       if (adev->vcn.harvest_config & (1 << i))
+                               continue;
+
+                       r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+                       if (r)
+                               goto late_fini;
+               }
+       }
+       return 0;
+
+late_fini:
+       amdgpu_ras_block_late_fini(adev, ras_block);
+       return r;
+}
+
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 {
        int err;
@@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
        adev->vcn.ras_if = &ras->ras_block.ras_comm;
 
        if (!ras->ras_block.ras_late_init)
-               ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+               ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
 
        return 0;
 }
index c730949..f1397ef 100644 (file)
@@ -234,6 +234,7 @@ struct amdgpu_vcn_inst {
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        atomic_t                sched_score;
        struct amdgpu_irq_src   irq;
+       struct amdgpu_irq_src   ras_poison_irq;
        struct amdgpu_vcn_reg   external;
        struct amdgpu_bo        *dpg_sram_bo;
        struct dpg_pause_state  pause_state;
@@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
 int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
                        struct amdgpu_irq_src *source,
                        struct amdgpu_iv_entry *entry);
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
+                       struct ras_common_if *ras_block);
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);
 
 #endif
index df63dc3..051c719 100644 (file)
@@ -564,7 +564,6 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                return r;
        }
 
-       (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
        amdgpu_bo_add_to_shadow_list(*vmbo);
 
        return 0;
index 43d6a9d..afacfb9 100644 (file)
@@ -800,7 +800,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
 {
        struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
        struct drm_buddy *mm = &mgr->mm;
-       struct drm_buddy_block *block;
+       struct amdgpu_vram_reservation *rsv;
 
        drm_printf(printer, "  vis usage:%llu\n",
                   amdgpu_vram_mgr_vis_usage(mgr));
@@ -812,8 +812,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
        drm_buddy_print(mm, printer);
 
        drm_printf(printer, "reserved:\n");
-       list_for_each_entry(block, &mgr->reserved_pages, link)
-               drm_buddy_block_print(mm, block, printer);
+       list_for_each_entry(rsv, &mgr->reserved_pages, blocks)
+               drm_printf(printer, "%#018llx-%#018llx: %llu\n",
+                       rsv->start, rsv->start + rsv->size, rsv->size);
        mutex_unlock(&mgr->lock);
 }
 
index f5b5ce1..ab44c13 100644 (file)
@@ -6892,8 +6892,10 @@ static int gfx_v10_0_kiq_resume(struct amdgpu_device *adev)
                return r;
 
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0))
+       if (unlikely(r != 0)) {
+               amdgpu_bo_unreserve(ring->mqd_obj);
                return r;
+       }
 
        gfx_v10_0_kiq_init_queue(ring);
        amdgpu_bo_kunmap(ring->mqd_obj);
@@ -8152,8 +8154,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
        case IP_VERSION(10, 3, 3):
        case IP_VERSION(10, 3, 6):
        case IP_VERSION(10, 3, 7):
+               if (!enable)
+                       amdgpu_gfx_off_ctrl(adev, false);
+
                gfx_v10_cntl_pg(adev, enable);
-               amdgpu_gfx_off_ctrl(adev, enable);
+
+               if (enable)
+                       amdgpu_gfx_off_ctrl(adev, true);
+
                break;
        default:
                break;
index f5c3762..c4940b6 100644 (file)
@@ -4667,24 +4667,27 @@ static uint64_t gfx_v11_0_get_gpu_clock_counter(struct amdgpu_device *adev)
        uint64_t clock;
        uint64_t clock_counter_lo, clock_counter_hi_pre, clock_counter_hi_after;
 
-       amdgpu_gfx_off_ctrl(adev, false);
-       mutex_lock(&adev->gfx.gpu_clock_mutex);
        if (amdgpu_sriov_vf(adev)) {
+               amdgpu_gfx_off_ctrl(adev, false);
+               mutex_lock(&adev->gfx.gpu_clock_mutex);
                clock_counter_hi_pre = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
                clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
                clock_counter_hi_after = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_HI);
                if (clock_counter_hi_pre != clock_counter_hi_after)
                        clock_counter_lo = (uint64_t)RREG32_SOC15(GC, 0, regCP_MES_MTIME_LO);
+               mutex_unlock(&adev->gfx.gpu_clock_mutex);
+               amdgpu_gfx_off_ctrl(adev, true);
        } else {
+               preempt_disable();
                clock_counter_hi_pre = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
                clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
                clock_counter_hi_after = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_UPPER);
                if (clock_counter_hi_pre != clock_counter_hi_after)
                        clock_counter_lo = (uint64_t)RREG32_SOC15(SMUIO, 0, regGOLDEN_TSC_COUNT_LOWER);
+               preempt_enable();
        }
        clock = clock_counter_lo | (clock_counter_hi_after << 32ULL);
-       mutex_unlock(&adev->gfx.gpu_clock_mutex);
-       amdgpu_gfx_off_ctrl(adev, true);
+
        return clock;
 }
 
@@ -5150,8 +5153,14 @@ static int gfx_v11_0_set_powergating_state(void *handle,
                break;
        case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 4):
+               if (!enable)
+                       amdgpu_gfx_off_ctrl(adev, false);
+
                gfx_v11_cntl_pg(adev, enable);
-               amdgpu_gfx_off_ctrl(adev, enable);
+
+               if (enable)
+                       amdgpu_gfx_off_ctrl(adev, true);
+
                break;
        default:
                break;
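
gfx_v11_0_get_gpu_clock_counter() in the hunk above reads a 64-bit counter exposed as two 32-bit registers with the hi/lo/hi dance: read high, read low, re-read high, and if the high word changed, the low word must have wrapped in between, so read it again (preemption is disabled to keep the window short). A sketch against a simulated register pair:

    #include <stdint.h>
    #include <stdio.h>

    /* Simulated 32-bit halves of a free-running 64-bit counter. */
    static volatile uint32_t reg_hi, reg_lo;

    static uint64_t read_counter64(void)
    {
            uint32_t hi_pre, lo, hi_post;

            hi_pre  = reg_hi;
            lo      = reg_lo;
            hi_post = reg_hi;

            /* If the high word advanced, the low word wrapped between the
             * reads; take a fresh low word, which pairs with hi_post. */
            if (hi_post != hi_pre)
                    lo = reg_lo;

            return ((uint64_t)hi_post << 32) | lo;
    }

    int main(void)
    {
            reg_hi = 2;
            reg_lo = 0x10;
            printf("counter=%#llx\n", (unsigned long long)read_counter64());
            return 0;
    }
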
index f46d4b1..e7f2b7b 100644 (file)
@@ -149,16 +149,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_sjt_mec2.bin");
 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir                0x0026
 #define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX       1
 
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven   0x007a
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven   0x007b
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven_BASE_IDX 0
-
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2   0x0068
-#define mmGOLDEN_TSC_COUNT_UPPER_Raven2_BASE_IDX 0
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2   0x0069
-#define mmGOLDEN_TSC_COUNT_LOWER_Raven2_BASE_IDX 0
-
 enum ta_ras_gfx_subblock {
        /*CPC*/
        TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
@@ -3617,8 +3607,10 @@ static int gfx_v9_0_kiq_resume(struct amdgpu_device *adev)
                return r;
 
        r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
-       if (unlikely(r != 0))
+       if (unlikely(r != 0)) {
+               amdgpu_bo_unreserve(ring->mqd_obj);
                return r;
+       }
 
        gfx_v9_0_kiq_init_queue(ring);
        amdgpu_bo_kunmap(ring->mqd_obj);
@@ -4002,36 +3994,6 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
                preempt_enable();
                clock = clock_lo | (clock_hi << 32ULL);
                break;
-       case IP_VERSION(9, 1, 0):
-               preempt_disable();
-               clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-               hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven);
-               /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
-                * roughly every 42 seconds.
-                */
-               if (hi_check != clock_hi) {
-                       clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven);
-                       clock_hi = hi_check;
-               }
-               preempt_enable();
-               clock = clock_lo | (clock_hi << 32ULL);
-               break;
-       case IP_VERSION(9, 2, 2):
-               preempt_disable();
-               clock_hi = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
-               clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
-               hi_check = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_UPPER_Raven2);
-               /* The PWR TSC clock frequency is 100MHz, which sets 32-bit carry over
-                * roughly every 42 seconds.
-                */
-               if (hi_check != clock_hi) {
-                       clock_lo = RREG32_SOC15_NO_KIQ(PWR, 0, mmGOLDEN_TSC_COUNT_LOWER_Raven2);
-                       clock_hi = hi_check;
-               }
-               preempt_enable();
-               clock = clock_lo | (clock_hi << 32ULL);
-               break;
        default:
                amdgpu_gfx_off_ctrl(adev, false);
                mutex_lock(&adev->gfx.gpu_clock_mutex);
index d95f9fe..4116c11 100644 (file)
@@ -31,6 +31,8 @@
 #include "umc_v8_10.h"
 #include "athub/athub_3_0_0_sh_mask.h"
 #include "athub/athub_3_0_0_offset.h"
+#include "dcn/dcn_3_2_0_offset.h"
+#include "dcn/dcn_3_2_0_sh_mask.h"
 #include "oss/osssys_6_0_0_offset.h"
 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 #include "navi10_enum.h"
@@ -546,7 +548,24 @@ static void gmc_v11_0_get_vm_pte(struct amdgpu_device *adev,
 
 static unsigned gmc_v11_0_get_vbios_fb_size(struct amdgpu_device *adev)
 {
-       return 0;
+       u32 d1vga_control = RREG32_SOC15(DCE, 0, regD1VGA_CONTROL);
+       unsigned size;
+
+       if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
+               size = AMDGPU_VBIOS_VGA_ALLOCATION;
+       } else {
+               u32 viewport;
+               u32 pitch;
+
+               viewport = RREG32_SOC15(DCE, 0, regHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
+               pitch = RREG32_SOC15(DCE, 0, regHUBPREQ0_DCSURF_SURFACE_PITCH);
+               size = (REG_GET_FIELD(viewport,
+                                       HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+                               REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
+                               4);
+       }
+
+       return size;
 }
 
 static const struct amdgpu_gmc_funcs gmc_v11_0_gmc_funcs = {
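
gmc_v11_0_get_vbios_fb_size() above sizes the firmware-reserved framebuffer from the scanout registers as viewport height times surface pitch times 4, the factor of 4 implying a 4-byte pixel format, with a fixed allocation when VGA mode is enabled. A hedged arithmetic check with example values; the real ones come from the DCN registers:

    #include <stdio.h>

    int main(void)
    {
            /* Example scanout state, not read from hardware. */
            unsigned int height = 1080, pitch = 1920;
            unsigned long bytes = (unsigned long)height * pitch * 4;

            printf("reserved vbios fb: %lu bytes (~%.1f MiB)\n",
                   bytes, bytes / 1048576.0);     /* 8294400 bytes, ~7.9 MiB */
            return 0;
    }
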
index b040f51..73e0dc5 100644 (file)
@@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle)
 
                /* JPEG DJPEG POISON EVENT */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-                       VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
+                       VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
                if (r)
                        return r;
 
                /* JPEG EJPEG POISON EVENT */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
-                       VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
+                       VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
                if (r)
                        return r;
        }
@@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
                if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
                      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
                        jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+               if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+                       amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
        }
 
        return 0;
@@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned int type,
+                                       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
 static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
@@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
        case VCN_2_0__SRCID__JPEG_DECODE:
                amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
                break;
-       case VCN_2_6__SRCID_DJPEG0_POISON:
-       case VCN_2_6__SRCID_EJPEG0_POISON:
-               amdgpu_jpeg_process_poison_irq(adev, source, entry);
-               break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
        .process = jpeg_v2_5_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
+       .set = jpeg_v2_6_set_ras_interrupt_state,
+       .process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
        int i;
@@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 
                adev->jpeg.inst[i].irq.num_types = 1;
                adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;
+
+               adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
+               adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
        }
 }
 
@@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
 static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
        .ras_block = {
                .hw_ops = &jpeg_v2_6_ras_hw_ops,
+               .ras_late_init = amdgpu_jpeg_ras_late_init,
        },
 };
 
index 77e1e64..a3d83c9 100644 (file)
@@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle)
 
        /* JPEG DJPEG POISON EVENT */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
+                       VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
        if (r)
                return r;
 
        /* JPEG EJPEG POISON EVENT */
        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
-                       VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
+                       VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
        if (r)
                return r;
 
@@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
                        RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
                        jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
        }
-       amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
+       if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
+               amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);
 
        return 0;
 }
@@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned int type,
+                                       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
 static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
@@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
        case VCN_4_0__SRCID__JPEG_DECODE:
                amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
                break;
-       case VCN_4_0__SRCID_DJPEG0_POISON:
-       case VCN_4_0__SRCID_EJPEG0_POISON:
-               amdgpu_jpeg_process_poison_irq(adev, source, entry);
-               break;
        default:
                DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
        .process = jpeg_v4_0_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
+       .set = jpeg_v4_0_set_ras_interrupt_state,
+       .process = amdgpu_jpeg_process_poison_irq,
+};
+
 static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->jpeg.inst->irq.num_types = 1;
        adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;
+
+       adev->jpeg.inst->ras_poison_irq.num_types = 1;
+       adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
 }
 
 const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
 static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
        .ras_block = {
                .hw_ops = &jpeg_v4_0_ras_hw_ops,
+               .ras_late_init = amdgpu_jpeg_ras_late_init,
        },
 };
 
index e1b7fca..5f10883 100644 (file)
@@ -57,7 +57,13 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
        if (err)
                return err;
 
-       return psp_init_ta_microcode(psp, ucode_prefix);
+       err = psp_init_ta_microcode(psp, ucode_prefix);
+       if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 1, 0)) &&
+               (adev->pdev->revision == 0xa1) &&
+               (psp->securedisplay_context.context.bin_desc.fw_version >= 0x27000008)) {
+               adev->psp.securedisplay_context.context.bin_desc.size_bytes = 0;
+       }
+       return err;
 }
 
 static int psp_v10_0_ring_create(struct psp_context *psp,
index 6d15d5c..a2fd1ff 100644 (file)
@@ -301,10 +301,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
        u32 reference_clock = adev->clock.spll.reference_freq;
 
        if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1) ||
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
-           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
                return 10000;
+       if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+           adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
+               return reference_clock / 4;
 
        return reference_clock;
 }
index ab0b45d..515681c 100644 (file)
@@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)
 
                /* VCN POISON TRAP */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
-                       VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
+                       VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
                if (r)
                        return r;
        }
@@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
                    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
                     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
                        vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+
+               if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+                       amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
        }
 
        return 0;
@@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
        return 0;
 }
 
+static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
+                                       struct amdgpu_irq_src *source,
+                                       unsigned int type,
+                                       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
 static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
@@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
        case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
                break;
-       case VCN_2_6__SRCID_UVD_POISON:
-               amdgpu_vcn_process_poison_irq(adev, source, entry);
-               break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
        .process = vcn_v2_5_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
+       .set = vcn_v2_6_set_ras_interrupt_state,
+       .process = amdgpu_vcn_process_poison_irq,
+};
+
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 {
        int i;
@@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
                        continue;
                adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
                adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
+
+               adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+               adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
        }
 }
 
@@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
 static struct amdgpu_vcn_ras vcn_v2_6_ras = {
        .ras_block = {
                .hw_ops = &vcn_v2_6_ras_hw_ops,
+               .ras_late_init = amdgpu_vcn_ras_late_init,
        },
 };
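
The split above gives RAS poison its own amdgpu_irq_src with a no-op .set callback and amdgpu_vcn_process_poison_irq as .process; this works because the irq core only invokes .set when the get/put refcount crosses zero. A simplified model of that refcounting (illustrative only, not the driver's actual code):

    /* Illustrative model of amdgpu_irq_get()/amdgpu_irq_put() refcounting. */
    struct irq_src {
            int refcount;
            int (*set)(void *dev, struct irq_src *src, bool enable);
    };

    static int irq_get(void *dev, struct irq_src *src)
    {
            if (src->refcount++ == 0)       /* first user enables the source */
                    return src->set(dev, src, true);
            return 0;
    }

    static int irq_put(void *dev, struct irq_src *src)
    {
            if (--src->refcount == 0)       /* last user disables it */
                    return src->set(dev, src, false);
            return 0;
    }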
 
index bf06740..e5fd1e0 100644 (file)
@@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle)
 
                /* VCN POISON TRAP */
                r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
-                               VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
+                               VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
                if (r)
                        return r;
 
@@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle)
                         vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
                        }
                }
-
-               amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
+               if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
+                       amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
        }
 
        return 0;
@@ -1976,6 +1976,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
 }
 
 /**
+ * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
+ *
+ * @adev: amdgpu_device pointer
+ * @source: interrupt sources
+ * @type: interrupt types
+ * @state: interrupt states
+ *
+ * Set VCN block RAS interrupt state
+ */
+static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
+       struct amdgpu_irq_src *source,
+       unsigned int type,
+       enum amdgpu_interrupt_state state)
+{
+       return 0;
+}
+
+/**
  * vcn_v4_0_process_interrupt - process VCN block interrupt
  *
  * @adev: amdgpu_device pointer
@@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
        case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
                amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
                break;
-       case VCN_4_0__SRCID_UVD_POISON:
-               amdgpu_vcn_process_poison_irq(adev, source, entry);
-               break;
        default:
                DRM_ERROR("Unhandled interrupt: %d %d\n",
                          entry->src_id, entry->src_data[0]);
@@ -2024,6 +2039,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
        .process = vcn_v4_0_process_interrupt,
 };
 
+static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
+       .set = vcn_v4_0_set_ras_interrupt_state,
+       .process = amdgpu_vcn_process_poison_irq,
+};
+
 /**
  * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
  *
@@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)
 
                adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
                adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;
+
+               adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+               adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
        }
 }
 
@@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
 static struct amdgpu_vcn_ras vcn_v4_0_ras = {
        .ras_block = {
                .hw_ops = &vcn_v4_0_ras_hw_ops,
+               .ras_late_init = amdgpu_vcn_ras_late_init,
        },
 };
 
index 531f173..c0360db 100644 (file)
@@ -542,8 +542,15 @@ static u32 vi_get_xclk(struct amdgpu_device *adev)
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 tmp;
 
-       if (adev->flags & AMD_IS_APU)
-               return reference_clock;
+       if (adev->flags & AMD_IS_APU) {
+               switch (adev->asic_type) {
+               case CHIP_STONEY:
+                       /* vbios says 48 MHz, but the actual freq is 100 MHz */
+                       return 10000;
+               default:
+                       return reference_clock;
+               }
+       }
 
        tmp = RREG32_SMC(ixCG_CLKPIN_CNTL_2);
        if (REG_GET_FIELD(tmp, CG_CLKPIN_CNTL_2, MUX_TCLK_TO_XCLK))
index 8b4b186..d5cec03 100644 (file)
@@ -2479,20 +2479,25 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                if (acrtc && state->stream_status[i].plane_count != 0) {
                        irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
                        rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
-                       DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
-                                     acrtc->crtc_id, enable ? "en" : "dis", rc);
                        if (rc)
                                DRM_WARN("Failed to %s pflip interrupts\n",
                                         enable ? "enable" : "disable");
 
                        if (enable) {
-                               rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
-                               if (rc)
-                                       DRM_WARN("Failed to enable vblank interrupts\n");
-                       } else {
-                               amdgpu_dm_crtc_disable_vblank(&acrtc->base);
-                       }
+                               if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+                                       rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+                       } else
+                               rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
 
+                       if (rc)
+                               DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+
+                       irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+                       /* During gpu-reset we disable and then enable vblank irq, so
+                        * don't use amdgpu_irq_get/put() to avoid refcount change.
+                        */
+                       if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+                               DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
                }
        }
 
@@ -2852,7 +2857,7 @@ static int dm_resume(void *handle)
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->dc_link->type == dc_connection_mst_branch)
+               if (aconnector && aconnector->mst_root)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
@@ -6737,7 +6742,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
        int clock, bpp = 0;
        bool is_y420 = false;
 
-       if (!aconnector->mst_output_port || !aconnector->dc_sink)
+       if (!aconnector->mst_output_port)
                return 0;
 
        mst_port = aconnector->mst_output_port;
index e3762e8..440fc08 100644 (file)
@@ -146,7 +146,6 @@ static void vblank_control_worker(struct work_struct *work)
 
 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
 {
-       enum dc_irq_source irq_source;
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
        struct amdgpu_device *adev = drm_to_adev(crtc->dev);
        struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
@@ -169,18 +168,9 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
        if (rc)
                return rc;
 
-       if (amdgpu_in_reset(adev)) {
-               irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
-               /* During gpu-reset we disable and then enable vblank irq, so
-                * don't use amdgpu_irq_get/put() to avoid refcount change.
-                */
-               if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
-                       rc = -EBUSY;
-       } else {
-               rc = (enable)
-                       ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
-                       : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
-       }
+       rc = (enable)
+               ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+               : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
 
        if (rc)
                return rc;
index 52564b9..7cde67b 100644 (file)
@@ -1981,6 +1981,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        return result;
 }
 
+static bool commit_minimal_transition_state(struct dc *dc,
+               struct dc_state *transition_base_context);
+
 /**
  * dc_commit_streams - Commit current stream state
  *
@@ -2002,6 +2005,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
        struct dc_state *context;
        enum dc_status res = DC_OK;
        struct dc_validation_set set[MAX_STREAMS] = {0};
+       struct pipe_ctx *pipe;
+       bool handle_exit_odm2to1 = false;
 
        if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
                return res;
@@ -2026,6 +2031,22 @@ enum dc_status dc_commit_streams(struct dc *dc,
                }
        }
 
+       /* Check for the case where we are going from ODM 2:1 to the
+        * max-pipe scenario. In that case, call
+        * commit_minimal_transition_state() to exit ODM 2:1 first,
+        * before processing the new streams.
+        */
+       if (stream_count == dc->res_pool->pipe_count) {
+               for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                       pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+                       if (pipe->next_odm_pipe)
+                               handle_exit_odm2to1 = true;
+               }
+       }
+
+       if (handle_exit_odm2to1)
+               res = commit_minimal_transition_state(dc, dc->current_state);
+
        context = dc_create_state(dc);
        if (!context)
                goto context_alloc_fail;
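
For context on the ODM 2:1 check added above: ODM-combined pipes are linked through their next_odm_pipe/prev_odm_pipe pointers, so a pipe with a non-NULL next_odm_pipe indicates an active ODM combine. A self-contained sketch of the walk, assuming the field layout used in the hunk:

    /* Sketch: detect an active ODM combine by scanning the pipe contexts. */
    static bool any_odm_active(struct dc_state *state, int pipe_count)
    {
            int i;

            for (i = 0; i < pipe_count; i++)
                    if (state->res_ctx.pipe_ctx[i].next_odm_pipe)
                            return true;
            return false;
    }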
@@ -3872,6 +3893,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
        unsigned int i, j;
        unsigned int pipe_in_use = 0;
        bool subvp_in_use = false;
+       bool odm_in_use = false;
 
        if (!transition_context)
                return false;
@@ -3900,6 +3922,18 @@ static bool commit_minimal_transition_state(struct dc *dc,
                }
        }
 
+       /* If ODM is enabled and we are adding or removing planes from any ODM
+        * pipe, we must use the minimal transition.
+        */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+               if (pipe->stream && pipe->next_odm_pipe) {
+                       odm_in_use = true;
+                       break;
+               }
+       }
+
        /* When the OS adds a new surface while all pipes are already used by the ODM-combine
         * and MPC-split features, commit_minimal_transition_state is needed to transition safely.
         * After the OS exits MPO, it goes back to using ODM and MPC split with all pipes; we need
@@ -3908,7 +3942,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
         * Reduce the scenarios that use dc_commit_state_no_check at flip time, especially
         * entering/exiting MPO while DCN still has enough resources.
         */
-       if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use) {
+       if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
                dc_release_state(transition_context);
                return true;
        }
index 117d80c..fe15513 100644 (file)
@@ -1446,6 +1446,26 @@ static int acquire_first_split_pipe(
 
                        split_pipe->stream = stream;
                        return i;
+               } else if (split_pipe->prev_odm_pipe &&
+                               split_pipe->prev_odm_pipe->plane_state == split_pipe->plane_state) {
+                       split_pipe->prev_odm_pipe->next_odm_pipe = split_pipe->next_odm_pipe;
+                       if (split_pipe->next_odm_pipe)
+                               split_pipe->next_odm_pipe->prev_odm_pipe = split_pipe->prev_odm_pipe;
+
+                       if (split_pipe->prev_odm_pipe->plane_state)
+                               resource_build_scaling_params(split_pipe->prev_odm_pipe);
+
+                       memset(split_pipe, 0, sizeof(*split_pipe));
+                       split_pipe->stream_res.tg = pool->timing_generators[i];
+                       split_pipe->plane_res.hubp = pool->hubps[i];
+                       split_pipe->plane_res.ipp = pool->ipps[i];
+                       split_pipe->plane_res.dpp = pool->dpps[i];
+                       split_pipe->stream_res.opp = pool->opps[i];
+                       split_pipe->plane_res.mpcc_inst = pool->dpps[i]->inst;
+                       split_pipe->pipe_idx = i;
+
+                       split_pipe->stream = stream;
+                       return i;
                }
        }
        return -1;
index 422fbf7..5403e93 100644 (file)
@@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth(
        if (hubbub->funcs->program_compbuf_size)
                hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);
 
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
-               dc_dmub_srv_p_state_delegate(dc,
-                       true, context);
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
-               dc->clk_mgr->clks.fw_based_mclk_switching = true;
-       } else {
-               dc->clk_mgr->clks.fw_based_mclk_switching = false;
-       }
-
        dc->clk_mgr->funcs->update_clocks(
                        dc->clk_mgr,
                        context,
index 8263a07..32121db 100644 (file)
@@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
 }
 
 void dcn30_prepare_bandwidth(struct dc *dc,
-       struct dc_state *context)
+                            struct dc_state *context)
 {
-       bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
-       /* Any transition into an FPO config should disable MCLK switching first to avoid
-        * driver and FW P-State synchronization issues.
-        */
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
-               dc->optimized_required = true;
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
-       }
-
        if (dc->clk_mgr->dc_mode_softmax_enabled)
                if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
                                context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
                        dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
 
        dcn20_prepare_bandwidth(dc, context);
-       /*
-        * enabled -> enabled: do not disable
-        * enabled -> disabled: disable
-        * disabled -> enabled: don't care
-        * disabled -> disabled: don't care
-        */
-       if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
-               dc_dmub_srv_p_state_delegate(dc, false, context);
-
-       if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
-               /* After disabling P-State, restore the original value to ensure we get the correct P-State
-                * on the next optimize. */
-               context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
-       }
 }
 
index 47beb4e..0c4c320 100644 (file)
@@ -138,7 +138,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_2_soc = {
        .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
        .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
        .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
-       .pct_ideal_sdp_bw_after_urgent = 100.0,
+       .pct_ideal_sdp_bw_after_urgent = 90.0,
        .pct_ideal_fabric_bw_after_urgent = 67.0,
        .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 20.0,
        .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, // N/A, for now keep as is until DML implemented
index d4b7da5..e8b2fc4 100644 (file)
@@ -359,5 +359,8 @@ bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const un
                link[i] = stream[i].link;
                bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing);
        }
+
+       ret = dpia_validate_usb4_bw(link, bw_needed, num_streams);
+
        return ret;
 }
index 58c2246..f4f4045 100644 (file)
@@ -871,13 +871,11 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
        }
        if (ret == -ENOENT) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
-               if (size > 0) {
-                       size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
-                       size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
-                       size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
-                       size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
-                       size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
-               }
+               size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
        }
 
        if (size == 0)
index d6d9e3b..02e69cc 100644 (file)
@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-static int si_set_temperature_range(struct amdgpu_device *adev)
-{
-       int ret;
-
-       ret = si_thermal_enable_alert(adev, false);
-       if (ret)
-               return ret;
-       ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
-       if (ret)
-               return ret;
-       ret = si_thermal_enable_alert(adev, true);
-       if (ret)
-               return ret;
-
-       return ret;
-}
-
 static void si_dpm_disable(struct amdgpu_device *adev)
 {
        struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
 
 static int si_dpm_late_init(void *handle)
 {
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       if (!adev->pm.dpm_enabled)
-               return 0;
-
-       ret = si_set_temperature_range(adev);
-       if (ret)
-               return ret;
-#if 0 //TODO ?
-       si_dpm_powergate_uvd(adev, true);
-#endif
        return 0;
 }
 
index 5633c57..2ddf519 100644 (file)
@@ -733,6 +733,24 @@ static int smu_late_init(void *handle)
                return ret;
        }
 
+       /*
+        * Explicitly notify PMFW of the power mode the system is in,
+        * since PMFW may boot the ASIC in a different mode.
+        * For ASICs that support AC/DC switching via GPIO, PMFW
+        * handles the switch automatically; driver involvement
+        * is unnecessary.
+        */
+       if (!smu->dc_controlled_by_gpio) {
+               ret = smu_set_power_source(smu,
+                                          adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
+                                          SMU_POWER_SOURCE_DC);
+               if (ret) {
+                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
+                               adev->pm.ac_power ? "AC" : "DC");
+                       return ret;
+               }
+       }
+
        if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
            (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
                return 0;
index c400051..275f708 100644 (file)
@@ -3413,26 +3413,8 @@ static int navi10_post_smu_init(struct smu_context *smu)
                return 0;
 
        ret = navi10_run_umc_cdr_workaround(smu);
-       if (ret) {
+       if (ret)
                dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
-               return ret;
-       }
-
-       if (!smu->dc_controlled_by_gpio) {
-               /*
-                * For Navi1X, manually switch it to AC mode as PMFW
-                * may boot it with DC mode.
-                */
-               ret = smu_v11_0_set_power_source(smu,
-                                                adev->pm.ac_power ?
-                                                SMU_POWER_SOURCE_AC :
-                                                SMU_POWER_SOURCE_DC);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
-                                       adev->pm.ac_power ? "AC" : "DC");
-                       return ret;
-               }
-       }
 
        return ret;
 }
index 75f1868..85d5359 100644 (file)
@@ -2067,33 +2067,94 @@ static int sienna_cichlid_display_disable_memory_clock_switch(struct smu_context
        return ret;
 }
 
+static void sienna_cichlid_get_override_pcie_settings(struct smu_context *smu,
+                                                     uint32_t *gen_speed_override,
+                                                     uint32_t *lane_width_override)
+{
+       struct amdgpu_device *adev = smu->adev;
+
+       *gen_speed_override = 0xff;
+       *lane_width_override = 0xff;
+
+       switch (adev->pdev->device) {
+       case 0x73A0:
+       case 0x73A1:
+       case 0x73A2:
+       case 0x73A3:
+       case 0x73AB:
+       case 0x73AE:
+               /* Bits 7:0: PCIE lane width; values 1 to 7 correspond to x1 to x32 */
+               *lane_width_override = 6;
+               break;
+       case 0x73E0:
+       case 0x73E1:
+       case 0x73E3:
+               *lane_width_override = 4;
+               break;
+       case 0x7420:
+       case 0x7421:
+       case 0x7422:
+       case 0x7423:
+       case 0x7424:
+               *lane_width_override = 3;
+               break;
+       default:
+               break;
+       }
+}
+
+#define MAX(a, b)      ((a) > (b) ? (a) : (b))
+
 static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                                         uint32_t pcie_gen_cap,
                                         uint32_t pcie_width_cap)
 {
        struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
-       uint32_t smu_pcie_arg;
+       struct smu_11_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table;
+       uint32_t gen_speed_override, lane_width_override;
        uint8_t *table_member1, *table_member2;
+       uint32_t min_gen_speed, max_gen_speed;
+       uint32_t min_lane_width, max_lane_width;
+       uint32_t smu_pcie_arg;
        int ret, i;
 
        GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1);
        GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2);
 
-       /* lclk dpm table setup */
-       for (i = 0; i < MAX_PCIE_CONF; i++) {
-               dpm_context->dpm_tables.pcie_table.pcie_gen[i] = table_member1[i];
-               dpm_context->dpm_tables.pcie_table.pcie_lane[i] = table_member2[i];
+       sienna_cichlid_get_override_pcie_settings(smu,
+                                                 &gen_speed_override,
+                                                 &lane_width_override);
+
+       /* PCIE gen speed override */
+       if (gen_speed_override != 0xff) {
+               min_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+               max_gen_speed = MIN(pcie_gen_cap, gen_speed_override);
+       } else {
+               min_gen_speed = MAX(0, table_member1[0]);
+               max_gen_speed = MIN(pcie_gen_cap, table_member1[1]);
+               min_gen_speed = min_gen_speed > max_gen_speed ?
+                               max_gen_speed : min_gen_speed;
        }
+       pcie_table->pcie_gen[0] = min_gen_speed;
+       pcie_table->pcie_gen[1] = max_gen_speed;
+
+       /* PCIE lane width override */
+       if (lane_width_override != 0xff) {
+               min_lane_width = MIN(pcie_width_cap, lane_width_override);
+               max_lane_width = MIN(pcie_width_cap, lane_width_override);
+       } else {
+               min_lane_width = MAX(1, table_member2[0]);
+               max_lane_width = MIN(pcie_width_cap, table_member2[1]);
+               min_lane_width = min_lane_width > max_lane_width ?
+                                max_lane_width : min_lane_width;
+       }
+       pcie_table->pcie_lane[0] = min_lane_width;
+       pcie_table->pcie_lane[1] = max_lane_width;
 
        for (i = 0; i < NUM_LINK_LEVELS; i++) {
-               smu_pcie_arg = (i << 16) |
-                       ((table_member1[i] <= pcie_gen_cap) ?
-                        (table_member1[i] << 8) :
-                        (pcie_gen_cap << 8)) |
-                       ((table_member2[i] <= pcie_width_cap) ?
-                        table_member2[i] :
-                        pcie_width_cap);
+               smu_pcie_arg = (i << 16 |
+                               pcie_table->pcie_gen[i] << 8 |
+                               pcie_table->pcie_lane[i]);
 
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_OverridePcieParameters,
@@ -2101,11 +2162,6 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu,
                                NULL);
                if (ret)
                        return ret;
-
-               if (table_member1[i] > pcie_gen_cap)
-                       dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap;
-               if (table_member2[i] > pcie_width_cap)
-                       dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap;
        }
 
        return 0;
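
The override logic above collapses both DPM levels to the same value whenever a per-device override exists, and otherwise clamps the pptable min/max to the platform capability. The clamp pattern in isolation (function name hypothetical, MIN/MAX as in the hunk):

    /* Sketch of the two-level clamp used for PCIE gen speed and lane width. */
    static void clamp_levels(u32 cap, u32 override, u32 tbl_min, u32 tbl_max,
                             u32 *out_min, u32 *out_max)
    {
            if (override != 0xff) {         /* 0xff means "no override" */
                    *out_min = MIN(cap, override);
                    *out_max = MIN(cap, override);
            } else {
                    *out_max = MIN(cap, tbl_max);
                    *out_min = MIN(tbl_min, *out_max);
            }
    }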
index 7433dca..067b4e0 100644 (file)
@@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
        SmuMetrics_legacy_t metrics;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        bool cur_value_match_level = false;
 
@@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
        case SMU_MCLK:
        case SMU_FCLK:
                for (i = 0; i < count; i++) {
-                       ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
                        if (ret)
                                return ret;
                        if (!value)
@@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        DpmClocks_t *clk_table = smu->smu_table.clocks_table;
        SmuMetrics_t metrics;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        bool cur_value_match_level = false;
        uint32_t min, max;
@@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
        case SMU_MCLK:
        case SMU_FCLK:
                for (i = 0; i < count; i++) {
-                       ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
                        if (ret)
                                return ret;
                        if (!value)
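
The idx remap introduced above, and repeated for Renoir, SMU 13.0.4/13.0.5 and Yellow Carp below, compensates for DPM tables that firmware reports from highest to lowest level while the sysfs interface prints levels in ascending order. The remap in isolation (the 'descending' flag stands in for the clk_type test):

    /* Sketch: walk a descending-ordered table so the output is ascending. */
    int i, idx;

    for (i = 0; i < count; i++) {
            idx = descending ? (count - i - 1) : i;
            /* fetch level 'idx' from the table, print it as entry 'i' */
    }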
index 5cdc071..8a8ba25 100644 (file)
@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
 static int renoir_print_clk_levels(struct smu_context *smu,
                        enum smu_clk_type clk_type, char *buf)
 {
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
        SmuMetrics_t metrics;
        struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        case SMU_VCLK:
        case SMU_DCLK:
                for (i = 0; i < count; i++) {
-                       ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
                        if (ret)
                                return ret;
                        if (!value)
index 393c6a7..ca37918 100644 (file)
@@ -573,11 +573,11 @@ int smu_v13_0_init_power(struct smu_context *smu)
        if (smu_power->power_context || smu_power->power_context_size != 0)
                return -EINVAL;
 
-       smu_power->power_context = kzalloc(sizeof(struct smu_13_0_dpm_context),
+       smu_power->power_context = kzalloc(sizeof(struct smu_13_0_power_context),
                                           GFP_KERNEL);
        if (!smu_power->power_context)
                return -ENOMEM;
-       smu_power->power_context_size = sizeof(struct smu_13_0_dpm_context);
+       smu_power->power_context_size = sizeof(struct smu_13_0_power_context);
 
        return 0;
 }
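
The fix above is a classic sizeof mismatch: the allocation was sized for one struct while the recorded size and the users assumed another. The kernel idiom that avoids this class of bug is to size the allocation from the destination pointer, as in this sketch:

    /* Sketch: sizeof(*ptr) keeps the allocation size tied to the pointer's type. */
    struct smu_13_0_power_context *ctx;

    ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
            return -ENOMEM;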
index 8fa9a36..6d9760e 100644 (file)
@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
 static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
                                        enum smu_clk_type clk_type, char *buf)
 {
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        uint32_t min, max;
 
@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
                        break;
 
                for (i = 0; i < count; i++) {
-                       ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
                        if (ret)
                                break;
 
index 6644596..0081fa6 100644 (file)
@@ -866,7 +866,7 @@ out:
 static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
                                enum smu_clk_type clk_type, char *buf)
 {
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        uint32_t min = 0, max = 0;
 
@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
                        goto print_clk_out;
 
                for (i = 0; i < count; i++) {
-                       ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
                        if (ret)
                                goto print_clk_out;
 
index 3d9ff46..bba6216 100644 (file)
@@ -125,6 +125,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
        MSG_MAP(ArmD3,                          PPSMC_MSG_ArmD3,                       0),
        MSG_MAP(AllowGpo,                       PPSMC_MSG_SetGpoAllow,           0),
        MSG_MAP(GetPptLimit,                    PPSMC_MSG_GetPptLimit,                 0),
+       MSG_MAP(NotifyPowerSource,              PPSMC_MSG_NotifyPowerSource,           0),
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -1770,6 +1771,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_7_get_power_limit,
        .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
        .set_tool_table_location = smu_v13_0_set_tool_table_location,
index 04e56b0..798f36c 100644 (file)
@@ -1000,7 +1000,7 @@ out:
 static int yellow_carp_print_clk_levels(struct smu_context *smu,
                                enum smu_clk_type clk_type, char *buf)
 {
-       int i, size = 0, ret = 0;
+       int i, idx, size = 0, ret = 0;
        uint32_t cur_value = 0, value = 0, count = 0;
        uint32_t min, max;
 
@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
                        goto print_clk_out;
 
                for (i = 0; i < count; i++) {
-                       ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
+                       idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
+                       ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
                        if (ret)
                                goto print_clk_out;
 
index fbb070f..6dc1a09 100644 (file)
@@ -119,53 +119,32 @@ err_astdp_edid_not_ready:
 /*
  * Launch Aspeed DP
  */
-void ast_dp_launch(struct drm_device *dev, u8 bPower)
+void ast_dp_launch(struct drm_device *dev)
 {
-       u32 i = 0, j = 0, WaitCount = 1;
-       u8 bDPTX = 0;
+       u32 i = 0;
        u8 bDPExecute = 1;
-
        struct ast_device *ast = to_ast_device(dev);
-       // S3 come back, need more time to wait BMC ready.
-       if (bPower)
-               WaitCount = 300;
-
-
-       // Wait total count by different condition.
-       for (j = 0; j < WaitCount; j++) {
-               bDPTX = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK);
-
-               if (bDPTX)
-                       break;
 
+       // Wait up to one second, then time out.
+       while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, ASTDP_MCU_FW_EXECUTING) !=
+               ASTDP_MCU_FW_EXECUTING) {
+               i++;
+               // wait 100 ms
                msleep(100);
-       }
 
-       // 0xE : ASTDP with DPMCU FW handling
-       if (bDPTX == ASTDP_DPMCU_TX) {
-               // Wait one second then timeout.
-               i = 0;
-
-               while (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, COPROCESSOR_LAUNCH) !=
-                       COPROCESSOR_LAUNCH) {
-                       i++;
-                       // wait 100 ms
-                       msleep(100);
-
-                       if (i >= 10) {
-                               // DP would not be ready.
-                               bDPExecute = 0;
-                               break;
-                       }
+               if (i >= 10) {
+                       // DP is not going to be ready; give up.
+                       bDPExecute = 0;
+                       break;
                }
+       }
 
-               if (bDPExecute)
-                       ast->tx_chip_types |= BIT(AST_TX_ASTDP);
+       if (!bDPExecute)
+               drm_err(dev, "Wait DPMCU executing timeout\n");
 
-               ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
-                                                       (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
-                                                       ASTDP_HOST_EDID_READ_DONE);
-       }
+       ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5,
+                              (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK,
+                              ASTDP_HOST_EDID_READ_DONE);
 }
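
The rewritten ast_dp_launch() above reduces to a single bounded poll: check the DPMCU firmware-executing flag up to ten times at 100 ms intervals, a one-second worst case. The shape of the loop with the register details stripped out (read_fw_status() is a hypothetical stand-in):

    /* Sketch: bounded polling, ten tries at 100 ms gives a ~1 s timeout. */
    bool running = false;
    int i;

    for (i = 0; i < 10; i++) {
            if (read_fw_status() == FW_EXECUTING) { /* hypothetical status read */
                    running = true;
                    break;
            }
            msleep(100);
    }
    if (!running)
            drm_err(dev, "DPMCU firmware did not start\n");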
 
 
index a501169..5498a66 100644 (file)
@@ -350,9 +350,6 @@ int ast_mode_config_init(struct ast_device *ast);
 #define AST_DP501_LINKRATE     0xf014
 #define AST_DP501_EDID_DATA    0xf020
 
-/* Define for Soc scratched reg */
-#define COPROCESSOR_LAUNCH                     BIT(5)
-
 /*
  * Display Transmitter Type:
  */
@@ -480,7 +477,7 @@ struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev);
 
 /* aspeed DP */
 int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata);
-void ast_dp_launch(struct drm_device *dev, u8 bPower);
+void ast_dp_launch(struct drm_device *dev);
 void ast_dp_power_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_on_off(struct drm_device *dev, bool no);
 void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode);
index f32ce29..1f35438 100644 (file)
@@ -254,8 +254,13 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                case 0x0c:
                        ast->tx_chip_types = AST_TX_DP501_BIT;
                }
-       } else if (ast->chip == AST2600)
-               ast_dp_launch(&ast->base, 0);
+       } else if (ast->chip == AST2600) {
+               if (ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xD1, TX_TYPE_MASK) ==
+                   ASTDP_DPMCU_TX) {
+                       ast->tx_chip_types = AST_TX_ASTDP_BIT;
+                       ast_dp_launch(&ast->base);
+               }
+       }
 
        /* Print stuff for diagnostic purposes */
        if (ast->tx_chip_types & AST_TX_NONE_BIT)
@@ -264,6 +269,8 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
                drm_info(dev, "Using Sil164 TMDS transmitter\n");
        if (ast->tx_chip_types & AST_TX_DP501_BIT)
                drm_info(dev, "Using DP501 DisplayPort transmitter\n");
+       if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+               drm_info(dev, "Using ASPEED DisplayPort transmitter\n");
 
        return 0;
 }
index 3637482..b3c670a 100644 (file)
@@ -1647,6 +1647,8 @@ static int ast_dp501_output_init(struct ast_device *ast)
 static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 {
        void *edid;
+       struct drm_device *dev = connector->dev;
+       struct ast_device *ast = to_ast_device(dev);
 
        int succ;
        int count;
@@ -1655,9 +1657,17 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
        if (!edid)
                goto err_drm_connector_update_edid_property;
 
+       /*
+        * Protect access to I/O registers from concurrent modesetting
+        * by acquiring the I/O-register lock.
+        */
+       mutex_lock(&ast->ioregs_lock);
+
        succ = ast_astdp_read_edid(connector->dev, edid);
        if (succ < 0)
-               goto err_kfree;
+               goto err_mutex_unlock;
+
+       mutex_unlock(&ast->ioregs_lock);
 
        drm_connector_update_edid_property(connector, edid);
        count = drm_add_edid_modes(connector, edid);
@@ -1665,7 +1675,8 @@ static int ast_astdp_connector_helper_get_modes(struct drm_connector *connector)
 
        return count;
 
-err_kfree:
+err_mutex_unlock:
+       mutex_unlock(&ast->ioregs_lock);
        kfree(edid);
 err_drm_connector_update_edid_property:
        drm_connector_update_edid_property(connector, NULL);
index 71bb36b..a005aec 100644 (file)
@@ -380,7 +380,8 @@ void ast_post_gpu(struct drm_device *dev)
        ast_set_def_ext_reg(dev);
 
        if (ast->chip == AST2600) {
-               ast_dp_launch(dev, 1);
+               if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
+                       ast_dp_launch(dev);
        } else if (ast->config_mode == ast_use_p2a) {
                if (ast->chip == AST2500)
                        ast_post_chip_2500(dev);
index 6bb1b8b..fd27f19 100644 (file)
@@ -1545,17 +1545,19 @@ static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var,
        }
 }
 
-static void __fill_var(struct fb_var_screeninfo *var,
+static void __fill_var(struct fb_var_screeninfo *var, struct fb_info *info,
                       struct drm_framebuffer *fb)
 {
        int i;
 
        var->xres_virtual = fb->width;
        var->yres_virtual = fb->height;
-       var->accel_flags = FB_ACCELF_TEXT;
+       var->accel_flags = 0;
        var->bits_per_pixel = drm_format_info_bpp(fb->format, 0);
 
-       var->height = var->width = 0;
+       var->height = info->var.height;
+       var->width = info->var.width;
+
        var->left_margin = var->right_margin = 0;
        var->upper_margin = var->lower_margin = 0;
        var->hsync_len = var->vsync_len = 0;
@@ -1618,7 +1620,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
                return -EINVAL;
        }
 
-       __fill_var(var, fb);
+       __fill_var(var, info, fb);
 
        /*
         * fb_pan_display() validates this, but fb_set_par() doesn't and just
@@ -2074,7 +2076,7 @@ static void drm_fb_helper_fill_var(struct fb_info *info,
        info->pseudo_palette = fb_helper->pseudo_palette;
        info->var.xoffset = 0;
        info->var.yoffset = 0;
-       __fill_var(&info->var, fb);
+       __fill_var(&info->var, info, fb);
        info->var.activate = FB_ACTIVATE_NOW;
 
        drm_fb_helper_fill_pixel_fmt(&info->var, format);
index 4cf214d..c21c3f6 100644 (file)
@@ -264,28 +264,10 @@ void drmm_kfree(struct drm_device *dev, void *data)
 }
 EXPORT_SYMBOL(drmm_kfree);
 
-static void drmm_mutex_release(struct drm_device *dev, void *res)
+void __drmm_mutex_release(struct drm_device *dev, void *res)
 {
        struct mutex *lock = res;
 
        mutex_destroy(lock);
 }
-
-/**
- * drmm_mutex_init - &drm_device-managed mutex_init()
- * @dev: DRM device
- * @lock: lock to be initialized
- *
- * Returns:
- * 0 on success, or a negative errno code otherwise.
- *
- * This is a &drm_device-managed version of mutex_init(). The initialized
- * lock is automatically destroyed on the final drm_dev_put().
- */
-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock)
-{
-       mutex_init(lock);
-
-       return drmm_add_action_or_reset(dev, drmm_mutex_release, lock);
-}
-EXPORT_SYMBOL(drmm_mutex_init);
+EXPORT_SYMBOL(__drmm_mutex_release);
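
With drmm_mutex_init() removed from drm_managed.c and __drmm_mutex_release exported in its place, the initializer presumably moves into the drm_managed.h header as a macro so that each call site's mutex_init() gets its own lockdep class. A sketch consistent with the export above:

    /* Sketch of the assumed header-side replacement (not shown in this diff). */
    #define drmm_mutex_init(dev, lock) ({                                  \
            mutex_init(lock);                                              \
            drmm_add_action_or_reset(dev, __drmm_mutex_release, lock);     \
    })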
index b1a38e6..0cb646c 100644 (file)
@@ -179,7 +179,7 @@ static const struct dmi_system_id orientation_data[] = {
        }, {    /* AYA NEO AIR */
                .matches = {
                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
-                 DMI_MATCH(DMI_BOARD_NAME, "AIR"),
+                 DMI_MATCH(DMI_PRODUCT_NAME, "AIR"),
                },
                .driver_data = (void *)&lcd1080x1920_leftside_up,
        }, {    /* AYA NEO NEXT */
index ec784e5..414e585 100644 (file)
@@ -1335,7 +1335,7 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
        /* Let the runqueue know that there is work to do. */
        queue_work(g2d->g2d_workq, &g2d->runqueue_work);
 
-       if (runqueue_node->async)
+       if (req->async)
                goto out;
 
        wait_for_completion(&runqueue_node->complete);
index 74ea3c2..1a5ae78 100644 (file)
@@ -34,11 +34,11 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
        return -ENODEV;
 }
 
-int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
 {
        return 0;
 }
 
-void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
 { }
 #endif
index 4d56c8c..f5e1adf 100644 (file)
@@ -469,8 +469,6 @@ static int vidi_remove(struct platform_device *pdev)
        if (ctx->raw_edid != (struct edid *)fake_edid_info) {
                kfree(ctx->raw_edid);
                ctx->raw_edid = NULL;
-
-               return -EINVAL;
        }
 
        component_del(&pdev->dev, &vidi_component_ops);
index 084a483..2aaaba0 100644 (file)
@@ -1453,6 +1453,18 @@ static u8 tgl_calc_voltage_level(int cdclk)
                return 0;
 }
 
+static u8 rplu_calc_voltage_level(int cdclk)
+{
+       if (cdclk > 556800)
+               return 3;
+       else if (cdclk > 480000)
+               return 2;
+       else if (cdclk > 312000)
+               return 1;
+       else
+               return 0;
+}
+
 static void icl_readout_refclk(struct drm_i915_private *dev_priv,
                               struct intel_cdclk_config *cdclk_config)
 {
@@ -3242,6 +3254,13 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
        .calc_voltage_level = tgl_calc_voltage_level,
 };
 
+static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
+       .get_cdclk = bxt_get_cdclk,
+       .set_cdclk = bxt_set_cdclk,
+       .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+       .calc_voltage_level = rplu_calc_voltage_level,
+};
+
 static const struct intel_cdclk_funcs tgl_cdclk_funcs = {
        .get_cdclk = bxt_get_cdclk,
        .set_cdclk = bxt_set_cdclk,
@@ -3384,14 +3403,17 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
                dev_priv->display.cdclk.table = dg2_cdclk_table;
        } else if (IS_ALDERLAKE_P(dev_priv)) {
-               dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
                /* Wa_22011320316:adl-p[a0] */
-               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
                        dev_priv->display.cdclk.table = adlp_a_step_cdclk_table;
-               else if (IS_ADLP_RPLU(dev_priv))
+                       dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+               } else if (IS_ADLP_RPLU(dev_priv)) {
                        dev_priv->display.cdclk.table = rplu_cdclk_table;
-               else
+                       dev_priv->display.funcs.cdclk = &rplu_cdclk_funcs;
+               } else {
                        dev_priv->display.cdclk.table = adlp_cdclk_table;
+                       dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
+               }
        } else if (IS_ROCKETLAKE(dev_priv)) {
                dev_priv->display.funcs.cdclk = &tgl_cdclk_funcs;
                dev_priv->display.cdclk.table = rkl_cdclk_table;
index 3c29792..0aae9a1 100644 (file)
@@ -1851,9 +1851,17 @@ static void hsw_crtc_disable(struct intel_atomic_state *state,
 
        intel_disable_shared_dpll(old_crtc_state);
 
-       intel_encoders_post_pll_disable(state, crtc);
+       if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
+               struct intel_crtc *slave_crtc;
+
+               intel_encoders_post_pll_disable(state, crtc);
 
-       intel_dmc_disable_pipe(i915, crtc->pipe);
+               intel_dmc_disable_pipe(i915, crtc->pipe);
+
+               for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
+                                                intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
+                       intel_dmc_disable_pipe(i915, slave_crtc->pipe);
+       }
 }
 
 static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
index 705915d..524bd6d 100644 (file)
@@ -129,7 +129,7 @@ static int intel_dp_aux_sync_len(void)
 
 static int intel_dp_aux_fw_sync_len(void)
 {
-       int precharge = 16; /* 10-16 */
+       int precharge = 10; /* 10-16 */
        int preamble = 8;
 
        return precharge + preamble;
index 650232c..b183efa 100644 (file)
@@ -204,8 +204,6 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       struct intel_gt *gt = dev_priv->media_gt;
-       struct intel_gsc_uc *gsc = &gt->uc.gsc;
        bool capable = false;
 
        /* I915 support for HDCP2.2 */
@@ -213,9 +211,13 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
                return false;
 
        /* If MTL+ make sure gsc is loaded and proxy is setup */
-       if (intel_hdcp_gsc_cs_required(dev_priv))
-               if (!intel_uc_fw_is_running(&gsc->fw))
+       if (intel_hdcp_gsc_cs_required(dev_priv)) {
+               struct intel_gt *gt = dev_priv->media_gt;
+               struct intel_gsc_uc *gsc = gt ? &gt->uc.gsc : NULL;
+
+               if (!gsc || !intel_uc_fw_is_running(&gsc->fw))
                        return false;
+       }
 
        /* MEI/GSC interface is solid depending on which is used */
        mutex_lock(&dev_priv->display.hdcp.comp_mutex);
index a81fa6a..7b516b1 100644 (file)
@@ -346,8 +346,10 @@ static int live_parallel_switch(void *arg)
                                continue;
 
                        ce = intel_context_create(data[m].ce[0]->engine);
-                       if (IS_ERR(ce))
+                       if (IS_ERR(ce)) {
+                               err = PTR_ERR(ce);
                                goto out;
+                       }
 
                        err = intel_context_pin(ce);
                        if (err) {
@@ -367,8 +369,10 @@ static int live_parallel_switch(void *arg)
 
                worker = kthread_create_worker(0, "igt/parallel:%s",
                                               data[n].ce[0]->engine->name);
-               if (IS_ERR(worker))
+               if (IS_ERR(worker)) {
+                       err = PTR_ERR(worker);
                        goto out;
+               }
 
                data[n].worker = worker;
        }
@@ -397,8 +401,10 @@ static int live_parallel_switch(void *arg)
                        }
                }
 
-               if (igt_live_test_end(&t))
-                       err = -EIO;
+               if (igt_live_test_end(&t)) {
+                       err = err ?: -EIO;
+                       break;
+               }
        }
 
 out:
index 736b89a..4202df5 100644 (file)
@@ -1530,8 +1530,8 @@ static int live_busywait_preempt(void *arg)
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        enum intel_engine_id id;
-       int err = -ENOMEM;
        u32 *map;
+       int err;
 
        /*
         * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
@@ -1539,13 +1539,17 @@ static int live_busywait_preempt(void *arg)
         */
 
        ctx_hi = kernel_context(gt->i915, NULL);
-       if (!ctx_hi)
-               return -ENOMEM;
+       if (IS_ERR(ctx_hi))
+               return PTR_ERR(ctx_hi);
+
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
        ctx_lo = kernel_context(gt->i915, NULL);
-       if (!ctx_lo)
+       if (IS_ERR(ctx_lo)) {
+               err = PTR_ERR(ctx_lo);
                goto err_ctx_hi;
+       }
+
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
 
        obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
index 050b8ae..3035cba 100644 (file)
@@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
                        stream->oa_buffer.last_ctx_id = ctx_id;
                }
 
-               /*
-                * Clear out the report id and timestamp as a means to detect unlanded
-                * reports.
-                */
-               oa_report_id_clear(stream, report32);
-               oa_timestamp_clear(stream, report32);
+               if (is_power_of_2(report_size)) {
+                       /*
+                        * Clear out the report id and timestamp as a means
+                        * to detect unlanded reports.
+                        */
+                       oa_report_id_clear(stream, report32);
+                       oa_timestamp_clear(stream, report32);
+               } else {
+                       /* Zero out the entire report */
+                       memset(report32, 0, report_size);
+               }
        }
 
        if (start_offset != *offset) {
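
The power-of-two distinction above matters because clearing just the report id and timestamp only detects unlanded reports reliably when reports tile the OA buffer at power-of-two boundaries; otherwise the whole report is zeroed. is_power_of_2() here is the stock helper from include/linux/log2.h:

    /* The stock helper from include/linux/log2.h. */
    static inline __attribute__((const))
    bool is_power_of_2(unsigned long n)
    {
            return (n != 0 && ((n & (n - 1)) == 0));
    }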
index ff00340..ffd91a5 100644 (file)
@@ -165,7 +165,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
                             struct lima_sched_context *context)
 {
-       drm_sched_entity_fini(&context->base);
+       drm_sched_entity_destroy(&context->base);
 }
 
 struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
index 0f2dd26..af3ce5a 100644 (file)
@@ -642,6 +642,11 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
        if (funcs->pixpllc_atomic_update)
                funcs->pixpllc_atomic_update(crtc, old_state);
 
+       if (crtc_state->gamma_lut)
+               mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+       else
+               mgag200_crtc_set_gamma_linear(mdev, format);
+
        mgag200_enable_display(mdev);
 
        if (funcs->enable_vidrst)
index e16b4b3..8914992 100644 (file)
@@ -1526,8 +1526,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
        if (!pdev)
                return -ENODEV;
 
-       mutex_init(&gmu->lock);
-
        gmu->dev = &pdev->dev;
 
        of_dma_configure(gmu->dev, node, true);
index 9fb214f..52da379 100644 (file)
@@ -1981,6 +1981,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
        adreno_gpu = &a6xx_gpu->base;
        gpu = &adreno_gpu->base;
 
+       mutex_init(&a6xx_gpu->gmu.lock);
+
        adreno_gpu->registers = NULL;
 
        /*
index 2b3ae84..bdcd554 100644 (file)
@@ -98,17 +98,17 @@ static const struct dpu_sspp_cfg msm8998_sspp[] = {
 
 static const struct dpu_lm_cfg msm8998_lm[] = {
        LM_BLK("lm_0", LM_0, 0x44000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_0, LM_2, DSPP_0),
+               &msm8998_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
        LM_BLK("lm_1", LM_1, 0x45000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_1, LM_5, DSPP_1),
+               &msm8998_lm_sblk, PINGPONG_1, LM_0, DSPP_1),
        LM_BLK("lm_2", LM_2, 0x46000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_2, LM_0, 0),
+               &msm8998_lm_sblk, PINGPONG_2, LM_5, 0),
        LM_BLK("lm_3", LM_3, 0x47000, MIXER_MSM8998_MASK,
                &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
        LM_BLK("lm_4", LM_4, 0x48000, MIXER_MSM8998_MASK,
                &msm8998_lm_sblk, PINGPONG_MAX, 0, 0),
        LM_BLK("lm_5", LM_5, 0x49000, MIXER_MSM8998_MASK,
-               &msm8998_lm_sblk, PINGPONG_3, LM_1, 0),
+               &msm8998_lm_sblk, PINGPONG_3, LM_2, 0),
 };
 
 static const struct dpu_pingpong_cfg msm8998_pp[] = {
@@ -134,10 +134,10 @@ static const struct dpu_dspp_cfg msm8998_dspp[] = {
 };
 
 static const struct dpu_intf_cfg msm8998_intf[] = {
-       INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
-       INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
-       INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
-       INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 25, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
+       INTF_BLK("intf_0", INTF_0, 0x6a000, 0x280, INTF_DP, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+       INTF_BLK("intf_1", INTF_1, 0x6a800, 0x280, INTF_DSI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
+       INTF_BLK("intf_2", INTF_2, 0x6b000, 0x280, INTF_DSI, 1, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 28, 29),
+       INTF_BLK("intf_3", INTF_3, 0x6b800, 0x280, INTF_HDMI, 0, 21, INTF_SDM845_MASK, MDP_SSPP_TOP0_INTR, 30, 31),
 };
 
 static const struct dpu_perf_cfg msm8998_perf_data = {
index 282d410..42b0e58 100644 (file)
@@ -128,10 +128,10 @@ static const struct dpu_dspp_cfg sm8150_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8150_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index c574002..e3bdfe7 100644 (file)
@@ -116,10 +116,10 @@ static const struct dpu_lm_cfg sc8180x_lm[] = {
 };
 
 static const struct dpu_pingpong_cfg sc8180x_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index 2c40229..ed13058 100644 (file)
@@ -129,10 +129,10 @@ static const struct dpu_dspp_cfg sm8250_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8250_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, MERGE_3D_0, sdm845_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
        PP_BLK("pingpong_2", PINGPONG_2, 0x71000, MERGE_3D_1, sdm845_pp_sblk,
index 8799ed7..a46b117 100644 (file)
@@ -80,8 +80,8 @@ static const struct dpu_dspp_cfg sc7180_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7180_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk_te, -1, -1),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk_te, -1, -1),
+       PP_BLK("pingpong_0", PINGPONG_0, 0x70000, 0, sdm845_pp_sblk, -1, -1),
+       PP_BLK("pingpong_1", PINGPONG_1, 0x70800, 0, sdm845_pp_sblk, -1, -1),
 };
 
 static const struct dpu_intf_cfg sc7180_intf[] = {
index 6f04d8f..988d820 100644 (file)
@@ -122,7 +122,6 @@ const struct dpu_mdss_cfg dpu_sm6115_cfg = {
        .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
                     BIT(MDP_SSPP_TOP0_INTR2) | \
                     BIT(MDP_SSPP_TOP0_HIST_INTR) | \
-                    BIT(MDP_INTF0_INTR) | \
                     BIT(MDP_INTF1_INTR),
 };
 
index 303492d..c9003dc 100644 (file)
@@ -112,7 +112,6 @@ const struct dpu_mdss_cfg dpu_qcm2290_cfg = {
        .mdss_irqs = BIT(MDP_SSPP_TOP0_INTR) | \
                     BIT(MDP_SSPP_TOP0_INTR2) | \
                     BIT(MDP_SSPP_TOP0_HIST_INTR) | \
-                    BIT(MDP_INTF0_INTR) | \
                     BIT(MDP_INTF1_INTR),
 };
 
index ca107ca..4f6a965 100644 (file)
@@ -127,22 +127,22 @@ static const struct dpu_dspp_cfg sm8350_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sm8350_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
-       PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
 };
index 5957de1..6b2c7ea 100644 (file)
@@ -87,10 +87,10 @@ static const struct dpu_dspp_cfg sc7280_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc7280_pp[] = {
-       PP_BLK("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, 0, sc7280_pp_sblk, -1, -1),
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, 0, sc7280_pp_sblk, -1, -1),
 };
 
 static const struct dpu_intf_cfg sc7280_intf[] = {
index 9aab110..706d0f1 100644 (file)
@@ -121,18 +121,18 @@ static const struct dpu_dspp_cfg sc8280xp_dspp[] = {
 };
 
 static const struct dpu_pingpong_cfg sc8280xp_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
-       PP_BLK_TE("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
-       PP_BLK_TE("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
-       PP_BLK_TE("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
-       PP_BLK_TE("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk_te,
-                 DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8), -1),
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9), -1),
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10), -1),
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11), -1),
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30), -1),
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+                       DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31), -1),
 };
 
 static const struct dpu_merge_3d_cfg sc8280xp_merge_3d[] = {
index 02a259b..4ecb3df 100644 (file)
@@ -128,28 +128,28 @@ static const struct dpu_dspp_cfg sm8450_dspp[] = {
 };
 /* FIXME: interrupts */
 static const struct dpu_pingpong_cfg sm8450_pp[] = {
-       PP_BLK_TE("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 12)),
-       PP_BLK_TE("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sdm845_pp_sblk_te,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 13)),
-       PP_BLK("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 14)),
-       PP_BLK("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 15)),
-       PP_BLK("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
-       PP_BLK("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x65800, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
-       PP_BLK("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sdm845_pp_sblk,
+       PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x65c00, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
 };
index 9e40303..d0ab351 100644 (file)
@@ -132,28 +132,28 @@ static const struct dpu_dspp_cfg sm8550_dspp[] = {
                 &sm8150_dspp_sblk),
 };
 static const struct dpu_pingpong_cfg sm8550_pp[] = {
-       PP_BLK_DIPHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_0", PINGPONG_0, 0x69000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 8),
                        -1),
-       PP_BLK_DIPHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_1", PINGPONG_1, 0x6a000, MERGE_3D_0, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 9),
                        -1),
-       PP_BLK_DIPHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_2", PINGPONG_2, 0x6b000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 10),
                        -1),
-       PP_BLK_DIPHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_3", PINGPONG_3, 0x6c000, MERGE_3D_1, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 11),
                        -1),
-       PP_BLK_DIPHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_4", PINGPONG_4, 0x6d000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 30),
                        -1),
-       PP_BLK_DIPHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_5", PINGPONG_5, 0x6e000, MERGE_3D_2, sc7280_pp_sblk,
                        DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR2, 31),
                        -1),
-       PP_BLK_DIPHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_6", PINGPONG_6, 0x66000, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
-       PP_BLK_DIPHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
+       PP_BLK_DITHER("pingpong_7", PINGPONG_7, 0x66400, MERGE_3D_3, sc7280_pp_sblk,
                        -1,
                        -1),
 };
index 03f162a..5d994bc 100644 (file)
@@ -491,7 +491,7 @@ static const struct dpu_pingpong_sub_blks sc7280_pp_sblk = {
        .len = 0x20, .version = 0x20000},
 };
 
-#define PP_BLK_DIPHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
+#define PP_BLK_DITHER(_name, _id, _base, _merge_3d, _sblk, _done, _rdptr) \
        {\
        .name = _name, .id = _id, \
        .base = _base, .len = 0, \
@@ -587,12 +587,12 @@ static const u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
 
 static const struct dpu_vbif_dynamic_ot_cfg msm8998_ot_rdwr_cfg[] = {
        {
-               .pps = 1088 * 1920 * 30,
+               .pps = 1920 * 1080 * 30,
                .ot_limit = 2,
        },
        {
-               .pps = 1088 * 1920 * 60,
-               .ot_limit = 6,
+               .pps = 1920 * 1080 * 60,
+               .ot_limit = 4,
        },
        {
                .pps = 3840 * 2160 * 30,
@@ -705,10 +705,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_linear[] = {
        {.fl = 10, .lut = 0x1555b},
        {.fl = 11, .lut = 0x5555b},
        {.fl = 12, .lut = 0x15555b},
-       {.fl = 13, .lut = 0x55555b},
-       {.fl = 14, .lut = 0},
-       {.fl = 1,  .lut = 0x1b},
-       {.fl = 0,  .lut = 0}
+       {.fl = 0,  .lut = 0x55555b}
 };
 
 static const struct dpu_qos_lut_entry sdm845_qos_linear[] = {
@@ -730,9 +727,7 @@ static const struct dpu_qos_lut_entry msm8998_qos_macrotile[] = {
        {.fl = 10, .lut = 0x1aaff},
        {.fl = 11, .lut = 0x5aaff},
        {.fl = 12, .lut = 0x15aaff},
-       {.fl = 13, .lut = 0x55aaff},
-       {.fl = 1,  .lut = 0x1aaff},
-       {.fl = 0,  .lut = 0},
+       {.fl = 0,  .lut = 0x55aaff},
 };
 
 static const struct dpu_qos_lut_entry sc7180_qos_linear[] = {
index 53326f2..17f3e7e 100644 (file)
@@ -15,7 +15,7 @@
 
 /*
  * Register offsets in MDSS register file for the interrupt registers
- * w.r.t. to the MDP base
+ * w.r.t. the MDP base
  */
 #define MDP_SSPP_TOP0_OFF              0x0
 #define MDP_INTF_0_OFF                 0x6A000
 #define MDP_INTF_3_OFF                 0x6B800
 #define MDP_INTF_4_OFF                 0x6C000
 #define MDP_INTF_5_OFF                 0x6C800
+#define INTF_INTR_EN                   0x1c0
+#define INTF_INTR_STATUS               0x1c4
+#define INTF_INTR_CLEAR                        0x1c8
 #define MDP_AD4_0_OFF                  0x7C000
 #define MDP_AD4_1_OFF                  0x7D000
 #define MDP_AD4_INTR_EN_OFF            0x41c
 #define MDP_AD4_INTR_CLEAR_OFF         0x424
 #define MDP_AD4_INTR_STATUS_OFF                0x420
-#define MDP_INTF_0_OFF_REV_7xxx             0x34000
-#define MDP_INTF_1_OFF_REV_7xxx             0x35000
-#define MDP_INTF_2_OFF_REV_7xxx             0x36000
-#define MDP_INTF_3_OFF_REV_7xxx             0x37000
-#define MDP_INTF_4_OFF_REV_7xxx             0x38000
-#define MDP_INTF_5_OFF_REV_7xxx             0x39000
-#define MDP_INTF_6_OFF_REV_7xxx             0x3a000
-#define MDP_INTF_7_OFF_REV_7xxx             0x3b000
-#define MDP_INTF_8_OFF_REV_7xxx             0x3c000
+#define MDP_INTF_0_OFF_REV_7xxx                0x34000
+#define MDP_INTF_1_OFF_REV_7xxx                0x35000
+#define MDP_INTF_2_OFF_REV_7xxx                0x36000
+#define MDP_INTF_3_OFF_REV_7xxx                0x37000
+#define MDP_INTF_4_OFF_REV_7xxx                0x38000
+#define MDP_INTF_5_OFF_REV_7xxx                0x39000
+#define MDP_INTF_6_OFF_REV_7xxx                0x3a000
+#define MDP_INTF_7_OFF_REV_7xxx                0x3b000
+#define MDP_INTF_8_OFF_REV_7xxx                0x3c000
 
 /**
  * struct dpu_intr_reg - array of DPU register sets
index 84ee2ef..b9dddf5 100644 (file)
 #define   INTF_TPG_RGB_MAPPING          0x11C
 #define   INTF_PROG_FETCH_START         0x170
 #define   INTF_PROG_ROT_START           0x174
-
-#define   INTF_FRAME_LINE_COUNT_EN      0x0A8
-#define   INTF_FRAME_COUNT              0x0AC
-#define   INTF_LINE_COUNT               0x0B0
-
 #define   INTF_MUX                      0x25C
 #define   INTF_STATUS                   0x26C
 
index 2d28afd..a3e413d 100644 (file)
@@ -61,6 +61,7 @@ static const struct dpu_wb_cfg *_wb_offset(enum dpu_wb wb,
        for (i = 0; i < m->wb_count; i++) {
                if (wb == m->wb[i].id) {
                        b->blk_addr = addr + m->wb[i].base;
+                       b->log_mask = DPU_DBG_MASK_WB;
                        return &m->wb[i];
                }
        }
index feb9a72..5acd568 100644 (file)
@@ -21,9 +21,6 @@
 #define HIST_INTR_EN                    0x01c
 #define HIST_INTR_STATUS                0x020
 #define HIST_INTR_CLEAR                 0x024
-#define INTF_INTR_EN                    0x1C0
-#define INTF_INTR_STATUS                0x1C4
-#define INTF_INTR_CLEAR                 0x1C8
 #define SPLIT_DISPLAY_EN                0x2F4
 #define SPLIT_DISPLAY_UPPER_PIPE_CTRL   0x2F8
 #define DSPP_IGC_COLOR0_RAM_LUTN        0x300
index 6666783..1245c7a 100644 (file)
@@ -593,6 +593,18 @@ static struct hdmi_codec_pdata codec_data = {
        .i2s = 1,
 };
 
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio_priv;
+
+       audio_priv = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+       if (audio_priv->audio_pdev) {
+               platform_device_unregister(audio_priv->audio_pdev);
+               audio_priv->audio_pdev = NULL;
+       }
+}
+
 int dp_register_audio_driver(struct device *dev,
                struct dp_audio *dp_audio)
 {
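
The new dp_unregister_audio_driver() above gives the unbind path (used in the dp_display.c hunks further below) a counterpart to dp_register_audio_driver(): it tears down the audio platform device and clears the cached pointer so a repeated call cannot double-unregister. The symmetric pattern, sketched with hypothetical names:

    /* Sketch: undo for a child platform device created at register
     * time; priv->pdev is assumed to be set by the register path. */
    void foo_unregister_audio(struct foo_priv *priv)
    {
            if (priv->pdev) {
                    platform_device_unregister(priv->pdev);
                    priv->pdev = NULL;      /* make this idempotent */
            }
    }
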
index 84e5f4a..4ab7888 100644 (file)
@@ -53,6 +53,8 @@ struct dp_audio *dp_audio_get(struct platform_device *pdev,
 int dp_register_audio_driver(struct device *dev,
                struct dp_audio *dp_audio);
 
+void dp_unregister_audio_driver(struct device *dev, struct dp_audio *dp_audio);
+
 /**
  * dp_audio_put()
  *
index 7a8cf1c..5142aeb 100644 (file)
@@ -620,7 +620,7 @@ void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
                                config & DP_DP_HPD_INT_MASK);
 }
 
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog)
 {
        struct dp_catalog_private *catalog = container_of(dp_catalog,
                                struct dp_catalog_private, dp_catalog);
@@ -635,6 +635,19 @@ void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
        dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
 }
 
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+       reftimer &= ~DP_DP_HPD_REFTIMER_ENABLE;
+       dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+       dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, 0);
+}
+
 static void dp_catalog_enable_sdp(struct dp_catalog_private *catalog)
 {
        /* trigger sdp */
index 82376a2..38786e8 100644 (file)
@@ -104,7 +104,8 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
 void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
                        u32 intr_mask, bool en);
-void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_enable(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_hpd_disable(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_config_psr(struct dp_catalog *dp_catalog);
 void dp_catalog_ctrl_set_psr(struct dp_catalog *dp_catalog, bool enter);
 u32 dp_catalog_link_is_connected(struct dp_catalog *dp_catalog);
index 3e13acd..03b0eda 100644 (file)
 #include "dp_audio.h"
 #include "dp_debug.h"
 
+static bool psr_enabled = false;
+module_param(psr_enabled, bool, 0);
+MODULE_PARM_DESC(psr_enabled, "enable PSR for eDP and DP displays");
+
 #define HPD_STRING_SIZE 30
 
 enum {
@@ -326,6 +330,7 @@ static void dp_display_unbind(struct device *dev, struct device *master,
        kthread_stop(dp->ev_tsk);
 
        dp_power_client_deinit(dp->power);
+       dp_unregister_audio_driver(dev, dp->audio);
        dp_aux_unregister(dp->aux);
        dp->drm_dev = NULL;
        dp->aux->drm_dev = NULL;
@@ -406,7 +411,7 @@ static int dp_display_process_hpd_high(struct dp_display_private *dp)
 
        edid = dp->panel->edid;
 
-       dp->dp_display.psr_supported = dp->panel->psr_cap.version;
+       dp->dp_display.psr_supported = dp->panel->psr_cap.version && psr_enabled;
 
        dp->audio_supported = drm_detect_monitor_audio(edid);
        dp_panel_handle_sink_request(dp->panel);
@@ -615,12 +620,6 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
                dp->hpd_state = ST_MAINLINK_READY;
        }
 
-       /* enable HDP irq_hpd/replug interrupt */
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog,
-                                          DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
-                                          true);
-
        drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
                        dp->dp_display.connector_type, state);
        mutex_unlock(&dp->event_mutex);
@@ -658,12 +657,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
        drm_dbg_dp(dp->drm_dev, "Before, type=%d hpd_state=%d\n",
                        dp->dp_display.connector_type, state);
 
-       /* disable irq_hpd/replug interrupts */
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog,
-                                          DP_DP_IRQ_HPD_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK,
-                                          false);
-
        /* unplugged, no more irq_hpd handle */
        dp_del_event(dp, EV_IRQ_HPD_INT);
 
@@ -687,10 +680,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
                return 0;
        }
 
-       /* disable HPD plug interrupts */
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
-
        /*
         * We don't need separate work for disconnect as
         * connect/attention interrupts are disabled
@@ -706,10 +695,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
        /* signal the disconnect event early to ensure proper teardown */
        dp_display_handle_plugged_change(&dp->dp_display, false);
 
-       /* enable HDP plug interrupt to prepare for next plugin */
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
-
        drm_dbg_dp(dp->drm_dev, "After, type=%d hpd_state=%d\n",
                        dp->dp_display.connector_type, state);
 
@@ -1082,26 +1067,6 @@ void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
        mutex_unlock(&dp_display->event_mutex);
 }
 
-static void dp_display_config_hpd(struct dp_display_private *dp)
-{
-
-       dp_display_host_init(dp);
-       dp_catalog_ctrl_hpd_config(dp->catalog);
-
-       /* Enable plug and unplug interrupts only if requested */
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog,
-                               DP_DP_HPD_PLUG_INT_MASK |
-                               DP_DP_HPD_UNPLUG_INT_MASK,
-                               true);
-
-       /* Enable interrupt first time
-        * we are leaving dp clocks on during disconnect
-        * and never disable interrupt
-        */
-       enable_irq(dp->irq);
-}
-
 void dp_display_set_psr(struct msm_dp *dp_display, bool enter)
 {
        struct dp_display_private *dp;
@@ -1176,7 +1141,7 @@ static int hpd_event_thread(void *data)
 
                switch (todo->event_id) {
                case EV_HPD_INIT_SETUP:
-                       dp_display_config_hpd(dp_priv);
+                       dp_display_host_init(dp_priv);
                        break;
                case EV_HPD_PLUG_INT:
                        dp_hpd_plug_handle(dp_priv, todo->data);
@@ -1282,7 +1247,6 @@ int dp_display_request_irq(struct msm_dp *dp_display)
                                dp->irq, rc);
                return rc;
        }
-       disable_irq(dp->irq);
 
        return 0;
 }
@@ -1394,13 +1358,8 @@ static int dp_pm_resume(struct device *dev)
        /* turn on dp ctrl/phy */
        dp_display_host_init(dp);
 
-       dp_catalog_ctrl_hpd_config(dp->catalog);
-
-       if (dp->dp_display.internal_hpd)
-               dp_catalog_hpd_config_intr(dp->catalog,
-                               DP_DP_HPD_PLUG_INT_MASK |
-                               DP_DP_HPD_UNPLUG_INT_MASK,
-                               true);
+       if (dp_display->is_edp)
+               dp_catalog_ctrl_hpd_enable(dp->catalog);
 
        if (dp_catalog_link_is_connected(dp->catalog)) {
                /*
@@ -1568,9 +1527,8 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
 
        if (aux_bus && dp->is_edp) {
                dp_display_host_init(dp_priv);
-               dp_catalog_ctrl_hpd_config(dp_priv->catalog);
+               dp_catalog_ctrl_hpd_enable(dp_priv->catalog);
                dp_display_host_phy_init(dp_priv);
-               enable_irq(dp_priv->irq);
 
                /*
                 * The code below assumes that the panel will finish probing
@@ -1612,7 +1570,6 @@ static int dp_display_get_next_bridge(struct msm_dp *dp)
 
 error:
        if (dp->is_edp) {
-               disable_irq(dp_priv->irq);
                dp_display_host_phy_exit(dp_priv);
                dp_display_host_deinit(dp_priv);
        }
@@ -1801,16 +1758,31 @@ void dp_bridge_hpd_enable(struct drm_bridge *bridge)
 {
        struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
        struct msm_dp *dp_display = dp_bridge->dp_display;
+       struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       mutex_lock(&dp->event_mutex);
+       dp_catalog_ctrl_hpd_enable(dp->catalog);
+
+       /* enable HPD interrupts */
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, true);
 
        dp_display->internal_hpd = true;
+       mutex_unlock(&dp->event_mutex);
 }
 
 void dp_bridge_hpd_disable(struct drm_bridge *bridge)
 {
        struct msm_dp_bridge *dp_bridge = to_dp_bridge(bridge);
        struct msm_dp *dp_display = dp_bridge->dp_display;
+       struct dp_display_private *dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       mutex_lock(&dp->event_mutex);
+       /* disable HPD interrupts */
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false);
+       dp_catalog_ctrl_hpd_disable(dp->catalog);
 
        dp_display->internal_hpd = false;
+       mutex_unlock(&dp->event_mutex);
 }
 
 void dp_bridge_hpd_notify(struct drm_bridge *bridge,
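
The dp_display.c diff above does three related things: it gates PSR support behind a new psr_enabled module parameter, unregisters the audio driver on unbind, and moves HPD enable/disable out of the init/resume paths into the drm_bridge hpd_enable()/hpd_disable() hooks, serialized by event_mutex. The module-parameter idiom used for the PSR gate looks like this in isolation (names illustrative):

    /* Opt-in boolean knob; perms 0 means load-time only, no sysfs */
    static bool feature_enabled;
    module_param(feature_enabled, bool, 0);
    MODULE_PARM_DESC(feature_enabled, "enable the optional feature");

    /* later: require both hardware capability and the opt-in */
    /* supported = hw_has_feature(dev) && feature_enabled;     */
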
index d77fa97..9c45d64 100644 (file)
@@ -155,6 +155,8 @@ static bool can_do_async(struct drm_atomic_state *state,
        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                if (drm_atomic_crtc_needs_modeset(crtc_state))
                        return false;
+               if (!crtc_state->active)
+                       return false;
                if (++num_crtcs > 1)
                        return false;
                *async_crtc = crtc;
index b4cfa44..463ca41 100644 (file)
@@ -449,6 +449,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        if (ret)
                goto err_cleanup_mode_config;
 
+       dma_set_max_seg_size(dev, UINT_MAX);
+
        /* Bind all our sub-components: */
        ret = component_bind_all(dev, ddev);
        if (ret)
@@ -459,8 +461,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
        if (ret)
                goto err_msm_uninit;
 
-       dma_set_max_seg_size(dev, UINT_MAX);
-
        msm_gem_shrinker_init(ddev);
 
        if (priv->kms_init) {
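
Moving dma_set_max_seg_size() ahead of component_bind_all() matters because sub-components may create DMA mappings while they bind; the relaxed segment-size limit has to be in place before the first mapping exists. Ordering sketch (assuming the call names from the hunk):

    /* Lift the default 64K DMA segment cap before any
     * sub-device can map a buffer. */
    dma_set_max_seg_size(dev, UINT_MAX);

    ret = component_bind_all(dev, ddev);    /* may map buffers */
    if (ret)
            return ret;
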
index db6c4e2..cd39b9d 100644 (file)
@@ -219,7 +219,8 @@ static void put_pages(struct drm_gem_object *obj)
        }
 }
 
-static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
+static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj,
+                                             unsigned madv)
 {
        struct msm_drm_private *priv = obj->dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -227,7 +228,9 @@ static struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj)
 
        msm_gem_assert_locked(obj);
 
-       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
+       if (GEM_WARN_ON(msm_obj->madv > madv)) {
+               DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
+                       msm_obj->madv, madv);
                return ERR_PTR(-EBUSY);
        }
 
@@ -248,7 +251,7 @@ struct page **msm_gem_pin_pages(struct drm_gem_object *obj)
        struct page **p;
 
        msm_gem_lock(obj);
-       p = msm_gem_pin_pages_locked(obj);
+       p = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
        msm_gem_unlock(obj);
 
        return p;
@@ -473,10 +476,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma)
 
        msm_gem_assert_locked(obj);
 
-       if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
-               return -EBUSY;
-
-       pages = msm_gem_pin_pages_locked(obj);
+       pages = msm_gem_pin_pages_locked(obj, MSM_MADV_WILLNEED);
        if (IS_ERR(pages))
                return PTR_ERR(pages);
 
@@ -699,13 +699,7 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
        if (obj->import_attach)
                return ERR_PTR(-ENODEV);
 
-       if (GEM_WARN_ON(msm_obj->madv > madv)) {
-               DRM_DEV_ERROR(obj->dev->dev, "Invalid madv state: %u vs %u\n",
-                       msm_obj->madv, madv);
-               return ERR_PTR(-EBUSY);
-       }
-
-       pages = msm_gem_pin_pages_locked(obj);
+       pages = msm_gem_pin_pages_locked(obj, madv);
        if (IS_ERR(pages))
                return ERR_CAST(pages);
 
index aff18c2..9f5933c 100644 (file)
@@ -722,7 +722,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_gem_submit *args = data;
        struct msm_file_private *ctx = file->driver_priv;
-       struct msm_gem_submit *submit;
+       struct msm_gem_submit *submit = NULL;
        struct msm_gpu *gpu = priv->gpu;
        struct msm_gpu_submitqueue *queue;
        struct msm_ringbuffer *ring;
@@ -769,13 +769,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
                if (out_fence_fd < 0) {
                        ret = out_fence_fd;
-                       return ret;
+                       goto out_post_unlock;
                }
        }
 
        submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
-       if (IS_ERR(submit))
-               return PTR_ERR(submit);
+       if (IS_ERR(submit)) {
+               ret = PTR_ERR(submit);
+               goto out_post_unlock;
+       }
 
        trace_msm_gpu_submit(pid_nr(submit->pid), ring->id, submit->ident,
                args->nr_bos, args->nr_cmds);
@@ -962,11 +964,20 @@ out:
        if (has_ww_ticket)
                ww_acquire_fini(&submit->ticket);
 out_unlock:
-       if (ret && (out_fence_fd >= 0))
-               put_unused_fd(out_fence_fd);
        mutex_unlock(&queue->lock);
 out_post_unlock:
-       msm_gem_submit_put(submit);
+       if (ret && (out_fence_fd >= 0))
+               put_unused_fd(out_fence_fd);
+
+       if (!IS_ERR_OR_NULL(submit)) {
+               msm_gem_submit_put(submit);
+       } else {
+               /*
+                * If the submit hasn't yet taken ownership of the queue
+                * then we need to drop the reference ourselves:
+                */
+               msm_submitqueue_put(queue);
+       }
        if (!IS_ERR_OR_NULL(post_deps)) {
                for (i = 0; i < args->nr_out_syncobjs; ++i) {
                        kfree(post_deps[i].chain);
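
The msm_ioctl_gem_submit() rework above initializes submit to NULL, turns early returns into gotos, and lets one cleanup tail handle every exit state: put the unused fence fd on any error, put the submit if it was created, and otherwise drop the queue reference the submit never took ownership of. The ownership dance, compressed into a hedged sketch:

    obj = NULL;
    fd = get_unused_fd_flags(O_CLOEXEC);
    if (fd < 0) {
            ret = fd;
            goto out;
    }
    obj = obj_create(queue);        /* on success, owns the queue ref */
    if (IS_ERR(obj)) {
            ret = PTR_ERR(obj);
            goto out;
    }
    /* ... */
    out:
    if (ret && fd >= 0)
            put_unused_fd(fd);
    if (!IS_ERR_OR_NULL(obj))
            obj_put(obj);           /* also drops the queue ref */
    else
            queue_put(queue);       /* obj never took ownership */
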
index 418e1e0..5cc8d35 100644 (file)
@@ -234,7 +234,12 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
        /* Get the pagetable configuration from the domain */
        if (adreno_smmu->cookie)
                ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
-       if (!ttbr1_cfg)
+
+       /*
+        * If you hit this WARN_ONCE() you are probably missing an entry in
+        * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
+        */
+       if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
                return ERR_PTR(-ENODEV);
 
        pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
@@ -410,7 +415,7 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig
        struct msm_mmu *mmu;
 
        mmu = msm_iommu_new(dev, quirks);
-       if (IS_ERR(mmu))
+       if (IS_ERR_OR_NULL(mmu))
                return mmu;
 
        iommu = to_msm_iommu(mmu);
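
msm_iommu_new() can return NULL to mean "no IOMMU present, continue without one", so the GPU wrapper above must pass both NULL and ERR_PTR() values straight back; testing only IS_ERR() would let a NULL through to the to_msm_iommu() dereference. In short:

    mmu = msm_iommu_new(dev, quirks);
    if (IS_ERR_OR_NULL(mmu))        /* NULL = none, ERR_PTR = failure */
            return mmu;             /* caller decides what either means */
    /* only past this point is mmu safe to dereference */
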
index 6afdf26..b9fe926 100644 (file)
@@ -53,7 +53,7 @@ pl111_mode_valid(struct drm_simple_display_pipe *pipe,
 {
        struct drm_device *drm = pipe->crtc.dev;
        struct pl111_drm_dev_private *priv = drm->dev_private;
-       u32 cpp = priv->variant->fb_bpp / 8;
+       u32 cpp = DIV_ROUND_UP(priv->variant->fb_depth, 8);
        u64 bw;
 
        /*
index 2a46b5b..d1fe756 100644 (file)
@@ -114,7 +114,7 @@ struct drm_minor;
  *     extensions to the control register
  * @formats: array of supported pixel formats on this variant
  * @nformats: the length of the array of supported pixel formats
- * @fb_bpp: desired bits per pixel on the default framebuffer
+ * @fb_depth: desired depth per pixel on the default framebuffer
  */
 struct pl111_variant_data {
        const char *name;
@@ -126,7 +126,7 @@ struct pl111_variant_data {
        bool st_bitmux_control;
        const u32 *formats;
        unsigned int nformats;
-       unsigned int fb_bpp;
+       unsigned int fb_depth;
 };
 
 struct pl111_drm_dev_private {
index 4b2a9e9..43049c8 100644 (file)
@@ -308,7 +308,7 @@ static int pl111_amba_probe(struct amba_device *amba_dev,
        if (ret < 0)
                goto dev_put;
 
-       drm_fbdev_dma_setup(drm, priv->variant->fb_bpp);
+       drm_fbdev_dma_setup(drm, priv->variant->fb_depth);
 
        return 0;
 
@@ -351,7 +351,7 @@ static const struct pl111_variant_data pl110_variant = {
        .is_pl110 = true,
        .formats = pl110_pixel_formats,
        .nformats = ARRAY_SIZE(pl110_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 16,
 };
 
 /* RealView, Versatile Express etc use this modern variant */
@@ -376,7 +376,7 @@ static const struct pl111_variant_data pl111_variant = {
        .name = "PL111",
        .formats = pl111_pixel_formats,
        .nformats = ARRAY_SIZE(pl111_pixel_formats),
-       .fb_bpp = 32,
+       .fb_depth = 32,
 };
 
 static const u32 pl110_nomadik_pixel_formats[] = {
@@ -405,7 +405,7 @@ static const struct pl111_variant_data pl110_nomadik_variant = {
        .is_lcdc = true,
        .st_bitmux_control = true,
        .broken_vblank = true,
-       .fb_bpp = 16,
+       .fb_depth = 16,
 };
 
 static const struct amba_id pl111_id_table[] = {
index 1b436b7..00c3ebd 100644 (file)
@@ -316,7 +316,7 @@ static const struct pl111_variant_data pl110_integrator = {
        .broken_vblank = true,
        .formats = pl110_integrator_pixel_formats,
        .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 16,
 };
 
 /*
@@ -330,7 +330,7 @@ static const struct pl111_variant_data pl110_impd1 = {
        .broken_vblank = true,
        .formats = pl110_integrator_pixel_formats,
        .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 15,
 };
 
 /*
@@ -343,7 +343,7 @@ static const struct pl111_variant_data pl110_versatile = {
        .external_bgr = true,
        .formats = pl110_versatile_pixel_formats,
        .nformats = ARRAY_SIZE(pl110_versatile_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 16,
 };
 
 /*
@@ -355,7 +355,7 @@ static const struct pl111_variant_data pl111_realview = {
        .name = "PL111 RealView",
        .formats = pl111_realview_pixel_formats,
        .nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 16,
 };
 
 /*
@@ -367,7 +367,7 @@ static const struct pl111_variant_data pl111_vexpress = {
        .name = "PL111 Versatile Express",
        .formats = pl111_realview_pixel_formats,
        .nformats = ARRAY_SIZE(pl111_realview_pixel_formats),
-       .fb_bpp = 16,
+       .fb_depth = 16,
        .broken_clockdivider = true,
 };
 
index bdc5af2..d3f5ddb 100644 (file)
@@ -459,7 +459,6 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        struct radeon_device *rdev = dev->dev_private;
        struct drm_radeon_gem_set_domain *args = data;
        struct drm_gem_object *gobj;
-       struct radeon_bo *robj;
        int r;
 
        /* for now if someone requests domain CPU -
@@ -472,13 +471,12 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                up_read(&rdev->exclusive_lock);
                return -ENOENT;
        }
-       robj = gem_to_radeon_bo(gobj);
 
        r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
        drm_gem_object_put(gobj);
        up_read(&rdev->exclusive_lock);
-       r = radeon_gem_handle_lockup(robj->rdev, r);
+       r = radeon_gem_handle_lockup(rdev, r);
        return r;
 }
 
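The radeon fix above drops robj entirely: the GEM reference is put before the lockup check runs, so reading robj->rdev afterwards was a potential use-after-free, and rdev is already available from dev->dev_private without depending on the object's lifetime. The hazard in miniature:

    /* BAD: obj may be freed by the put */
    drm_gem_object_put(obj);
    use(obj->private);              /* use-after-free */

    /* GOOD: capture what is needed while the reference is held,
     * or use state that does not hang off obj at all */
    priv = dev->dev_private;
    drm_gem_object_put(obj);
    use(priv);
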
index 3377fbc..c4dda90 100644 (file)
@@ -99,6 +99,16 @@ static void radeon_hotplug_work_func(struct work_struct *work)
 
 static void radeon_dp_work_func(struct work_struct *work)
 {
+       struct radeon_device *rdev = container_of(work, struct radeon_device,
+                                                 dp_work);
+       struct drm_device *dev = rdev->ddev;
+       struct drm_mode_config *mode_config = &dev->mode_config;
+       struct drm_connector *connector;
+
+       mutex_lock(&mode_config->mutex);
+       list_for_each_entry(connector, &mode_config->connector_list, head)
+               radeon_connector_hotplug(connector);
+       mutex_unlock(&mode_config->mutex);
 }
 
 /**
index 8c18363..aea5a90 100644 (file)
@@ -1141,9 +1141,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
        for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                struct drm_sched_rq *rq = &sched->sched_rq[i];
 
-               if (!rq)
-                       continue;
-
                spin_lock(&rq->lock);
                list_for_each_entry(s_entity, &rq->entities, list)
                        /*
index 7ae5f27..c6bdb9c 100644 (file)
@@ -587,6 +587,8 @@ static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_JEWEL) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
index d79e946..5d29aba 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_MOONBALL  0x5044
 #define USB_DEVICE_ID_GOOGLE_DON       0x5050
 #define USB_DEVICE_ID_GOOGLE_EEL       0x5057
+#define USB_DEVICE_ID_GOOGLE_JEWEL     0x5061
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index 0fcfd85..5e1a412 100644 (file)
@@ -286,7 +286,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
        struct hidpp_report *message,
        struct hidpp_report *response)
 {
-       int ret;
+       int ret = -1;
        int max_retries = 3;
 
        mutex_lock(&hidpp->send_mutex);
@@ -300,13 +300,13 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
         */
        *response = *message;
 
-       for (; max_retries != 0; max_retries--) {
+       for (; max_retries != 0 && ret; max_retries--) {
                ret = __hidpp_send_report(hidpp->hid_dev, message);
 
                if (ret) {
                        dbg_hid("__hidpp_send_report returned err: %d\n", ret);
                        memset(response, 0, sizeof(struct hidpp_report));
-                       goto exit;
+                       break;
                }
 
                if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
@@ -314,13 +314,14 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
                        dbg_hid("%s:timeout waiting for response\n", __func__);
                        memset(response, 0, sizeof(struct hidpp_report));
                        ret = -ETIMEDOUT;
+                       break;
                }
 
                if (response->report_id == REPORT_ID_HIDPP_SHORT &&
                    response->rap.sub_id == HIDPP_ERROR) {
                        ret = response->rap.params[1];
                        dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
-                       goto exit;
+                       break;
                }
 
                if ((response->report_id == REPORT_ID_HIDPP_LONG ||
@@ -329,13 +330,12 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
                        ret = response->fap.params[1];
                        if (ret != HIDPP20_ERROR_BUSY) {
                                dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
-                               goto exit;
+                               break;
                        }
                        dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
                }
        }
 
-exit:
        mutex_unlock(&hidpp->send_mutex);
        return ret;
 
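The hidpp_send_message_sync() rework above replaces the goto exit jumps with breaks and folds the stop condition into the loop header, so the function keeps a single unlock-and-return tail while still ending on the first definitive answer; ret starts at -1 so an exhausted retry budget can never report stale success. The loop shape, sketched:

    int ret = -1;                   /* "no answer yet" sentinel */
    int retries = 3;

    mutex_lock(&lock);
    for (; retries != 0 && ret; retries--) {
            ret = send(msg);
            if (ret)
                    break;          /* transport error: stop now */
            ret = wait_reply(rsp);  /* 0, hard error, or -EBUSY */
            if (ret != -EBUSY)
                    break;          /* success or hard error */
            /* busy: leave ret nonzero and go around again */
    }
    mutex_unlock(&lock);
    return ret;
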
index 8214896..76e5353 100644 (file)
@@ -2224,7 +2224,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
                } else if (strstr(product_name, "Wacom") ||
                           strstr(product_name, "wacom") ||
                           strstr(product_name, "WACOM")) {
-                       strscpy(name, product_name, sizeof(name));
+                       if (strscpy(name, product_name, sizeof(name)) < 0) {
+                               hid_warn(wacom->hdev, "String overflow while assembling device name");
+                       }
                } else {
                        snprintf(name, sizeof(name), "Wacom %s", product_name);
                }
@@ -2242,7 +2244,9 @@ static void wacom_update_name(struct wacom *wacom, const char *suffix)
                if (name[strlen(name)-1] == ' ')
                        name[strlen(name)-1] = '\0';
        } else {
-               strscpy(name, features->name, sizeof(name));
+               if (strscpy(name, features->name, sizeof(name)) < 0) {
+                       hid_warn(wacom->hdev, "String overflow while assembling device name");
+               }
        }
 
        snprintf(wacom_wac->name, sizeof(wacom_wac->name), "%s%s",
@@ -2410,8 +2414,13 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
                goto fail_quirks;
        }
 
-       if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
+       if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR) {
                error = hid_hw_open(hdev);
+               if (error) {
+                       hid_err(hdev, "hw open failed\n");
+                       goto fail_quirks;
+               }
+       }
 
        wacom_set_shared_values(wacom_wac);
        devres_close_group(&hdev->dev, wacom);
@@ -2500,8 +2509,10 @@ static void wacom_wireless_work(struct work_struct *work)
                                goto fail;
                }
 
-               strscpy(wacom_wac->name, wacom_wac1->name,
-                       sizeof(wacom_wac->name));
+               if (strscpy(wacom_wac->name, wacom_wac1->name,
+                       sizeof(wacom_wac->name)) < 0) {
+                       hid_warn(wacom->hdev, "String overflow while assembling device name");
+               }
        }
 
        return;
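
strscpy() returns the number of characters copied, or -E2BIG when the source string had to be truncated to fit; the wacom hunks above check for a negative return and log the truncation rather than silently shipping a clipped device name. Minimal usage:

    char name[64];

    if (strscpy(name, src, sizeof(name)) < 0)
            pr_warn("device name truncated\n");
    /* name is NUL-terminated here whether truncated or not */
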
index dc0f7d9..2ccf838 100644 (file)
@@ -831,7 +831,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
        /* Enter report */
        if ((data[1] & 0xfc) == 0xc0) {
                /* serial number of the tool */
-               wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
+               wacom->serial[idx] = ((__u64)(data[3] & 0x0f) << 28) +
                        (data[4] << 20) + (data[5] << 12) +
                        (data[6] << 4) + (data[7] >> 4);
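
wacom->serial[] is 64 bits wide, but the original expression was evaluated in 32-bit int arithmetic: (data[3] & 0x0f) << 28 can set bit 31, and the signed intermediate then sign-extends when widened. Casting the first operand to __u64 forces the whole chain of shifts and adds into 64-bit math. Reduced to its essence:

    u8  b = 0x0f;
    u64 bad  = (b & 0x0f) << 28;        /* 32-bit math, sign-extends:
                                           0xfffffffff0000000 */
    u64 good = ((u64)(b & 0x0f)) << 28; /* 0x00000000f0000000 */
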
 
index 711f451..89e8ed2 100644 (file)
@@ -402,6 +402,7 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
                trace_id = coresight_trace_id_get_cpu_id(cpu);
                if (!IS_VALID_CS_TRACE_ID(trace_id)) {
                        cpumask_clear_cpu(cpu, mask);
+                       coresight_release_path(path);
                        continue;
                }
 
index 918d461..eaa296c 100644 (file)
@@ -942,7 +942,7 @@ tmc_etr_buf_insert_barrier_packet(struct etr_buf *etr_buf, u64 offset)
 
        len = tmc_etr_buf_get_data(etr_buf, offset,
                                   CORESIGHT_BARRIER_PKT_SIZE, &bufp);
-       if (WARN_ON(len < CORESIGHT_BARRIER_PKT_SIZE))
+       if (WARN_ON(len < 0 || len < CORESIGHT_BARRIER_PKT_SIZE))
                return -EINVAL;
        coresight_insert_barrier_packet(bufp);
        return offset + CORESIGHT_BARRIER_PKT_SIZE;
index f98393d..b8636fa 100644 (file)
@@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev)
                data->ien_reg = KX022A_REG_INC4;
        } else {
                irq = fwnode_irq_get_byname(fwnode, "INT2");
-               if (irq <= 0)
+               if (irq < 0)
                        return dev_err_probe(dev, irq, "No suitable IRQ\n");
 
                data->inc_reg = KX022A_REG_INC5;
index 5f7d81b..282e539 100644 (file)
@@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)
 
        adev = ACPI_COMPANION(indio_dev->dev.parent);
        if (!adev)
-               return 0;
+               return -ENXIO;
 
        /* Read _ONT data, which should be a package of 6 integers. */
        status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
        if (status == AE_NOT_FOUND) {
-               return 0;
+               return -ENXIO;
        } else if (ACPI_FAILURE(status)) {
                dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
                         status);
index 3839434..5a5dd5e 100644 (file)
@@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = {
        .unprepare = ad4130_int_clk_unprepare,
 };
 
+static void ad4130_clk_del_provider(void *of_node)
+{
+       of_clk_del_provider(of_node);
+}
+
 static int ad4130_setup_int_clk(struct ad4130_state *st)
 {
        struct device *dev = &st->spi->dev;
@@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
        struct clk_init_data init;
        const char *clk_name;
        struct clk *clk;
+       int ret;
 
        if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
            st->mclk_sel != AD4130_MCLK_76_8KHZ)
@@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
        if (IS_ERR(clk))
                return PTR_ERR(clk);
 
-       return of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+       ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
+       if (ret)
+               return ret;
+
+       return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
 }
 
 static int ad4130_setup(struct iio_dev *indio_dev)
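
The ad4130 hunk above pairs of_clk_add_provider() with a device-managed undo: devm_add_action_or_reset() queues ad4130_clk_del_provider() to run at driver detach, and if the queuing itself fails it executes the action immediately and returns the error, so the clock provider can never leak. The idiom, generically:

    static void undo_thing(void *data)
    {
            release_thing(data);    /* e.g. of_clk_del_provider(data) */
    }

    ret = acquire_thing(res);       /* e.g. of_clk_add_provider(...) */
    if (ret)
            return ret;
    /* runs undo_thing() at detach; on queuing failure it runs
     * right away and the error is returned */
    return devm_add_action_or_reset(dev, undo_thing, res);
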
index 55a6ab5..99bb604 100644 (file)
@@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = {
        __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
                BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
 
-#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
-       __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
-               BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)
-
 #define AD719x_TEMP_CHANNEL(_si, _address) \
        __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)
 
@@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
        AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
        AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
        AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
-       AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
+       AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
        AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
        AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
        AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
@@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
        AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
        AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
        AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
-       AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
+       AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
        AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
        AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
        AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),
index d8570f6..7e21928 100644 (file)
@@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
        init_completion(&sigma_delta->completion);
 
        sigma_delta->irq_dis = true;
+
+       /* the IRQ core clears the IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
+       irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
+
        ret = devm_request_irq(dev, sigma_delta->spi->irq,
                               ad_sd_data_rdy_trig_poll,
                               sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
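
IRQ_DISABLE_UNLAZY makes disable_irq() mask the line at the interrupt controller immediately instead of lazily on the next interrupt, which the sigma-delta trigger needs for a data-ready line that keeps asserting; per the comment above, the core clears the flag again when the (here devm-managed) IRQ is freed, so no explicit teardown is required. Usage in isolation:

    /* Mask at the irqchip immediately on disable_irq() */
    irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

    ret = devm_request_irq(dev, irq, handler,
                           irqflags | IRQF_NO_AUTOEN, name, priv);
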
index a775d2e..dce9ec9 100644 (file)
@@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
 {
        struct imx93_adc *adc = iio_priv(indio_dev);
        struct device *dev = adc->dev;
-       long ret;
-       u32 vref_uv;
+       int ret;
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
@@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_INT;
 
        case IIO_CHAN_INFO_SCALE:
-               ret = vref_uv = regulator_get_voltage(adc->vref);
+               ret = regulator_get_voltage(adc->vref);
                if (ret < 0)
                        return ret;
-               *val = vref_uv / 1000;
+               *val = ret / 1000;
                *val2 = 12;
                return IIO_VAL_FRACTIONAL_LOG2;
 
index bc62e5a..0bc1121 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>
 
+#define MT6370_REG_DEV_INFO            0x100
 #define MT6370_REG_CHG_CTRL3           0x113
 #define MT6370_REG_CHG_CTRL7           0x117
 #define MT6370_REG_CHG_ADC             0x121
@@ -27,6 +28,7 @@
 #define MT6370_ADC_START_MASK          BIT(0)
 #define MT6370_ADC_IN_SEL_MASK         GENMASK(7, 4)
 #define MT6370_AICR_ICHG_MASK          GENMASK(7, 2)
+#define MT6370_VENID_MASK              GENMASK(7, 4)
 
 #define MT6370_AICR_100_mA             0x0
 #define MT6370_AICR_150_mA             0x1
 #define ADC_CONV_TIME_MS               35
 #define ADC_CONV_POLLING_TIME_US       1000
 
+#define MT6370_VID_RT5081              0x8
+#define MT6370_VID_RT5081A             0xA
+#define MT6370_VID_MT6370              0xE
+
 struct mt6370_adc_data {
        struct device *dev;
        struct regmap *regmap;
@@ -55,6 +61,7 @@ struct mt6370_adc_data {
         * from being read at the same time.
         */
        struct mutex adc_lock;
+       unsigned int vid;
 };
 
 static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
@@ -98,6 +105,30 @@ adc_unlock:
        return ret;
 }
 
+static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv)
+{
+       switch (priv->vid) {
+       case MT6370_VID_RT5081:
+       case MT6370_VID_RT5081A:
+       case MT6370_VID_MT6370:
+               return 3350;
+       default:
+               return 3875;
+       }
+}
+
+static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv)
+{
+       switch (priv->vid) {
+       case MT6370_VID_RT5081:
+       case MT6370_VID_RT5081A:
+       case MT6370_VID_MT6370:
+               return 2680;
+       default:
+               return 3870;
+       }
+}
+
 static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
                                 int chan, int *val1, int *val2)
 {
@@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
                case MT6370_AICR_250_mA:
                case MT6370_AICR_300_mA:
                case MT6370_AICR_350_mA:
-                       *val1 = 3350;
+                       *val1 = mt6370_adc_get_ibus_scale(priv);
                        break;
                default:
                        *val1 = 5000;
@@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
                case MT6370_ICHG_600_mA:
                case MT6370_ICHG_700_mA:
                case MT6370_ICHG_800_mA:
-                       *val1 = 2680;
+                       *val1 = mt6370_adc_get_ibat_scale(priv);
                        break;
                default:
                        *val1 = 5000;
@@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = {
        MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)),
 };
 
+static int mt6370_get_vendor_info(struct mt6370_adc_data *priv)
+{
+       unsigned int dev_info;
+       int ret;
+
+       ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
+       if (ret)
+               return ret;
+
+       priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);
+
+       return 0;
+}
+
 static int mt6370_adc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev)
        priv->regmap = regmap;
        mutex_init(&priv->adc_lock);
 
+       ret = mt6370_get_vendor_info(priv);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to get vid\n");
+
        ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0);
        if (ret)
                return dev_err_probe(dev, ret, "Failed to reset ADC\n");
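
The mt6370 change reads the device-info register once at probe and caches the vendor ID so the IBUS/IBAT scale lookups can distinguish RT5081/RT5081A/MT6370 silicon from newer parts. FIELD_GET() extracts and right-shifts a GENMASK() field in one step:

    #include <linux/bitfield.h>

    #define VENID_MASK      GENMASK(7, 4)

    unsigned int reg = 0xe5;
    unsigned int vid = FIELD_GET(VENID_MASK, reg);  /* -> 0xe */
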
index bca79a9..a50f391 100644 (file)
@@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
 
        ret = mxs_lradc_adc_trigger_init(iio);
        if (ret)
-               goto err_trig;
+               return ret;
 
        ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
                                         &mxs_lradc_adc_trigger_handler,
                                         &mxs_lradc_adc_buffer_ops);
        if (ret)
-               return ret;
+               goto err_trig;
 
        adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];
 
@@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
 
 err_dev:
        mxs_lradc_adc_hw_stop(adc);
-       mxs_lradc_adc_trigger_remove(iio);
-err_trig:
        iio_triggered_buffer_cleanup(iio);
+err_trig:
+       mxs_lradc_adc_trigger_remove(iio);
        return ret;
 }
 
@@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)
 
        iio_device_unregister(iio);
        mxs_lradc_adc_hw_stop(adc);
-       mxs_lradc_adc_trigger_remove(iio);
        iio_triggered_buffer_cleanup(iio);
+       mxs_lradc_adc_trigger_remove(iio);
 
        return 0;
 }
index c1c4392..7dfc9c9 100644 (file)
@@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
        int adc_chan = chan->channel;
        int ret = 0;
 
-       if (adc_chan > PALMAS_ADC_CH_MAX)
+       if (adc_chan >= PALMAS_ADC_CH_MAX)
                return -EINVAL;
 
        mutex_lock(&adc->lock);
@@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev,
        int adc_chan = chan->channel;
        int ret = 0;
 
-       if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+       if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
                return -EINVAL;
 
        mutex_lock(&adc->lock);
@@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev,
        int adc_chan = chan->channel;
        int ret;
 
-       if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+       if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
                return -EINVAL;
 
        mutex_lock(&adc->lock);
@@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev,
        int adc_chan = chan->channel;
        int ret;
 
-       if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+       if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
                return -EINVAL;
 
        mutex_lock(&adc->lock);
@@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev,
        int old;
        int ret;
 
-       if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
+       if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
                return -EINVAL;
 
        mutex_lock(&adc->lock);
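
The palmas changes are a textbook off-by-one fix: PALMAS_ADC_CH_MAX is the number of channels, so an index equal to it already overruns the table and the guard must use >=. A self-contained illustration (the table size here is invented):

    #include <stdio.h>

    #define CH_MAX 16                   /* valid channels: 0..15 */
    static int chan_table[CH_MAX];

    static int read_chan(int chan)
    {
            if (chan >= CH_MAX)         /* 'chan > CH_MAX' would let 16 through */
                    return -1;
            return chan_table[chan];
    }

    int main(void)
    {
            printf("%d\n", read_chan(CH_MAX));  /* rejected: prints -1 */
            return 0;
    }
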
index 1aadb2a..bd7e240 100644
@@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
         * to get the *real* number of channels.
         */
        ret = device_property_count_u32(dev, "st,adc-diff-channels");
-       if (ret < 0)
-               return ret;
-
-       ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
-       if (ret > adc_info->max_channels) {
-               dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
-               return -EINVAL;
-       } else if (ret > 0) {
-               adc->num_diff = ret;
-               num_channels += ret;
+       if (ret > 0) {
+               ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
+               if (ret > adc_info->max_channels) {
+                       dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
+                       return -EINVAL;
+               } else if (ret > 0) {
+                       adc->num_diff = ret;
+                       num_channels += ret;
+               }
        }
 
        /* Optional sample time is provided either for each, or all channels */
@@ -2037,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
        struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
        struct device *dev = &indio_dev->dev;
        u32 num_diff = adc->num_diff;
+       int num_se = nchans - num_diff;
        int size = num_diff * sizeof(*diff) / sizeof(u32);
        int scan_index = 0, ret, i, c;
        u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
@@ -2063,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
                        scan_index++;
                }
        }
-
-       ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
-                                            nchans);
-       if (ret)
-               return ret;
-
-       for (c = 0; c < nchans; c++) {
-               if (chans[c] >= adc_info->max_channels) {
-                       dev_err(&indio_dev->dev, "Invalid channel %d\n",
-                               chans[c]);
-                       return -EINVAL;
+       if (num_se > 0) {
+               ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se);
+               if (ret) {
+                       dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret);
+                       return ret;
                }
 
-               /* Channel can't be configured both as single-ended & diff */
-               for (i = 0; i < num_diff; i++) {
-                       if (chans[c] == diff[i].vinp) {
-                               dev_err(&indio_dev->dev, "channel %d misconfigured\n",  chans[c]);
+               for (c = 0; c < num_se; c++) {
+                       if (chans[c] >= adc_info->max_channels) {
+                               dev_err(&indio_dev->dev, "Invalid channel %d\n",
+                                       chans[c]);
                                return -EINVAL;
                        }
+
+                       /* Channel can't be configured both as single-ended & diff */
+                       for (i = 0; i < num_diff; i++) {
+                               if (chans[c] == diff[i].vinp) {
+                                       dev_err(&indio_dev->dev, "channel %d misconfigured\n",
+                                               chans[c]);
+                                       return -EINVAL;
+                               }
+                       }
+                       stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
+                                               chans[c], 0, scan_index, false);
+                       scan_index++;
                }
-               stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
-                                       chans[c], 0, scan_index, false);
-               scan_index++;
        }
 
        if (adc->nsmps > 0) {
@@ -2306,7 +2309,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)
 
        if (legacy)
                ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
-                                                num_channels);
+                                                timestamping ? num_channels - 1 : num_channels);
        else
                ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
        if (ret < 0)
index 07e9f6a..e3366cf 100644
@@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,
 
                ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
                                                     val);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                ad74413r_adc_to_resistance_result(*val, val);
index 6c74fea..addd97a 100644
@@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
 obj-$(CONFIG_AD5592R) += ad5592r.o
 obj-$(CONFIG_AD5593R) += ad5593r.o
 obj-$(CONFIG_AD5755) += ad5755.o
-obj-$(CONFIG_AD5755) += ad5758.o
+obj-$(CONFIG_AD5758) += ad5758.o
 obj-$(CONFIG_AD5761) += ad5761.o
 obj-$(CONFIG_AD5764) += ad5764.o
 obj-$(CONFIG_AD5766) += ad5766.o
index 46bf758..3f5661a 100644
@@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev)
        struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
                to_i2c_client(dev)));
        u8 outbuf[2];
+       int ret;
 
        outbuf[0] = (data->powerdown_mode + 1) << 4;
        outbuf[1] = 0;
        data->powerdown = true;
 
-       return i2c_master_send(data->client, outbuf, 2);
+       ret = i2c_master_send(data->client, outbuf, 2);
+       if (ret < 0)
+               return ret;
+       else if (ret != 2)
+               return -EIO;
+       return 0;
 }
 
 static int mcp4725_resume(struct device *dev)
@@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev)
        struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
                to_i2c_client(dev)));
        u8 outbuf[2];
+       int ret;
 
        /* restore previous DAC value */
        outbuf[0] = (data->dac_value >> 8) & 0xf;
        outbuf[1] = data->dac_value & 0xff;
        data->powerdown = false;
 
-       return i2c_master_send(data->client, outbuf, 2);
+       ret = i2c_master_send(data->client, outbuf, 2);
+       if (ret < 0)
+               return ret;
+       else if (ret != 2)
+               return -EIO;
+       return 0;
 }
 static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend,
                                mcp4725_resume);
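
i2c_master_send() returns the number of bytes transferred on success, or a negative errno, while PM callbacks are expected to return 0 or a negative errno; the fix therefore translates the positive byte count. A standalone sketch of that conversion, where fake_i2c_send() merely stands in for the real I2C call:

    #include <errno.h>

    /* Stand-in for i2c_master_send(): bytes written, or -errno. */
    static int fake_i2c_send(const unsigned char *buf, int len)
    {
            (void)buf;
            return len;                 /* pretend the full transfer worked */
    }

    /* PM callbacks must return 0 or -errno, never a byte count. */
    static int suspend_dac(const unsigned char *buf, int len)
    {
            int ret = fake_i2c_send(buf, len);

            if (ret < 0)
                    return ret;         /* bus error: propagate */
            if (ret != len)
                    return -EIO;        /* short write: map to -EIO */
            return 0;
    }

    int main(void)
    {
            unsigned char buf[2] = { 0 };

            return suspend_dac(buf, 2);
    }
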
index 99576b2..32d7f83 100644
@@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
 {
        struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
        struct device *dev = regmap_get_device(st->map);
+       struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);
 
        pm_runtime_get_sync(dev);
 
+       mutex_lock(&st->lock);
+       inv_icm42600_timestamp_reset(ts);
+       mutex_unlock(&st->lock);
+
        return 0;
 }
 
@@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
        struct device *dev = regmap_get_device(st->map);
        unsigned int sensor;
        unsigned int *watermark;
-       struct inv_icm42600_timestamp *ts;
        struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
        unsigned int sleep_temp = 0;
        unsigned int sleep_sensor = 0;
@@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
        if (indio_dev == st->indio_gyro) {
                sensor = INV_ICM42600_SENSOR_GYRO;
                watermark = &st->fifo.watermark.gyro;
-               ts = iio_priv(st->indio_gyro);
        } else if (indio_dev == st->indio_accel) {
                sensor = INV_ICM42600_SENSOR_ACCEL;
                watermark = &st->fifo.watermark.accel;
-               ts = iio_priv(st->indio_accel);
        } else {
                return -EINVAL;
        }
@@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
        if (!st->fifo.on)
                ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp);
 
-       inv_icm42600_timestamp_reset(ts);
-
 out_unlock:
        mutex_unlock(&st->lock);
 
index 8bb6897..7653261 100644
@@ -337,6 +337,17 @@ free_gains:
        return ret;
 }
 
+static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
+                                   int num_times)
+{
+       int i;
+
+       for (i = 0; i < num_times; i++) {
+               int_micro_times[i * 2] = time_us[i] / 1000000;
+               int_micro_times[i * 2 + 1] = time_us[i] % 1000000;
+       }
+}
+
 /**
  * iio_gts_build_avail_time_table - build table of available integration times
  * @gts:       Gain time scale descriptor
@@ -351,7 +362,7 @@ free_gains:
  */
 static int iio_gts_build_avail_time_table(struct iio_gts *gts)
 {
-       int *times, i, j, idx = 0;
+       int *times, i, j, idx = 0, *int_micro_times;
 
        if (!gts->num_itime)
                return 0;
@@ -378,13 +389,24 @@ static int iio_gts_build_avail_time_table(struct iio_gts *gts)
                        }
                }
        }
-       gts->avail_time_tables = times;
-       /*
-        * This is just to survive a unlikely corner-case where times in the
-        * given time table were not unique. Else we could just trust the
-        * gts->num_itime.
-        */
-       gts->num_avail_time_tables = idx;
+
+       /* create a list of times formatted as a list of IIO_VAL_INT_PLUS_MICRO */
+       int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL);
+       if (int_micro_times) {
+               /*
+                * This is just to survive an unlikely corner-case where times in
+                * the given time table were not unique. Else we could just
+                * trust the gts->num_itime.
+                */
+               gts->num_avail_time_tables = idx;
+               iio_gts_us_to_int_micro(times, int_micro_times, idx);
+       }
+
+       gts->avail_time_tables = int_micro_times;
+       kfree(times);
+
+       if (!int_micro_times)
+               return -ENOMEM;
 
        return 0;
 }
@@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts,  const int **vals, int *type,
                return -EINVAL;
 
        *vals = gts->avail_time_tables;
-       *type = IIO_VAL_INT;
-       *length = gts->num_avail_time_tables;
+       *type = IIO_VAL_INT_PLUS_MICRO;
+       *length = gts->num_avail_time_tables * 2;
 
        return IIO_AVAIL_LIST;
 }
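
The gts-helper fix changes how available integration times are published: consumers of IIO_VAL_INT_PLUS_MICRO expect interleaved (seconds, microseconds) pairs rather than raw integers, so the list length also doubles. A standalone sketch of the same split the new iio_gts_us_to_int_micro() helper performs:

    #include <stdio.h>

    /* Split times given in microseconds into interleaved
     * (seconds, microseconds) pairs. */
    static void us_to_int_micro(const int *us, int *pairs, int n)
    {
            for (int i = 0; i < n; i++) {
                    pairs[2 * i]     = us[i] / 1000000;  /* whole seconds */
                    pairs[2 * i + 1] = us[i] % 1000000;  /* remainder in us */
            }
    }

    int main(void)
    {
            int us[] = { 55000, 100000, 200000, 400000 };
            int pairs[8];

            us_to_int_micro(us, pairs, 4);
            printf("%d.%06d s\n", pairs[0], pairs[1]);   /* 0.055000 s */
            return 0;
    }
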
index e486dcf..f85194f 100644
@@ -231,6 +231,9 @@ struct bu27034_result {
 
 static const struct regmap_range bu27034_volatile_ranges[] = {
        {
+               .range_min = BU27034_REG_SYSTEM_CONTROL,
+               .range_max = BU27034_REG_SYSTEM_CONTROL,
+       }, {
                .range_min = BU27034_REG_MODE_CONTROL4,
                .range_max = BU27034_REG_MODE_CONTROL4,
        }, {
@@ -1167,11 +1170,12 @@ static int bu27034_read_raw(struct iio_dev *idev,
 
        switch (mask) {
        case IIO_CHAN_INFO_INT_TIME:
-               *val = bu27034_get_int_time(data);
-               if (*val < 0)
-                       return *val;
+               *val = 0;
+               *val2 = bu27034_get_int_time(data);
+               if (*val2 < 0)
+                       return *val2;
 
-               return IIO_VAL_INT;
+               return IIO_VAL_INT_PLUS_MICRO;
 
        case IIO_CHAN_INFO_SCALE:
                return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1229,7 +1233,10 @@ static int bu27034_write_raw(struct iio_dev *idev,
                ret = bu27034_set_scale(data, chan->channel, val, val2);
                break;
        case IIO_CHAN_INFO_INT_TIME:
-               ret = bu27034_try_set_int_time(data, val);
+               if (!val)
+                       ret = bu27034_try_set_int_time(data, val2);
+               else
+                       ret = -EINVAL;
                break;
        default:
                ret = -EINVAL;
@@ -1268,12 +1275,19 @@ static int bu27034_chip_init(struct bu27034_data *data)
        int ret, sel;
 
        /* Reset */
-       ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
+       ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
                           BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET);
        if (ret)
                return dev_err_probe(data->dev, ret, "Sensor reset failed\n");
 
        msleep(1);
+
+       ret = regmap_reinit_cache(data->regmap, &bu27034_regmap);
+       if (ret) {
+               dev_err(data->dev, "Failed to reinit reg cache\n");
+               return ret;
+       }
+
        /*
         * Read integration time here to ensure it is in regmap cache. We do
         * this to speed-up the int-time acquisition in the start of the buffer
index 14e2933..94f5d61 100644
@@ -8,6 +8,7 @@
  * TODO: Proximity
  */
 #include <linux/bitops.h>
+#include <linux/bitfield.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
@@ -42,6 +43,7 @@
 #define VCNL4035_ALS_PERS_MASK         GENMASK(3, 2)
 #define VCNL4035_INT_ALS_IF_H_MASK     BIT(12)
 #define VCNL4035_INT_ALS_IF_L_MASK     BIT(13)
+#define VCNL4035_DEV_ID_MASK           GENMASK(7, 0)
 
 /* Default values */
 #define VCNL4035_MODE_ALS_ENABLE       BIT(0)
@@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
                return ret;
        }
 
+       id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
        if (id != VCNL4035_DEV_ID_VAL) {
                dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
                        id, VCNL4035_DEV_ID_VAL);
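
The vcnl4035 ID register keeps the device ID in its low byte, so the raw register value must be masked before the comparison or unrelated high bits make the probe fail. A userspace sketch of the extraction; GENMASK() and FIELD_GET() are kernel macros, so a simplified GENMASK is redefined here:

    #include <stdio.h>

    #define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define DEV_ID_MASK    GENMASK(7, 0)

    int main(void)
    {
            unsigned int reg = 0x1080;              /* high byte is unrelated */
            unsigned int id  = reg & DEV_ID_MASK;   /* what FIELD_GET() yields
                                                     * for a mask based at bit 0 */

            printf("id=0x%02x\n", id);              /* 0x80 */
            return 0;
    }
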
index 28bb7ef..e155a75 100644
@@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
                        return ret;
 
                ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
-               if (ret)
-                       return ret;
 
                pm_runtime_mark_last_busy(data->dev);
                pm_runtime_put_autosuspend(data->dev);
 
+               if (ret)
+                       return ret;
+
                switch (chan->address) {
                case TEMPERATURE:
                        *val = t;
index e86afec..b1c3641 100644
@@ -3341,9 +3341,7 @@ static int bnxt_re_process_raw_qp_pkt_rx(struct bnxt_re_qp *gsi_qp,
        udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
 
        /* post data received  in the send queue */
-       rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
-
-       return 0;
+       return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
 }
 
 static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
index b9e2f89..e34eccd 100644
@@ -1336,6 +1336,10 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
 {
        struct bnxt_qplib_cc_param cc_param = {};
 
+       /* Do not enable congestion control on VFs */
+       if (rdev->is_virtfn)
+               return;
+
        /* Currently enabling only for GenP5 adapters */
        if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
                return;
index f139d4c..8974f62 100644
@@ -2056,6 +2056,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
        u32 pg_sz_lvl;
        int rc;
 
+       if (!cq->dpi) {
+               dev_err(&rcfw->pdev->dev,
+                       "FP: CREATE_CQ failed due to NULL DPI\n");
+               return -EINVAL;
+       }
+
        hwq_attr.res = res;
        hwq_attr.depth = cq->max_wqe;
        hwq_attr.stride = sizeof(struct cq_base);
@@ -2069,11 +2075,6 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
                                 CMDQ_BASE_OPCODE_CREATE_CQ,
                                 sizeof(req));
 
-       if (!cq->dpi) {
-               dev_err(&rcfw->pdev->dev,
-                       "FP: CREATE_CQ failed due to NULL DPI\n");
-               return -EINVAL;
-       }
        req.dpi = cpu_to_le32(cq->dpi->dpi);
        req.cq_handle = cpu_to_le64(cq->cq_handle);
        req.cq_size = cpu_to_le32(cq->hwq.max_elements);
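
The create_cq hunk moves the NULL-DPI validation ahead of the hardware-queue allocation; with the old ordering, the -EINVAL return happened after bnxt_qplib_alloc_init_hwq() had succeeded and the queue was leaked. A minimal sketch of the check-before-allocate ordering (names are illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Validate inputs first: a failed check then returns with
     * nothing to unwind. */
    static int create_cq(const void *dpi, size_t depth)
    {
            void *hwq;

            if (!dpi)
                    return -EINVAL;     /* nothing allocated yet */

            hwq = malloc(depth);
            if (!hwq)
                    return -ENOMEM;
            /* ... program the queue ... */
            free(hwq);
            return 0;
    }

    int main(void)
    {
            printf("%d\n", create_cq(NULL, 64));    /* -22, and no leak */
            return 0;
    }
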
index 126d4f2..81b0c5e 100644
@@ -215,17 +215,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
-               unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
-                       hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
-
+               npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+                                               hwq_attr->sginfo->pgsize);
                hwq->is_user = true;
-               npages = sginfo_num_pages;
-               npages = (npages * PAGE_SIZE) /
-                         BIT_ULL(hwq_attr->sginfo->pgshft);
-               if ((sginfo_num_pages * PAGE_SIZE) %
-                    BIT_ULL(hwq_attr->sginfo->pgshft))
-                       if (!npages)
-                               npages++;
        }
 
        if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
index 1714a1e..b967a17 100644
@@ -617,16 +617,15 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
                /* Free the hwq if it already exist, must be a rereg */
                if (mr->hwq.max_elements)
                        bnxt_qplib_free_hwq(res, &mr->hwq);
-               /* Use system PAGE_SIZE */
                hwq_attr.res = res;
                hwq_attr.depth = pages;
-               hwq_attr.stride = buf_pg_size;
+               hwq_attr.stride = sizeof(dma_addr_t);
                hwq_attr.type = HWQ_TYPE_MR;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.sginfo->umem = umem;
                hwq_attr.sginfo->npages = pages;
-               hwq_attr.sginfo->pgsize = PAGE_SIZE;
-               hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+               hwq_attr.sginfo->pgsize = buf_pg_size;
+               hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
                rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
                if (rc) {
                        dev_err(&res->pdev->dev,
index 8eca6c1..2a195c4 100644
@@ -1403,7 +1403,7 @@ static int pbl_continuous_initialize(struct efa_dev *dev,
  */
 static int pbl_indirect_initialize(struct efa_dev *dev, struct pbl_context *pbl)
 {
-       u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, PAGE_SIZE);
+       u32 size_in_pages = DIV_ROUND_UP(pbl->pbl_buf_size_in_bytes, EFA_CHUNK_PAYLOAD_SIZE);
        struct scatterlist *sgl;
        int sg_dma_cnt, err;
 
index 84f1167..d4c6b9b 100644
@@ -4583,11 +4583,9 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
        mtu = ib_mtu_enum_to_int(ib_mtu);
        if (WARN_ON(mtu <= 0))
                return -EINVAL;
-#define MAX_LP_MSG_LEN 16384
-       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
-       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-       if (WARN_ON(lp_pktn_ini >= 0xF))
-               return -EINVAL;
+#define MIN_LP_MSG_LEN 1024
+       /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
+       lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
 
        if (attr_mask & IB_QP_PATH_MTU) {
                hr_reg_write(context, QPC_MTU, ib_mtu);
@@ -5012,7 +5010,6 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
 static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
 {
 #define QP_ACK_TIMEOUT_MAX_HIP08 20
-#define QP_ACK_TIMEOUT_OFFSET 10
 #define QP_ACK_TIMEOUT_MAX 31
 
        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
@@ -5021,7 +5018,7 @@ static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
                                   "local ACK timeout shall be 0 to 20.\n");
                        return false;
                }
-               *timeout += QP_ACK_TIMEOUT_OFFSET;
+               *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
        } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
                if (*timeout > QP_ACK_TIMEOUT_MAX) {
                        ibdev_warn(&hr_dev->ib_dev,
@@ -5307,6 +5304,18 @@ out:
        return ret;
 }
 
+static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
+                             struct hns_roce_v2_qp_context *context)
+{
+       u8 timeout;
+
+       timeout = (u8)hr_reg_read(context, QPC_AT);
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+               timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+
+       return timeout;
+}
+
 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
@@ -5384,7 +5393,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
        qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
 
        qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
-       qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
+       qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
        qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
        qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
 
index 1b44d24..7033eae 100644
@@ -44,6 +44,8 @@
 #define HNS_ROCE_V2_MAX_XRCD_NUM               0x1000000
 #define HNS_ROCE_V2_RSV_XRCD_NUM               0
 
+#define HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08    10
+
 #define HNS_ROCE_V3_SCCC_SZ                    64
 #define HNS_ROCE_V3_GMV_ENTRY_SZ               32
 
index 37a5cf6..1437649 100644
@@ -33,6 +33,7 @@
 
 #include <linux/vmalloc.h>
 #include <rdma/ib_umem.h>
+#include <linux/math.h>
 #include "hns_roce_device.h"
 #include "hns_roce_cmd.h"
 #include "hns_roce_hem.h"
@@ -909,6 +910,44 @@ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
        return page_cnt;
 }
 
+static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
+{
+       return int_pow(ba_per_bt, hopnum - 1);
+}
+
+static unsigned int cal_best_bt_pg_sz(struct hns_roce_dev *hr_dev,
+                                     struct hns_roce_mtr *mtr,
+                                     unsigned int pg_shift)
+{
+       unsigned long cap = hr_dev->caps.page_size_cap;
+       struct hns_roce_buf_region *re;
+       unsigned int pgs_per_l1ba;
+       unsigned int ba_per_bt;
+       unsigned int ba_num;
+       int i;
+
+       for_each_set_bit_from(pg_shift, &cap, sizeof(cap) * BITS_PER_BYTE) {
+               if (!(BIT(pg_shift) & cap))
+                       continue;
+
+               ba_per_bt = BIT(pg_shift) / BA_BYTE_LEN;
+               ba_num = 0;
+               for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+                       re = &mtr->hem_cfg.region[i];
+                       if (re->hopnum == 0)
+                               continue;
+
+                       pgs_per_l1ba = cal_pages_per_l1ba(ba_per_bt, re->hopnum);
+                       ba_num += DIV_ROUND_UP(re->count, pgs_per_l1ba);
+               }
+
+               if (ba_num <= ba_per_bt)
+                       return pg_shift;
+       }
+
+       return 0;
+}
+
 static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
                         unsigned int ba_page_shift)
 {
@@ -917,6 +956,10 @@ static int mtr_alloc_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 
        hns_roce_hem_list_init(&mtr->hem_list);
        if (!cfg->is_direct) {
+               ba_page_shift = cal_best_bt_pg_sz(hr_dev, mtr, ba_page_shift);
+               if (!ba_page_shift)
+                       return -ERANGE;
+
                ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
                                                cfg->region, cfg->region_count,
                                                ba_page_shift);
index ab5cdf7..eaa12c1 100644
@@ -522,11 +522,6 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
        if (!iwqp->user_mode)
                cancel_delayed_work_sync(&iwqp->dwork_flush);
 
-       irdma_qp_rem_ref(&iwqp->ibqp);
-       wait_for_completion(&iwqp->free_qp);
-       irdma_free_lsmm_rsrc(iwqp);
-       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
-
        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
                        irdma_clean_cqes(iwqp, iwqp->iwscq);
@@ -534,6 +529,12 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
                                irdma_clean_cqes(iwqp, iwqp->iwrcq);
                }
        }
+
+       irdma_qp_rem_ref(&iwqp->ibqp);
+       wait_for_completion(&iwqp->free_qp);
+       irdma_free_lsmm_rsrc(iwqp);
+       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+
        irdma_remove_push_mmap_entries(iwqp);
        irdma_free_qp_rsrc(iwqp);
 
@@ -3291,6 +3292,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
                        break;
                case IB_WR_LOCAL_INV:
                        info.op_type = IRDMA_OP_TYPE_INV_STAG;
+                       info.local_fence = info.read_fence;
                        info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
                        err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
                        break;
index db18ace..f46c5a5 100644
@@ -115,15 +115,16 @@ static enum ib_wc_opcode wr_to_wc_opcode(enum ib_wr_opcode opcode)
 void retransmit_timer(struct timer_list *t)
 {
        struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+       unsigned long flags;
 
        rxe_dbg_qp(qp, "retransmit timer fired\n");
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_sched_task(&qp->comp.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@ -481,11 +482,13 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
 
 static void comp_check_sq_drain_done(struct rxe_qp *qp)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
 
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
@@ -499,7 +502,7 @@ static void comp_check_sq_drain_done(struct rxe_qp *qp)
                        return;
                }
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static inline enum comp_state complete_ack(struct rxe_qp *qp,
@@ -625,13 +628,15 @@ static void free_pkt(struct rxe_pkt_info *pkt)
  */
 static void reset_retry_timer(struct rxe_qp *qp)
 {
+       unsigned long flags;
+
        if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
-               spin_lock_bh(&qp->state_lock);
+               spin_lock_irqsave(&qp->state_lock, flags);
                if (qp_state(qp) >= IB_QPS_RTS &&
                    psn_compare(qp->req.psn, qp->comp.psn) > 0)
                        mod_timer(&qp->retrans_timer,
                                  jiffies + qp->qp_timeout_jiffies);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
 }
 
@@ -643,18 +648,19 @@ int rxe_completer(struct rxe_qp *qp)
        struct rxe_pkt_info *pkt = NULL;
        enum comp_state state;
        int ret;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                drain_resp_pkts(qp);
                flush_send_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (qp->comp.timeout) {
                qp->comp.timeout_retry = 1;
index 2bc7361..a38fab1 100644
@@ -412,15 +412,16 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
        int err;
        int is_request = pkt->mask & RXE_REQ_MASK;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
            (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
                goto drop;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        rxe_icrc_generate(skb, pkt);
 
index c5451a4..61a2eb7 100644
@@ -300,6 +300,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+       unsigned long flags;
 
        rxe_get(pd);
        rxe_get(rcq);
@@ -325,10 +326,10 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
        if (err)
                goto err2;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return 0;
 
@@ -492,24 +493,28 @@ static void rxe_qp_reset(struct rxe_qp *qp)
 /* move the qp to the error state */
 void rxe_qp_error(struct rxe_qp *qp)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_ERR;
 
        /* drain work and packet queues */
        rxe_sched_task(&qp->resp.task);
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                       int mask)
 {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.sq_draining = 1;
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 /* caller should hold qp->state_lock */
@@ -555,14 +560,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                qp->attr.cur_qp_state = attr->qp_state;
 
        if (mask & IB_QP_STATE) {
-               spin_lock_bh(&qp->state_lock);
+               unsigned long flags;
+
+               spin_lock_irqsave(&qp->state_lock, flags);
                err = __qp_chk_state(qp, attr, mask);
                if (!err) {
                        qp->attr.qp_state = attr->qp_state;
                        rxe_dbg_qp(qp, "state -> %s\n",
                                        qps2str[attr->qp_state]);
                }
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
 
                if (err)
                        return err;
@@ -688,6 +695,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
 /* called by the query qp verb */
 int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
 {
+       unsigned long flags;
+
        *attr = qp->attr;
 
        attr->rq_psn                            = qp->resp.psn;
@@ -708,12 +717,13 @@ int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
        /* Applications that get this state typically spin on it.
         * Yield the processor
         */
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->attr.sq_draining) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                cond_resched();
+       } else {
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
-       spin_unlock_bh(&qp->state_lock);
 
        return 0;
 }
@@ -736,10 +746,11 @@ int rxe_qp_chk_destroy(struct rxe_qp *qp)
 static void rxe_qp_do_cleanup(struct work_struct *work)
 {
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->valid = 0;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
        qp->qp_timeout_jiffies = 0;
 
        if (qp_type(qp) == IB_QPT_RC) {
index 2f953cc..5861e42 100644
@@ -14,6 +14,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                            struct rxe_qp *qp)
 {
        unsigned int pkt_type;
+       unsigned long flags;
 
        if (unlikely(!qp->valid))
                return -EINVAL;
@@ -38,19 +39,19 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
                return -EINVAL;
        }
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (pkt->mask & RXE_REQ_MASK) {
                if (unlikely(qp_state(qp) < IB_QPS_RTR)) {
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
                        return -EINVAL;
                }
        } else {
                if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
                        return -EINVAL;
                }
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return 0;
 }
index 65134a9..5fe7cba 100644
@@ -99,17 +99,18 @@ static void req_retry(struct rxe_qp *qp)
 void rnr_nak_timer(struct timer_list *t)
 {
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+       unsigned long flags;
 
        rxe_dbg_qp(qp, "nak timer fired\n");
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                /* request a send queue retry */
                qp->req.need_retry = 1;
                qp->req.wait_for_rnr_timer = 0;
                rxe_sched_task(&qp->req.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static void req_check_sq_drain_done(struct rxe_qp *qp)
@@ -118,8 +119,9 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
        unsigned int index;
        unsigned int cons;
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_SQD) {
                q = qp->sq.queue;
                index = qp->req.wqe_index;
@@ -140,7 +142,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
                                break;
 
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
 
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
@@ -154,7 +156,7 @@ static void req_check_sq_drain_done(struct rxe_qp *qp)
                        return;
                } while (0);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 }
 
 static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
@@ -173,6 +175,7 @@ static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
 {
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
 
        req_check_sq_drain_done(qp);
 
@@ -180,13 +183,13 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
        if (wqe == NULL)
                return NULL;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
                     (wqe->state != wqe_state_processing))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return NULL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
@@ -676,16 +679,17 @@ int rxe_requester(struct rxe_qp *qp)
        struct rxe_queue *q = qp->sq.queue;
        struct rxe_ah *ah;
        struct rxe_av *av;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
 
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
                wqe = __req_next_wqe(qp);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                if (wqe)
                        goto err;
                else
@@ -700,10 +704,10 @@ int rxe_requester(struct rxe_qp *qp)
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                qp->req.wait_for_rnr_timer = 0;
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        /* we come here if the retransmit timer has fired
         * or if the rnr timer has fired. If the retransmit
index 68f6cd1..1da044f 100644
@@ -1047,6 +1047,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       unsigned long flags;
 
        if (!wqe)
                goto finish;
@@ -1137,12 +1138,12 @@ static enum resp_states do_complete(struct rxe_qp *qp,
                return RESPST_ERR_CQ_OVERFLOW;
 
 finish:
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return RESPST_CHK_RESOURCE;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (unlikely(!pkt))
                return RESPST_DONE;
@@ -1468,18 +1469,19 @@ int rxe_responder(struct rxe_qp *qp)
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
 
                drain_req_pkts(qp);
                flush_recv_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
 
index dea605b..4d8f6b8 100644
@@ -904,10 +904,10 @@ static int rxe_post_send_kernel(struct rxe_qp *qp,
        if (!err)
                rxe_sched_task(&qp->req.task);
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->comp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return err;
 }
@@ -917,22 +917,23 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 {
        struct rxe_qp *qp = to_rqp(ibqp);
        int err;
+       unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
 
        if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_err_qp(qp, "qp not ready to send");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
@@ -1008,22 +1009,22 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
        struct rxe_rq *rq = &qp->rq;
        unsigned long flags;
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
 
        /* see C10-97.2.1 */
        if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_dbg_qp(qp, "qp not ready to post recv");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        if (unlikely(qp->srq)) {
                *bad_wr = wr;
@@ -1044,10 +1045,10 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 
        spin_unlock_irqrestore(&rq->producer_lock, flags);
 
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->resp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
 
        return err;
 }
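
Every rxe hunk above makes the same substitution: spin_lock_bh() assumes the lock is never taken with hard interrupts disabled, whereas spin_lock_irqsave() records the caller's interrupt state and restores exactly that state on unlock, making qp->state_lock safe to take from any context. A userspace model of the save/restore idiom; the "interrupt flag" is simulated and none of this is kernel API:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int irqs_enabled = 1;            /* stand-in for the CPU IRQ flag */

    static void lock_irqsave(unsigned long *flags)
    {
            *flags = irqs_enabled;          /* remember the caller's state */
            irqs_enabled = 0;               /* "disable interrupts" */
            pthread_mutex_lock(&lock);
    }

    static void unlock_irqrestore(unsigned long flags)
    {
            pthread_mutex_unlock(&lock);
            irqs_enabled = (int)flags;      /* restore, never force-enable */
    }

    int main(void)
    {
            unsigned long flags;

            lock_irqsave(&flags);
            /* ... critical section ... */
            unlock_irqrestore(flags);
            printf("irqs_enabled=%d\n", irqs_enabled);  /* back to 1 */
            return 0;
    }
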
index 37e876d..641eb86 100644
@@ -703,7 +703,7 @@ void input_close_device(struct input_handle *handle)
 
        __input_release_device(handle);
 
-       if (!dev->inhibited && !--dev->users) {
+       if (!--dev->users && !dev->inhibited) {
                if (dev->poller)
                        input_dev_poller_stop(dev->poller);
                if (dev->close)
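
The input_close_device() one-liner is subtle: with the old operand order, && short-circuits whenever the device is inhibited, so the --dev->users side effect never runs and the open count leaks. Putting the decrement first makes it unconditional, with inhibition only gating the teardown work. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            int users, inhibited = 1;

            /* Buggy order: the decrement is skipped when inhibited. */
            users = 1;
            if (!inhibited && !--users)
                    ;                       /* teardown would go here */
            printf("buggy close: users=%d\n", users);   /* still 1 */

            /* Fixed order: the decrement always runs. */
            users = 1;
            if (!--users && !inhibited)
                    ;
            printf("fixed close: users=%d\n", users);   /* 0 */
            return 0;
    }
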
index 28be88e..f33622f 100644
@@ -281,7 +281,6 @@ static const struct xpad_device {
        { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
        { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
        { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
-       { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
        { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
        { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
        { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
index 0948938..e79f549 100644
@@ -109,6 +109,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
 };
 
 /*
+ * Some devices have a wrong entry pointing to a GPIO that is
+ * required by another driver, so this driver must not claim it.
+ */
+static const struct dmi_system_id dmi_invalid_acpi_index[] = {
+       {
+               /*
+                * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
+                * points to a GPIO which is not a home button and which is
+                * required by the lenovo-yogabook driver.
+                */
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+               },
+               .driver_data = (void *)1l,
+       },
+       {} /* Terminating entry */
+};
+
+/*
  * Get the Nth GPIO number from the ACPI object.
  */
 static int soc_button_lookup_gpio(struct device *dev, int acpi_index,
@@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev,
        struct platform_device *pd;
        struct gpio_keys_button *gpio_keys;
        struct gpio_keys_platform_data *gpio_keys_pdata;
+       const struct dmi_system_id *dmi_id;
+       int invalid_acpi_index = -1;
        int error, gpio, irq;
        int n_buttons = 0;
 
@@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev,
        gpio_keys = (void *)(gpio_keys_pdata + 1);
        n_buttons = 0;
 
+       dmi_id = dmi_first_match(dmi_invalid_acpi_index);
+       if (dmi_id)
+               invalid_acpi_index = (long)dmi_id->driver_data;
+
        for (info = button_info; info->name; info++) {
                if (info->autorepeat != autorepeat)
                        continue;
 
+               if (info->acpi_index == invalid_acpi_index)
+                       continue;
+
                error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
                if (error || irq < 0) {
                        /*
index ece97f8..2118b20 100644
@@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
        struct input_dev *dev = psmouse->dev;
        struct elantech_data *etd = psmouse->private;
        unsigned char *packet = psmouse->packet;
-       int id = ((packet[3] & 0xe0) >> 5) - 1;
+       int id;
        int pres, traces;
 
-       if (id < 0)
+       id = ((packet[3] & 0xe0) >> 5) - 1;
+       if (id < 0 || id >= ETP_MAX_FINGERS)
                return;
 
        etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
        int id, sid;
 
        id = ((packet[0] & 0xe0) >> 5) - 1;
-       if (id < 0)
+       if (id < 0 || id >= ETP_MAX_FINGERS)
                return;
 
        sid = ((packet[3] & 0xe0) >> 5) - 1;
@@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
        input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
        input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);
 
-       if (sid >= 0) {
+       if (sid >= 0 && sid < ETP_MAX_FINGERS) {
                etd->mt[sid].x += delta_x2 * weight;
                etd->mt[sid].y -= delta_y2 * weight;
                input_mt_slot(dev, sid);
index 30102cb..3c9d072 100644
@@ -560,7 +560,7 @@ static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5 *ts)
 static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts)
 {
        int rc;
-       u8 cmd[HID_OUTPUT_BL_LAUNCH_APP];
+       u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE];
        u16 crc;
 
        put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd);
index 6de9007..4d80060 100644
@@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG
 config IPMMU_VMSA
        bool "Renesas VMSA-compatible IPMMU"
        depends on ARCH_RENESAS || COMPILE_TEST
+       depends on ARM || ARM64 || COMPILE_TEST
        depends on !GENERIC_ATOMIC64    # for IOMMU_IO_PGTABLE_LPAE
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
index e98f20a..9beeceb 100644
@@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
 extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
-extern int amd_iommu_init_devices(void);
-extern void amd_iommu_uninit_devices(void);
-extern void amd_iommu_init_notifier(void);
+extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
 extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);
 
 #ifdef CONFIG_AMD_IOMMU_DEBUGFS
index 329a406..c2d80a4 100644
@@ -759,6 +759,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
 }
 
 /*
+ * This function restarts GA logging in case the IOMMU experienced
+ * a GA log overflow.
+ */
+void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
+{
+       u32 status;
+
+       status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+       if (status & MMIO_STATUS_GALOG_RUN_MASK)
+               return;
+
+       pr_info_ratelimited("IOMMU GA Log restarting\n");
+
+       iommu_feature_disable(iommu, CONTROL_GALOG_EN);
+       iommu_feature_disable(iommu, CONTROL_GAINT_EN);
+
+       writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
+              iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+       iommu_feature_enable(iommu, CONTROL_GAINT_EN);
+       iommu_feature_enable(iommu, CONTROL_GALOG_EN);
+}
+
+/*
  * This function resets the command buffer if the IOMMU stopped fetching
  * commands from it.
  */
index 4a31464..dc1ec68 100644
@@ -845,6 +845,7 @@ amd_iommu_set_pci_msi_domain(struct device *dev, struct amd_iommu *iommu) { }
        (MMIO_STATUS_EVT_OVERFLOW_INT_MASK | \
         MMIO_STATUS_EVT_INT_MASK | \
         MMIO_STATUS_PPR_INT_MASK | \
+        MMIO_STATUS_GALOG_OVERFLOW_MASK | \
         MMIO_STATUS_GALOG_INT_MASK)
 
 irqreturn_t amd_iommu_int_thread(int irq, void *data)
@@ -868,10 +869,16 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
                }
 
 #ifdef CONFIG_IRQ_REMAP
-               if (status & MMIO_STATUS_GALOG_INT_MASK) {
+               if (status & (MMIO_STATUS_GALOG_INT_MASK |
+                             MMIO_STATUS_GALOG_OVERFLOW_MASK)) {
                        pr_devel("Processing IOMMU GA Log\n");
                        iommu_poll_ga_log(iommu);
                }
+
+               if (status & MMIO_STATUS_GALOG_OVERFLOW_MASK) {
+                       pr_info_ratelimited("IOMMU GA Log overflow\n");
+                       amd_iommu_restart_ga_log(iommu);
+               }
 #endif
 
                if (status & MMIO_STATUS_EVT_OVERFLOW_INT_MASK) {
@@ -2067,7 +2074,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 {
        struct io_pgtable_ops *pgtbl_ops;
        struct protection_domain *domain;
-       int pgtable = amd_iommu_pgtable;
+       int pgtable;
        int mode = DEFAULT_PGTABLE_LEVEL;
        int ret;
 
@@ -2084,6 +2091,10 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
                mode = PAGE_MODE_NONE;
        } else if (type == IOMMU_DOMAIN_UNMANAGED) {
                pgtable = AMD_IOMMU_V1;
+       } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
+               pgtable = amd_iommu_pgtable;
+       } else {
+               return NULL;
        }
 
        switch (pgtable) {
@@ -2118,6 +2129,15 @@ out_err:
        return NULL;
 }
 
+static inline u64 dma_max_address(void)
+{
+       if (amd_iommu_pgtable == AMD_IOMMU_V1)
+               return ~0ULL;
+
+       /* V2 with 4/5 level page table */
+       return ((1ULL << PM_LEVEL_SHIFT(amd_iommu_gpt_level)) - 1);
+}
+
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
        struct protection_domain *domain;
@@ -2134,7 +2154,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
                return NULL;
 
        domain->domain.geometry.aperture_start = 0;
-       domain->domain.geometry.aperture_end   = ~0ULL;
+       domain->domain.geometry.aperture_end   = dma_max_address();
        domain->domain.geometry.force_aperture = true;
 
        return &domain->domain;
@@ -2387,7 +2407,7 @@ static void amd_iommu_iotlb_sync(struct iommu_domain *domain,
        unsigned long flags;
 
        spin_lock_irqsave(&dom->lock, flags);
-       domain_flush_pages(dom, gather->start, gather->end - gather->start, 1);
+       domain_flush_pages(dom, gather->start, gather->end - gather->start + 1, 1);
        amd_iommu_domain_flush_complete(dom);
        spin_unlock_irqrestore(&dom->lock, flags);
 }
@@ -3493,8 +3513,7 @@ int amd_iommu_activate_guest_mode(void *data)
        struct irte_ga *entry = (struct irte_ga *) ir_data->entry;
        u64 valid;
 
-       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) ||
-           !entry || entry->lo.fields_vapic.guest_mode)
+       if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) || !entry)
                return 0;
 
        valid = entry->lo.fields_vapic.valid;
index ae09c62..c71afda 100644
@@ -517,6 +517,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
        { .compatible = "qcom,qcm2290-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,qdu1000-smmu-500", .data = &qcom_smmu_500_impl0_data  },
        { .compatible = "qcom,sc7180-smmu-500", .data = &qcom_smmu_500_impl0_data },
+       { .compatible = "qcom,sc7180-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sc7280-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8180x-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sc8280xp-smmu-500", .data = &qcom_smmu_500_impl0_data },
@@ -561,5 +562,14 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
        if (match)
                return qcom_smmu_create(smmu, match->data);
 
+       /*
+        * If you hit this WARN_ON() you are missing an entry in the
+        * qcom_smmu_impl_of_match[] table, and GPU per-process
+        * page tables will be broken.
+        */
+       WARN(of_device_is_compatible(np, "qcom,adreno-smmu"),
+            "Missing qcom_smmu_impl_of_match entry for: %s",
+            dev_name(smmu->dev));
+
        return smmu;
 }
index aecc7d1..e93906d 100644 (file)
@@ -781,7 +781,8 @@ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
-       mtk_iommu_tlb_flush_all(dom->bank->parent_data);
+       if (dom->bank)
+               mtk_iommu_tlb_flush_all(dom->bank->parent_data);
 }
 
 static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
index ea5a308..4054030 100644 (file)
@@ -1335,20 +1335,22 @@ static int rk_iommu_probe(struct platform_device *pdev)
        for (i = 0; i < iommu->num_irq; i++) {
                int irq = platform_get_irq(pdev, i);
 
-               if (irq < 0)
-                       return irq;
+               if (irq < 0) {
+                       err = irq;
+                       goto err_pm_disable;
+               }
 
                err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
                                       IRQF_SHARED, dev_name(dev), iommu);
-               if (err) {
-                       pm_runtime_disable(dev);
-                       goto err_remove_sysfs;
-               }
+               if (err)
+                       goto err_pm_disable;
        }
 
        dma_set_mask_and_coherent(dev, rk_ops->dma_bit_mask);
 
        return 0;
+err_pm_disable:
+       pm_runtime_disable(dev);
 err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
 err_put_group:
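
The reshuffled error path keeps a single unwind ladder whose labels run in reverse order of acquisition, so a failing platform_get_irq() now disables runtime PM instead of leaking it. A compact sketch of the pattern, with hypothetical resource names:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(int fail_irq)
{
        int err;

        if ((err = acquire("group")))
                return err;
        if ((err = acquire("sysfs")))
                goto err_put_group;
        if ((err = acquire("pm")))
                goto err_remove_sysfs;
        if (fail_irq) {
                err = -1;
                goto err_pm_disable;    /* unwind from the newest resource */
        }
        return 0;

err_pm_disable:
        release("pm");
err_remove_sysfs:
        release("sysfs");
err_put_group:
        release("group");
        return err;
}

int main(void)
{
        probe(1);
        return 0;
}
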
index a610821..afd6a18 100644 (file)
@@ -16,7 +16,13 @@ void gic_enable_of_quirks(const struct device_node *np,
                          const struct gic_quirk *quirks, void *data)
 {
        for (; quirks->desc; quirks++) {
-               if (!of_device_is_compatible(np, quirks->compatible))
+               if (!quirks->compatible && !quirks->property)
+                       continue;
+               if (quirks->compatible &&
+                   !of_device_is_compatible(np, quirks->compatible))
+                       continue;
+               if (quirks->property &&
+                   !of_property_read_bool(np, quirks->property))
                        continue;
                if (quirks->init(data))
                        pr_info("GIC: enabling workaround for %s\n",
@@ -28,7 +34,7 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
                void *data)
 {
        for (; quirks->desc; quirks++) {
-               if (quirks->compatible)
+               if (quirks->compatible || quirks->property)
                        continue;
                if (quirks->iidr != (quirks->mask & iidr))
                        continue;
index 27e3d4e..3db4592 100644 (file)
@@ -13,6 +13,7 @@
 struct gic_quirk {
        const char *desc;
        const char *compatible;
+       const char *property;
        bool (*init)(void *data);
        u32 iidr;
        u32 mask;
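
With the new property field, an OF quirk can key on a compatible string, a devicetree property, or both; an entry with neither is an IIDR-matched quirk and is skipped by the OF path (and, as the second hunk shows, vice versa). A standalone model of that matching logic, with stubbed single-value DT lookups:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct quirk {
        const char *desc;
        const char *compatible;
        const char *property;
};

/* Stub DT helpers: a node with one compatible and one boolean property. */
static bool is_compatible(const char *node_compat, const char *c)
{
        return c && node_compat && strcmp(node_compat, c) == 0;
}

static bool has_property(const char *node_prop, const char *p)
{
        return p && node_prop && strcmp(node_prop, p) == 0;
}

static bool of_quirk_matches(const struct quirk *q,
                             const char *node_compat, const char *node_prop)
{
        if (!q->compatible && !q->property)
                return false;   /* IIDR-only entry: handled by the non-OF path */
        if (q->compatible && !is_compatible(node_compat, q->compatible))
                return false;
        if (q->property && !has_property(node_prop, q->property))
                return false;
        return true;
}

int main(void)
{
        struct quirk mtk = {
                "GICR save quirk", NULL, "mediatek,broken-save-restore-fw"
        };

        printf("%d\n", of_quirk_matches(&mtk, "arm,gic-v3",
                                        "mediatek,broken-save-restore-fw")); /* 1 */
        return 0;
}
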
index 6fcee22..a605aa7 100644 (file)
@@ -39,6 +39,7 @@
 
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996    (1ULL << 0)
 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539  (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE         (1ULL << 2)
 
 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
 
@@ -1720,6 +1721,15 @@ static bool gic_enable_quirk_msm8996(void *data)
        return true;
 }
 
+static bool gic_enable_quirk_mtk_gicr(void *data)
+{
+       struct gic_chip_data *d = data;
+
+       d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
+
+       return true;
+}
+
 static bool gic_enable_quirk_cavium_38539(void *data)
 {
        struct gic_chip_data *d = data;
@@ -1793,6 +1803,11 @@ static const struct gic_quirk gic_quirks[] = {
                .init   = gic_enable_quirk_msm8996,
        },
        {
+               .desc   = "GICv3: Mediatek Chromebook GICR save problem",
+               .property = "mediatek,broken-save-restore-fw",
+               .init   = gic_enable_quirk_mtk_gicr,
+       },
+       {
                .desc   = "GICv3: HIP06 erratum 161010803",
                .iidr   = 0x0204043b,
                .mask   = 0xffffffff,
@@ -1834,6 +1849,11 @@ static void gic_enable_nmi_support(void)
        if (!gic_prio_masking_enabled())
                return;
 
+       if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
+               pr_warn("Skipping NMI enable due to firmware issues\n");
+               return;
+       }
+
        ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL);
        if (!ppi_nmi_refs)
                return;
index eada5e0..5101a3f 100644 (file)
@@ -240,26 +240,27 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
        struct irq_domain *domain;
        struct device_node *np;
        u32 num_pins;
+       int ret = 0;
+
+       parent = bus_get_dev_root(&platform_bus_type);
+       if (!parent)
+               return -ENODEV;
 
        for_each_child_of_node(pdev->dev.of_node, np) {
                if (!of_property_read_bool(np, "interrupt-controller"))
                        continue;
 
-               parent = bus_get_dev_root(&platform_bus_type);
-               if (parent) {
-                       child = of_platform_device_create(np, NULL, parent);
-                       put_device(parent);
-                       if (!child) {
-                               of_node_put(np);
-                               return -ENOMEM;
-                       }
+               child = of_platform_device_create(np, NULL, parent);
+               if (!child) {
+                       ret = -ENOMEM;
+                       break;
                }
 
                if (of_property_read_u32(child->dev.of_node, "num-pins",
                                         &num_pins) < 0) {
                        dev_err(&pdev->dev, "No num-pins property\n");
-                       of_node_put(np);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       break;
                }
 
                domain = platform_msi_create_device_domain(&child->dev, num_pins,
@@ -267,12 +268,16 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
                                                           &mbigen_domain_ops,
                                                           mgn_chip);
                if (!domain) {
-                       of_node_put(np);
-                       return -ENOMEM;
+                       ret = -ENOMEM;
+                       break;
                }
        }
 
-       return 0;
+       put_device(parent);
+       if (ret)
+               of_node_put(np);
+
+       return ret;
 }
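
The reworked loop relies on for_each_child_of_node() holding a reference on the current child and dropping it when it advances, so np needs an explicit of_node_put() only when the loop is left early; parent, by contrast, is taken once up front and must be put exactly once on every path. A small counter model of that ownership rule:

#include <stdio.h>

static int np_refs, parent_refs;

static void get_parent(void) { parent_refs++; }
static void put_parent(void) { parent_refs--; }
static void get_np(void)     { np_refs++; }
static void put_np(void)     { np_refs--; }

/* Model of the rewritten loop: break on error, drop refs once at the end. */
static int create_domains(int fail_at, int nchildren)
{
        int ret = 0, i;

        get_parent();
        for (i = 0; i < nchildren; i++) {
                get_np();                 /* iterator grabs the child */
                if (i == fail_at) {
                        ret = -1;
                        break;
                }
                put_np();                 /* iterator drops it on advance */
        }
        put_parent();
        if (ret)
                put_np();                 /* early exit: release the held child */
        return ret;
}

int main(void)
{
        create_domains(1, 3);
        printf("np_refs=%d parent_refs=%d\n", np_refs, parent_refs); /* 0 0 */
        return 0;
}
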
 
 #ifdef CONFIG_ACPI
index 2aaa9aa..7da18ef 100644 (file)
@@ -150,7 +150,7 @@ static const struct meson_gpio_irq_params s4_params = {
        INIT_MESON_S4_COMMON_DATA(82)
 };
 
-static const struct of_device_id meson_irq_gpio_matches[] = {
+static const struct of_device_id meson_irq_gpio_matches[] __maybe_unused = {
        { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
        { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
        { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
index 046c355..6d5ecc1 100644 (file)
@@ -50,7 +50,7 @@ void __iomem *mips_gic_base;
 
 static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);
 
-static DEFINE_SPINLOCK(gic_lock);
+static DEFINE_RAW_SPINLOCK(gic_lock);
 static struct irq_domain *gic_irq_domain;
 static int gic_shared_intrs;
 static unsigned int gic_cpu_pin;
@@ -210,7 +210,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
        irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
        switch (type & IRQ_TYPE_SENSE_MASK) {
        case IRQ_TYPE_EDGE_FALLING:
                pol = GIC_POL_FALLING_EDGE;
@@ -250,7 +250,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
        else
                irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
                                                 handle_level_irq, NULL);
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 
        return 0;
 }
@@ -268,7 +268,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                return -EINVAL;
 
        /* Assumption: cpumask refers to a single CPU */
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
 
        /* Re-route this IRQ */
        write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));
@@ -279,7 +279,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
                set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 
        return IRQ_SET_MASK_OK;
 }
@@ -357,12 +357,12 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
        cd = irq_data_get_irq_chip_data(d);
        cd->mask = false;
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
        for_each_online_cpu(cpu) {
                write_gic_vl_other(mips_cm_vp_id(cpu));
                write_gic_vo_rmask(BIT(intr));
        }
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
@@ -375,12 +375,12 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
        cd = irq_data_get_irq_chip_data(d);
        cd->mask = true;
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
        for_each_online_cpu(cpu) {
                write_gic_vl_other(mips_cm_vp_id(cpu));
                write_gic_vo_smask(BIT(intr));
        }
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static void gic_all_vpes_irq_cpu_online(void)
@@ -393,19 +393,21 @@ static void gic_all_vpes_irq_cpu_online(void)
        unsigned long flags;
        int i;
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
 
        for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
                unsigned int intr = local_intrs[i];
                struct gic_all_vpes_chip_data *cd;
 
+               if (!gic_local_irq_is_routable(intr))
+                       continue;
                cd = &gic_all_vpes_chip_data[intr];
                write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
                if (cd->mask)
                        write_gic_vl_smask(BIT(intr));
        }
 
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 }
 
 static struct irq_chip gic_all_vpes_local_irq_controller = {
@@ -435,11 +437,11 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 
        data = irq_get_irq_data(virq);
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
        write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
        write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
        irq_data_update_effective_affinity(data, cpumask_of(cpu));
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 
        return 0;
 }
@@ -531,12 +533,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
        if (!gic_local_irq_is_routable(intr))
                return -EPERM;
 
-       spin_lock_irqsave(&gic_lock, flags);
+       raw_spin_lock_irqsave(&gic_lock, flags);
        for_each_online_cpu(cpu) {
                write_gic_vl_other(mips_cm_vp_id(cpu));
                write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
        }
-       spin_unlock_irqrestore(&gic_lock, flags);
+       raw_spin_unlock_irqrestore(&gic_lock, flags);
 
        return 0;
 }
index 55a0372..1c84981 100644 (file)
@@ -312,14 +312,14 @@ static int lpg_calc_freq(struct lpg_channel *chan, uint64_t period)
                max_res = LPG_RESOLUTION_9BIT;
        }
 
-       min_period = (u64)NSEC_PER_SEC *
-                       div64_u64((1 << pwm_resolution_arr[0]), clk_rate_arr[clk_len - 1]);
+       min_period = div64_u64((u64)NSEC_PER_SEC * (1 << pwm_resolution_arr[0]),
+                              clk_rate_arr[clk_len - 1]);
        if (period <= min_period)
                return -EINVAL;
 
        /* Limit period to largest possible value, to avoid overflows */
-       max_period = (u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV *
-                       div64_u64((1 << LPG_MAX_M), 1024);
+       max_period = div64_u64((u64)NSEC_PER_SEC * max_res * LPG_MAX_PREDIV * (1 << LPG_MAX_M),
+                              1024);
        if (period > max_period)
                period = max_period;
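
Doing the division last is the point of both changes: div64_u64(1 << resolution, clk_rate) truncates to zero whenever the clock rate exceeds 2^resolution, wiping out the whole product. A runnable before/after comparison (the 9-bit resolution and 19.2 MHz rate are illustrative values, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t res = 1ULL << 9;        /* 9-bit PWM resolution */
        uint64_t clk = 19200000;         /* 19.2 MHz, illustrative */

        uint64_t before = NSEC_PER_SEC * (res / clk);   /* 512/19200000 == 0 */
        uint64_t after  = (NSEC_PER_SEC * res) / clk;   /* ~26666 ns */

        printf("before: %llu ns, after: %llu ns\n",
               (unsigned long long)before, (unsigned long long)after);
        return 0;
}
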
 
index c4a705c..fc6a12a 100644 (file)
@@ -98,6 +98,7 @@ static ssize_t mbox_test_message_write(struct file *filp,
                                       size_t count, loff_t *ppos)
 {
        struct mbox_test_device *tdev = filp->private_data;
+       char *message;
        void *data;
        int ret;
 
@@ -113,12 +114,13 @@ static ssize_t mbox_test_message_write(struct file *filp,
                return -EINVAL;
        }
 
-       mutex_lock(&tdev->mutex);
-
-       tdev->message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
-       if (!tdev->message)
+       message = kzalloc(MBOX_MAX_MSG_LEN, GFP_KERNEL);
+       if (!message)
                return -ENOMEM;
 
+       mutex_lock(&tdev->mutex);
+
+       tdev->message = message;
        ret = copy_from_user(tdev->message, userbuf, count);
        if (ret) {
                ret = -EFAULT;
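
Moving the allocation in front of mutex_lock() removes the path where -ENOMEM was returned with tdev->mutex still held. A pthread sketch of the corrected ordering (names illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *message;

static int write_message(size_t len)
{
        char *buf = calloc(1, len);      /* allocate with no locks held */

        if (!buf)
                return -1;               /* nothing to unlock on failure */

        pthread_mutex_lock(&lock);
        message = buf;                   /* publish under the lock */
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        return write_message(128);
}
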
index 4739ed8..9ea285f 100644 (file)
@@ -5516,7 +5516,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 
        sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0,
                                      &dd_idx, NULL);
-       end_sector = bio_end_sector(raid_bio);
+       end_sector = sector + bio_sectors(raid_bio);
 
        rcu_read_lock();
        if (r5c_big_stripe_cached(conf, sector))
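
The end sector must live in the same address space as sector: raid5_compute_sector() returns a sector remapped onto the member device, while bio_end_sector() is still in array-relative coordinates, so mixing the two corrupted the range. Adding the bio's sector count to the remapped start keeps both endpoints consistent (numbers below are illustrative):

#include <stdio.h>

int main(void)
{
        unsigned long long bi_sector = 1000000;  /* array-relative start */
        unsigned long long sectors   = 8;        /* bio length in sectors */
        unsigned long long mapped    = 31250;    /* device-relative start */

        unsigned long long wrong = bi_sector + sectors;  /* array space */
        unsigned long long right = mapped + sectors;     /* device space */

        printf("bio_end_sector (array): %llu, sector + bio_sectors (device): %llu\n",
               wrong, right);
        return 0;
}
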
index 769ea6b..241b162 100644 (file)
@@ -1091,7 +1091,8 @@ void cec_received_msg_ts(struct cec_adapter *adap,
        mutex_lock(&adap->lock);
        dprintk(2, "%s: %*ph\n", __func__, msg->len, msg->msg);
 
-       adap->last_initiator = 0xff;
+       if (!adap->transmit_in_progress)
+               adap->last_initiator = 0xff;
 
        /* Check if this message was for us (directed or broadcast). */
        if (!cec_msg_is_broadcast(msg)) {
@@ -1585,7 +1586,7 @@ static void cec_claim_log_addrs(struct cec_adapter *adap, bool block)
  *
  * This function is called with adap->lock held.
  */
-static int cec_adap_enable(struct cec_adapter *adap)
+int cec_adap_enable(struct cec_adapter *adap)
 {
        bool enable;
        int ret = 0;
@@ -1595,6 +1596,9 @@ static int cec_adap_enable(struct cec_adapter *adap)
        if (adap->needs_hpd)
                enable = enable && adap->phys_addr != CEC_PHYS_ADDR_INVALID;
 
+       if (adap->devnode.unregistered)
+               enable = false;
+
        if (enable == adap->is_enabled)
                return 0;
 
index af358e9..7e153c5 100644 (file)
@@ -191,6 +191,8 @@ static void cec_devnode_unregister(struct cec_adapter *adap)
        mutex_lock(&adap->lock);
        __cec_s_phys_addr(adap, CEC_PHYS_ADDR_INVALID, false);
        __cec_s_log_addrs(adap, NULL, false);
+       // Disable the adapter (since adap->devnode.unregistered is true)
+       cec_adap_enable(adap);
        mutex_unlock(&adap->lock);
 
        cdev_device_del(&devnode->cdev, &devnode->dev);
index b78df93..ed1f8c6 100644 (file)
@@ -47,6 +47,7 @@ int cec_monitor_pin_cnt_inc(struct cec_adapter *adap);
 void cec_monitor_pin_cnt_dec(struct cec_adapter *adap);
 int cec_adap_status(struct seq_file *file, void *priv);
 int cec_thread_func(void *_adap);
+int cec_adap_enable(struct cec_adapter *adap);
 void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block);
 int __cec_s_log_addrs(struct cec_adapter *adap,
                      struct cec_log_addrs *log_addrs, bool block);
index c2d2792..baf6454 100644 (file)
@@ -151,6 +151,12 @@ struct dvb_ca_private {
 
        /* mutex serializing ioctls */
        struct mutex ioctl_mutex;
+
+       /* A mutex used when a device is disconnected */
+       struct mutex remove_mutex;
+
+       /* Whether the device is disconnected */
+       int exit;
 };
 
 static void dvb_ca_private_free(struct dvb_ca_private *ca)
@@ -187,7 +193,7 @@ static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
 static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
                                    u8 *ebuf, int ecount);
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
-                                    u8 *ebuf, int ecount);
+                                    u8 *ebuf, int ecount, int size_write_flag);
 
 /**
  * findstr - Safely find needle in haystack.
@@ -370,7 +376,7 @@ static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
        ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
        if (ret)
                return ret;
-       ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
+       ret = dvb_ca_en50221_write_data(ca, slot, buf, 2, CMDREG_SW);
        if (ret != 2)
                return -EIO;
        ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
@@ -778,11 +784,13 @@ exit:
  * @buf: The data in this buffer is treated as a complete link-level packet to
  *      be written.
  * @bytes_write: Size of ebuf.
+ * @size_write_flag: A flag for the Command Register that indicates whether the
+ * link size information will be written.
  *
  * return: Number of bytes written, or < 0 on error.
  */
 static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
-                                    u8 *buf, int bytes_write)
+                                    u8 *buf, int bytes_write, int size_write_flag)
 {
        struct dvb_ca_slot *sl = &ca->slot_info[slot];
        int status;
@@ -817,7 +825,7 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
 
        /* OK, set HC bit */
        status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
-                                           IRQEN | CMDREG_HC);
+                                           IRQEN | CMDREG_HC | size_write_flag);
        if (status)
                goto exit;
 
@@ -1508,7 +1516,7 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
 
                        mutex_lock(&sl->slot_lock);
                        status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
-                                                          fraglen + 2);
+                                                          fraglen + 2, 0);
                        mutex_unlock(&sl->slot_lock);
                        if (status == (fraglen + 2)) {
                                written = 1;
@@ -1709,12 +1717,22 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 
        dprintk("%s\n", __func__);
 
-       if (!try_module_get(ca->pub->owner))
+       mutex_lock(&ca->remove_mutex);
+
+       if (ca->exit) {
+               mutex_unlock(&ca->remove_mutex);
+               return -ENODEV;
+       }
+
+       if (!try_module_get(ca->pub->owner)) {
+               mutex_unlock(&ca->remove_mutex);
                return -EIO;
+       }
 
        err = dvb_generic_open(inode, file);
        if (err < 0) {
                module_put(ca->pub->owner);
+               mutex_unlock(&ca->remove_mutex);
                return err;
        }
 
@@ -1739,6 +1757,7 @@ static int dvb_ca_en50221_io_open(struct inode *inode, struct file *file)
 
        dvb_ca_private_get(ca);
 
+       mutex_unlock(&ca->remove_mutex);
        return 0;
 }
 
@@ -1758,6 +1777,8 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 
        dprintk("%s\n", __func__);
 
+       mutex_lock(&ca->remove_mutex);
+
        /* mark the CA device as closed */
        ca->open = 0;
        dvb_ca_en50221_thread_update_delay(ca);
@@ -1768,6 +1789,13 @@ static int dvb_ca_en50221_io_release(struct inode *inode, struct file *file)
 
        dvb_ca_private_put(ca);
 
+       if (dvbdev->users == 1 && ca->exit == 1) {
+               mutex_unlock(&ca->remove_mutex);
+               wake_up(&dvbdev->wait_queue);
+       } else {
+               mutex_unlock(&ca->remove_mutex);
+       }
+
        return err;
 }
 
@@ -1891,6 +1919,7 @@ int dvb_ca_en50221_init(struct dvb_adapter *dvb_adapter,
        }
 
        mutex_init(&ca->ioctl_mutex);
+       mutex_init(&ca->remove_mutex);
 
        if (signal_pending(current)) {
                ret = -EINTR;
@@ -1933,6 +1962,14 @@ void dvb_ca_en50221_release(struct dvb_ca_en50221 *pubca)
 
        dprintk("%s\n", __func__);
 
+       mutex_lock(&ca->remove_mutex);
+       ca->exit = 1;
+       mutex_unlock(&ca->remove_mutex);
+
+       if (ca->dvbdev->users < 1)
+               wait_event(ca->dvbdev->wait_queue,
+                               ca->dvbdev->users == 1);
+
        /* shutdown the thread if there was one */
        kthread_stop(ca->thread);
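
The remove_mutex/exit pair implements a small open-versus-unregister handshake: once exit is set no new open can succeed, and the release path wakes the unregister path when the last user drops off, so the device data can be freed safely. A userspace pthread model of the same protocol (simplified: users here counts openers directly, rather than following the dvb_device users convention):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t remove_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  drained      = PTHREAD_COND_INITIALIZER;
static int users, exiting;

static int dev_open(void)
{
        pthread_mutex_lock(&remove_mutex);
        if (exiting) {
                pthread_mutex_unlock(&remove_mutex);
                return -1;                     /* -ENODEV */
        }
        users++;
        pthread_mutex_unlock(&remove_mutex);
        return 0;
}

static void dev_release(void)
{
        pthread_mutex_lock(&remove_mutex);
        if (--users == 0 && exiting)
                pthread_cond_signal(&drained); /* wake the unregister path */
        pthread_mutex_unlock(&remove_mutex);
}

static void dev_unregister(void)
{
        pthread_mutex_lock(&remove_mutex);
        exiting = 1;                           /* no new opens past this point */
        while (users > 0)
                pthread_cond_wait(&drained, &remove_mutex);
        pthread_mutex_unlock(&remove_mutex);
        puts("safe to free");
}

int main(void)
{
        dev_open();
        dev_release();
        dev_unregister();
        return 0;
}
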
 
index 398c862..7c4d86b 100644 (file)
@@ -115,12 +115,12 @@ static inline int dvb_dmx_swfilter_payload(struct dvb_demux_feed *feed,
 
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
-       feed->cc = cc;
        if (!ccok) {
                set_buf_flags(feed, DMX_BUFFER_FLAG_DISCONTINUITY_DETECTED);
                dprintk_sect_loss("missed packet: %d instead of %d!\n",
                                  cc, (feed->cc + 1) & 0x0f);
        }
+       feed->cc = cc;
 
        if (buf[1] & 0x40)      // PUSI ?
                feed->peslen = 0xfffa;
@@ -300,7 +300,6 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
 
        cc = buf[3] & 0x0f;
        ccok = ((feed->cc + 1) & 0x0f) == cc;
-       feed->cc = cc;
 
        if (buf[3] & 0x20) {
                /* adaptation field present, check for discontinuity_indicator */
@@ -336,6 +335,7 @@ static int dvb_dmx_swfilter_section_packet(struct dvb_demux_feed *feed,
                feed->pusi_seen = false;
                dvb_dmx_swfilter_section_new(feed);
        }
+       feed->cc = cc;
 
        if (buf[1] & 0x40) {
                /* PUSI=1 (is set), section boundary is here */
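
Both hunks commit feed->cc only after the old value has been used for the continuity check, the log message, and the section-state handling, so a discontinuity is reported against the counter that was actually expected. The 4-bit check in miniature:

#include <stdio.h>

int main(void)
{
        int feed_cc = 5;                 /* last committed counter */
        int cc = 8;                      /* counter from this packet */

        int expected = (feed_cc + 1) & 0x0f;
        int ccok = (expected == cc);

        if (!ccok)
                printf("missed packet: %d instead of %d!\n", cc, expected);
        feed_cc = cc;                    /* commit only after using the old value */
        printf("feed_cc=%d\n", feed_cc);
        return 0;
}
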
index cc0a789..bc6950a 100644 (file)
@@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
        }
 
        if (events->eventw == events->eventr) {
-               int ret;
+               struct wait_queue_entry wait;
+               int ret = 0;
 
                if (flags & O_NONBLOCK)
                        return -EWOULDBLOCK;
 
-               ret = wait_event_interruptible(events->wait_queue,
-                                              dvb_frontend_test_event(fepriv, events));
-
+               init_waitqueue_entry(&wait, current);
+               add_wait_queue(&events->wait_queue, &wait);
+               while (!dvb_frontend_test_event(fepriv, events)) {
+                       wait_woken(&wait, TASK_INTERRUPTIBLE, 0);
+                       if (signal_pending(current)) {
+                               ret = -ERESTARTSYS;
+                               break;
+                       }
+               }
+               remove_wait_queue(&events->wait_queue, &wait);
                if (ret < 0)
                        return ret;
        }
@@ -809,15 +817,26 @@ static void dvb_frontend_stop(struct dvb_frontend *fe)
 
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
 
+       mutex_lock(&fe->remove_mutex);
+
        if (fe->exit != DVB_FE_DEVICE_REMOVED)
                fe->exit = DVB_FE_NORMAL_EXIT;
        mb();
 
-       if (!fepriv->thread)
+       if (!fepriv->thread) {
+               mutex_unlock(&fe->remove_mutex);
                return;
+       }
 
        kthread_stop(fepriv->thread);
 
+       mutex_unlock(&fe->remove_mutex);
+
+       if (fepriv->dvbdev->users < -1) {
+               wait_event(fepriv->dvbdev->wait_queue,
+                          fepriv->dvbdev->users == -1);
+       }
+
        sema_init(&fepriv->sem, 1);
        fepriv->state = FESTATE_IDLE;
 
@@ -2761,9 +2780,13 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
        struct dvb_adapter *adapter = fe->dvb;
        int ret;
 
+       mutex_lock(&fe->remove_mutex);
+
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
-       if (fe->exit == DVB_FE_DEVICE_REMOVED)
-               return -ENODEV;
+       if (fe->exit == DVB_FE_DEVICE_REMOVED) {
+               ret = -ENODEV;
+               goto err_remove_mutex;
+       }
 
        if (adapter->mfe_shared == 2) {
                mutex_lock(&adapter->mfe_lock);
@@ -2771,7 +2794,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                        if (adapter->mfe_dvbdev &&
                            !adapter->mfe_dvbdev->writers) {
                                mutex_unlock(&adapter->mfe_lock);
-                               return -EBUSY;
+                               ret = -EBUSY;
+                               goto err_remove_mutex;
                        }
                        adapter->mfe_dvbdev = dvbdev;
                }
@@ -2794,8 +2818,10 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                        while (mferetry-- && (mfedev->users != -1 ||
                                              mfepriv->thread)) {
                                if (msleep_interruptible(500)) {
-                                       if (signal_pending(current))
-                                               return -EINTR;
+                                       if (signal_pending(current)) {
+                                               ret = -EINTR;
+                                               goto err_remove_mutex;
+                                       }
                                }
                        }
 
@@ -2807,7 +2833,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
                                if (mfedev->users != -1 ||
                                    mfepriv->thread) {
                                        mutex_unlock(&adapter->mfe_lock);
-                                       return -EBUSY;
+                                       ret = -EBUSY;
+                                       goto err_remove_mutex;
                                }
                                adapter->mfe_dvbdev = dvbdev;
                        }
@@ -2866,6 +2893,8 @@ static int dvb_frontend_open(struct inode *inode, struct file *file)
 
        if (adapter->mfe_shared)
                mutex_unlock(&adapter->mfe_lock);
+
+       mutex_unlock(&fe->remove_mutex);
        return ret;
 
 err3:
@@ -2887,6 +2916,9 @@ err1:
 err0:
        if (adapter->mfe_shared)
                mutex_unlock(&adapter->mfe_lock);
+
+err_remove_mutex:
+       mutex_unlock(&fe->remove_mutex);
        return ret;
 }
 
@@ -2897,6 +2929,8 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
        struct dvb_frontend_private *fepriv = fe->frontend_priv;
        int ret;
 
+       mutex_lock(&fe->remove_mutex);
+
        dev_dbg(fe->dvb->device, "%s:\n", __func__);
 
        if ((file->f_flags & O_ACCMODE) != O_RDONLY) {
@@ -2918,10 +2952,18 @@ static int dvb_frontend_release(struct inode *inode, struct file *file)
                }
                mutex_unlock(&fe->dvb->mdev_lock);
 #endif
-               if (fe->exit != DVB_FE_NO_EXIT)
-                       wake_up(&dvbdev->wait_queue);
                if (fe->ops.ts_bus_ctrl)
                        fe->ops.ts_bus_ctrl(fe, 0);
+
+               if (fe->exit != DVB_FE_NO_EXIT) {
+                       mutex_unlock(&fe->remove_mutex);
+                       wake_up(&dvbdev->wait_queue);
+               } else {
+                       mutex_unlock(&fe->remove_mutex);
+               }
+
+       } else {
+               mutex_unlock(&fe->remove_mutex);
        }
 
        dvb_frontend_put(fe);
@@ -3022,6 +3064,7 @@ int dvb_register_frontend(struct dvb_adapter *dvb,
        fepriv = fe->frontend_priv;
 
        kref_init(&fe->refcount);
+       mutex_init(&fe->remove_mutex);
 
        /*
         * After initialization, there need to be two references: one
index 8a2febf..8bb8dd3 100644 (file)
@@ -1564,15 +1564,43 @@ static long dvb_net_ioctl(struct file *file,
        return dvb_usercopy(file, cmd, arg, dvb_net_do_ioctl);
 }
 
+static int locked_dvb_net_open(struct inode *inode, struct file *file)
+{
+       struct dvb_device *dvbdev = file->private_data;
+       struct dvb_net *dvbnet = dvbdev->priv;
+       int ret;
+
+       if (mutex_lock_interruptible(&dvbnet->remove_mutex))
+               return -ERESTARTSYS;
+
+       if (dvbnet->exit) {
+               mutex_unlock(&dvbnet->remove_mutex);
+               return -ENODEV;
+       }
+
+       ret = dvb_generic_open(inode, file);
+
+       mutex_unlock(&dvbnet->remove_mutex);
+
+       return ret;
+}
+
 static int dvb_net_close(struct inode *inode, struct file *file)
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dvb_net *dvbnet = dvbdev->priv;
 
+       mutex_lock(&dvbnet->remove_mutex);
+
        dvb_generic_release(inode, file);
 
-       if(dvbdev->users == 1 && dvbnet->exit == 1)
+       if (dvbdev->users == 1 && dvbnet->exit == 1) {
+               mutex_unlock(&dvbnet->remove_mutex);
                wake_up(&dvbdev->wait_queue);
+       } else {
+               mutex_unlock(&dvbnet->remove_mutex);
+       }
+
        return 0;
 }
 
@@ -1580,7 +1608,7 @@ static int dvb_net_close(struct inode *inode, struct file *file)
 static const struct file_operations dvb_net_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = dvb_net_ioctl,
-       .open = dvb_generic_open,
+       .open = locked_dvb_net_open,
        .release = dvb_net_close,
        .llseek = noop_llseek,
 };
@@ -1599,10 +1627,13 @@ void dvb_net_release (struct dvb_net *dvbnet)
 {
        int i;
 
+       mutex_lock(&dvbnet->remove_mutex);
        dvbnet->exit = 1;
+       mutex_unlock(&dvbnet->remove_mutex);
+
        if (dvbnet->dvbdev->users < 1)
                wait_event(dvbnet->dvbdev->wait_queue,
-                               dvbnet->dvbdev->users==1);
+                               dvbnet->dvbdev->users == 1);
 
        dvb_unregister_device(dvbnet->dvbdev);
 
@@ -1621,6 +1652,7 @@ int dvb_net_init (struct dvb_adapter *adap, struct dvb_net *dvbnet,
        int i;
 
        mutex_init(&dvbnet->ioctl_mutex);
+       mutex_init(&dvbnet->remove_mutex);
        dvbnet->demux = dmx;
 
        for (i=0; i<DVB_NET_DEVICES_MAX; i++)
index e9b3ce0..a4b05e3 100644 (file)
@@ -27,6 +27,7 @@
 #include <media/tuner.h>
 
 static DEFINE_MUTEX(dvbdev_mutex);
+static LIST_HEAD(dvbdevfops_list);
 static int dvbdev_debug;
 
 module_param(dvbdev_debug, int, 0644);
@@ -453,14 +454,15 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
                        enum dvb_device_type type, int demux_sink_pads)
 {
        struct dvb_device *dvbdev;
-       struct file_operations *dvbdevfops;
+       struct file_operations *dvbdevfops = NULL;
+       struct dvbdevfops_node *node = NULL, *new_node = NULL;
        struct device *clsdev;
        int minor;
        int id, ret;
 
        mutex_lock(&dvbdev_register_lock);
 
-       if ((id = dvbdev_get_free_id (adap, type)) < 0){
+       if ((id = dvbdev_get_free_id (adap, type)) < 0) {
                mutex_unlock(&dvbdev_register_lock);
                *pdvbdev = NULL;
                pr_err("%s: couldn't find free device id\n", __func__);
@@ -468,18 +470,45 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        }
 
        *pdvbdev = dvbdev = kzalloc(sizeof(*dvbdev), GFP_KERNEL);
-
        if (!dvbdev){
                mutex_unlock(&dvbdev_register_lock);
                return -ENOMEM;
        }
 
-       dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+       /*
+        * When a device of the same type is probe()d more than once,
+        * the fops allocated on the first probe are reused; this prevents
+        * the memory leak that repeated probing would otherwise cause.
+        */
+       list_for_each_entry(node, &dvbdevfops_list, list_head) {
+               if (node->fops->owner == adap->module &&
+                               node->type == type &&
+                               node->template == template) {
+                       dvbdevfops = node->fops;
+                       break;
+               }
+       }
 
-       if (!dvbdevfops){
-               kfree (dvbdev);
-               mutex_unlock(&dvbdev_register_lock);
-               return -ENOMEM;
+       if (dvbdevfops == NULL) {
+               dvbdevfops = kmemdup(template->fops, sizeof(*dvbdevfops), GFP_KERNEL);
+               if (!dvbdevfops) {
+                       kfree(dvbdev);
+                       mutex_unlock(&dvbdev_register_lock);
+                       return -ENOMEM;
+               }
+
+               new_node = kzalloc(sizeof(struct dvbdevfops_node), GFP_KERNEL);
+               if (!new_node) {
+                       kfree(dvbdevfops);
+                       kfree(dvbdev);
+                       mutex_unlock(&dvbdev_register_lock);
+                       return -ENOMEM;
+               }
+
+               new_node->fops = dvbdevfops;
+               new_node->type = type;
+               new_node->template = template;
+               list_add_tail (&new_node->list_head, &dvbdevfops_list);
        }
 
        memcpy(dvbdev, template, sizeof(struct dvb_device));
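
The list lookup makes the kmemdup()'d fops a singleton per (owner, type, template): a second probe of the same device type reuses the first copy instead of leaking a fresh one, and the copies are freed only once, at module exit. A standalone model of the lookup-or-create step:

#include <stdio.h>
#include <stdlib.h>

struct node {
        const void *owner, *template;
        int type;
        struct node *next;
};
static struct node *fops_list;

/* Return the cached entry for this identity, allocating it on first use. */
static struct node *get_fops(const void *owner, int type, const void *template)
{
        struct node *n;

        for (n = fops_list; n; n = n->next)
                if (n->owner == owner && n->type == type &&
                    n->template == template)
                        return n;        /* reuse: no new allocation */

        n = calloc(1, sizeof(*n));
        if (!n)
                return NULL;
        n->owner = owner;
        n->type = type;
        n->template = template;
        n->next = fops_list;
        fops_list = n;
        return n;
}

int main(void)
{
        static int mod, tmpl;

        printf("%s\n", get_fops(&mod, 0, &tmpl) == get_fops(&mod, 0, &tmpl)
               ? "shared" : "duplicated");      /* prints "shared" */
        return 0;
}
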
@@ -490,20 +519,20 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
        dvbdev->priv = priv;
        dvbdev->fops = dvbdevfops;
        init_waitqueue_head (&dvbdev->wait_queue);
-
        dvbdevfops->owner = adap->module;
-
        list_add_tail (&dvbdev->list_head, &adap->device_list);
-
        down_write(&minor_rwsem);
 #ifdef CONFIG_DVB_DYNAMIC_MINORS
        for (minor = 0; minor < MAX_DVB_MINORS; minor++)
                if (dvb_minors[minor] == NULL)
                        break;
-
        if (minor == MAX_DVB_MINORS) {
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
                up_write(&minor_rwsem);
                mutex_unlock(&dvbdev_register_lock);
@@ -512,41 +541,47 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
 #else
        minor = nums2minor(adap->num, type, id);
 #endif
-
        dvbdev->minor = minor;
        dvb_minors[minor] = dvb_device_get(dvbdev);
        up_write(&minor_rwsem);
-
        ret = dvb_register_media_device(dvbdev, type, minor, demux_sink_pads);
        if (ret) {
                pr_err("%s: dvb_register_media_device failed to create the mediagraph\n",
                      __func__);
-
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                dvb_media_device_free(dvbdev);
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
                mutex_unlock(&dvbdev_register_lock);
                return ret;
        }
 
-       mutex_unlock(&dvbdev_register_lock);
-
        clsdev = device_create(dvb_class, adap->device,
                               MKDEV(DVB_MAJOR, minor),
                               dvbdev, "dvb%d.%s%d", adap->num, dnames[type], id);
        if (IS_ERR(clsdev)) {
                pr_err("%s: failed to create device dvb%d.%s%d (%ld)\n",
                       __func__, adap->num, dnames[type], id, PTR_ERR(clsdev));
+               if (new_node) {
+                       list_del (&new_node->list_head);
+                       kfree(dvbdevfops);
+                       kfree(new_node);
+               }
                dvb_media_device_free(dvbdev);
                list_del (&dvbdev->list_head);
-               kfree(dvbdevfops);
                kfree(dvbdev);
+               mutex_unlock(&dvbdev_register_lock);
                return PTR_ERR(clsdev);
        }
+
        dprintk("DVB: register adapter%d/%s%d @ minor: %i (0x%02x)\n",
                adap->num, dnames[type], id, minor, minor);
 
+       mutex_unlock(&dvbdev_register_lock);
        return 0;
 }
 EXPORT_SYMBOL(dvb_register_device);
@@ -575,7 +610,6 @@ static void dvb_free_device(struct kref *ref)
 {
        struct dvb_device *dvbdev = container_of(ref, struct dvb_device, ref);
 
-       kfree (dvbdev->fops);
        kfree (dvbdev);
 }
 
@@ -1081,9 +1115,17 @@ error:
 
 static void __exit exit_dvbdev(void)
 {
+       struct dvbdevfops_node *node, *next;
+
        class_destroy(dvb_class);
        cdev_del(&dvb_device_cdev);
        unregister_chrdev_region(MKDEV(DVB_MAJOR, 0), MAX_DVB_MINORS);
+
+       list_for_each_entry_safe(node, next, &dvbdevfops_list, list_head) {
+               list_del (&node->list_head);
+               kfree(node->fops);
+               kfree(node);
+       }
 }
 
 subsys_initcall(init_dvbdev);
index 1f1753f..0782f83 100644 (file)
@@ -798,7 +798,7 @@ MODULE_DEVICE_TABLE(i2c, mn88443x_i2c_id);
 static struct i2c_driver mn88443x_driver = {
        .driver = {
                .name = "mn88443x",
-               .of_match_table = of_match_ptr(mn88443x_of_match),
+               .of_match_table = mn88443x_of_match,
        },
        .probe_new = mn88443x_probe,
        .remove   = mn88443x_remove,
index 8287851..d85bfbb 100644 (file)
@@ -697,7 +697,7 @@ static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
        netup_unidvb_dma_enable(dma, 0);
        msleep(50);
        cancel_work_sync(&dma->work);
-       del_timer(&dma->timeout);
+       del_timer_sync(&dma->timeout);
 }
 
 static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
@@ -887,12 +887,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
                ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
                ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
                pci_dev->irq);
-       if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
-                       "netup_unidvb", pci_dev) < 0) {
-               dev_err(&pci_dev->dev,
-                       "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
-               goto irq_request_err;
-       }
+
        ndev->dma_size = 2 * 188 *
                NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
        ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
@@ -933,6 +928,14 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
                dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
                goto dma_setup_err;
        }
+
+       if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
+                       "netup_unidvb", pci_dev) < 0) {
+               dev_err(&pci_dev->dev,
+                       "%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
+               goto dma_setup_err;
+       }
+
        dev_info(&pci_dev->dev,
                "netup_unidvb: device has been initialized\n");
        return 0;
@@ -951,8 +954,6 @@ spi_setup_err:
        dma_free_coherent(&pci_dev->dev, ndev->dma_size,
                        ndev->dma_virt, ndev->dma_phys);
 dma_alloc_err:
-       free_irq(pci_dev->irq, pci_dev);
-irq_request_err:
        iounmap(ndev->lmmio1);
 pci_bar1_error:
        iounmap(ndev->lmmio0);
index 2999155..0fbd030 100644 (file)
@@ -584,6 +584,9 @@ static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
 
        if (!(ctx->dev->dec_capability & VCODEC_CAPABILITY_4K_DISABLED)) {
                for (i = 0; i < num_supported_formats; i++) {
+                       if (mtk_video_formats[i].type != MTK_FMT_DEC)
+                               continue;
+
                        mtk_video_formats[i].frmsize.max_width =
                                VCODEC_DEC_4K_CODED_WIDTH;
                        mtk_video_formats[i].frmsize.max_height =
index 898f321..8640db3 100644 (file)
@@ -353,7 +353,6 @@ static int video_get_subdev_format(struct camss_video *video,
        if (subdev == NULL)
                return -EPIPE;
 
-       memset(&fmt, 0, sizeof(fmt));
        fmt.pad = pad;
 
        ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
index 8355185..61cfaaf 100644 (file)
@@ -397,10 +397,12 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx, int bit_depth)
        if (!raw_vpu_fmt)
                return -EINVAL;
 
-       if (ctx->is_encoder)
+       if (ctx->is_encoder) {
                encoded_fmt = &ctx->dst_fmt;
-       else
+               ctx->vpu_src_fmt = raw_vpu_fmt;
+       } else {
                encoded_fmt = &ctx->src_fmt;
+       }
 
        hantro_reset_fmt(&raw_fmt, raw_vpu_fmt);
        raw_fmt.width = encoded_fmt->width;
index 44540de..d3b5cb4 100644 (file)
@@ -101,6 +101,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
                if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
                        if (msg[i].addr ==
                                ce6230_zl10353_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = DEMOD_READ;
                                req.value = msg[i].addr >> 1;
                                req.index = msg[i].buf[0];
@@ -117,6 +121,10 @@ static int ce6230_i2c_master_xfer(struct i2c_adapter *adap,
                } else {
                        if (msg[i].addr ==
                                ce6230_zl10353_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = DEMOD_WRITE;
                                req.value = msg[i].addr >> 1;
                                req.index = msg[i].buf[0];
index 7ed0ab9..0e4773f 100644 (file)
@@ -115,6 +115,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
        while (i < num) {
                if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
                        if (msg[i].addr == ec168_ec100_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = READ_DEMOD;
                                req.value = 0;
                                req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -131,6 +135,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        }
                } else {
                        if (msg[i].addr == ec168_ec100_config.demod_address) {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = WRITE_DEMOD;
                                req.value = msg[i].buf[1]; /* val */
                                req.index = 0xff00 + msg[i].buf[0]; /* reg */
@@ -139,6 +147,10 @@ static int ec168_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = ec168_ctrl_msg(d, &req);
                                i += 1;
                        } else {
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                req.cmd = WRITE_I2C;
                                req.value = msg[i].buf[0]; /* val */
                                req.index = 0x0100 + msg[i].addr; /* I2C addr */
index 795a012..f7884bb 100644 (file)
@@ -176,6 +176,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        ret = -EOPNOTSUPP;
                        goto err_mutex_unlock;
                } else if (msg[0].addr == 0x10) {
+                       if (msg[0].len < 1 || msg[1].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 1 - integrated demod */
                        if (msg[0].buf[0] == 0x00) {
                                /* return demod page from driver cache */
@@ -189,6 +193,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = rtl28xxu_ctrl_msg(d, &req);
                        }
                } else if (msg[0].len < 2) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 2 - old I2C */
                        req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
                        req.index = CMD_I2C_RD;
@@ -217,8 +225,16 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                        ret = -EOPNOTSUPP;
                        goto err_mutex_unlock;
                } else if (msg[0].addr == 0x10) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 1 - integrated demod */
                        if (msg[0].buf[0] == 0x00) {
+                               if (msg[0].len < 2) {
+                                       ret = -EOPNOTSUPP;
+                                       goto err_mutex_unlock;
+                               }
                                /* save demod page for later demod access */
                                dev->page = msg[0].buf[1];
                                ret = 0;
@@ -231,6 +247,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                                ret = rtl28xxu_ctrl_msg(d, &req);
                        }
                } else if ((msg[0].len < 23) && (!dev->new_i2c_write)) {
+                       if (msg[0].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto err_mutex_unlock;
+                       }
                        /* method 2 - old I2C */
                        req.value = (msg[0].buf[0] << 8) | (msg[0].addr << 1);
                        req.index = CMD_I2C_WR;
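
Every one of these hunks enforces the same invariant: msg[i].buf[0] (or buf[1]) may only be read after validating msg[i].len, because the transfer can originate from userspace via i2c-dev with an arbitrary length. The guard in isolation:

#include <stdio.h>
#include <stdint.h>

struct i2c_msg {
        uint16_t addr, flags, len;
        uint8_t *buf;
};

static int xfer_one(const struct i2c_msg *msg)
{
        if (msg->len < 1)
                return -95;              /* -EOPNOTSUPP: refuse, never read buf[0] */
        return msg->buf[0];              /* safe: at least one byte present */
}

int main(void)
{
        struct i2c_msg empty = { .addr = 0x10, .len = 0, .buf = NULL };

        printf("%d\n", xfer_one(&empty));  /* -95, no NULL/OOB dereference */
        return 0;
}
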
index 7d78ee0..a31c6f8 100644 (file)
@@ -988,6 +988,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
                        /* write/read request */
                        if (i + 1 < num && (msg[i + 1].flags & I2C_M_RD)) {
                                req = 0xB9;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
                                value = msg[i].addr + (msg[i].len << 8);
                                length = msg[i + 1].len + 6;
@@ -1001,6 +1005,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
 
                                /* demod 16bit addr */
                                req = 0xBD;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = (((msg[i].buf[0] << 8) & 0xff00) | (msg[i].buf[1] & 0x00ff));
                                value = msg[i].addr + (2 << 8);
                                length = msg[i].len - 2;
@@ -1026,6 +1034,10 @@ static int az6027_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int n
                        } else {
 
                                req = 0xBD;
+                               if (msg[i].len < 1) {
+                                       i = -EOPNOTSUPP;
+                                       break;
+                               }
                                index = msg[i].buf[0] & 0x00FF;
                                value = msg[i].addr + (1 << 8);
                                length = msg[i].len - 1;
index 2756815..32134be 100644 (file)
@@ -63,6 +63,10 @@ static int digitv_i2c_xfer(struct i2c_adapter *adap,struct i2c_msg msg[],int num
                warn("more than 2 i2c messages at a time is not handled yet. TODO.");
 
        for (i = 0; i < num; i++) {
+               if (msg[i].len < 1) {
+                       i = -EOPNOTSUPP;
+                       break;
+               }
                /* write/read request */
                if (i+1 < num && (msg[i+1].flags & I2C_M_RD)) {
                        if (digitv_ctrl_msg(d, USB_READ_COFDM, msg[i].buf[0], NULL, 0,
index 0ca7642..8747960 100644 (file)
@@ -946,7 +946,7 @@ static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
        for (i = 0; i < 6; i++) {
                obuf[1] = 0xf0 + i;
                if (i2c_transfer(&d->i2c_adap, msg, 2) != 2)
-                       break;
+                       return -1;
                else
                        mac[i] = ibuf[0];
        }
index 9501b10..0df1027 100644 (file)
@@ -37,6 +37,7 @@ config VIDEO_PVRUSB2_DVB
        bool "pvrusb2 ATSC/DVB support"
        default y
        depends on VIDEO_PVRUSB2 && DVB_CORE
+       depends on VIDEO_PVRUSB2=m || DVB_CORE=y
        select DVB_LGDT330X if MEDIA_SUBDRV_AUTOSELECT
        select DVB_S5H1409 if MEDIA_SUBDRV_AUTOSELECT
        select DVB_S5H1411 if MEDIA_SUBDRV_AUTOSELECT
index 38822ce..c4474d4 100644 (file)
@@ -1544,8 +1544,7 @@ static void ttusb_dec_exit_dvb(struct ttusb_dec *dec)
        dvb_dmx_release(&dec->demux);
        if (dec->fe) {
                dvb_unregister_frontend(dec->fe);
-               if (dec->fe->ops.release)
-                       dec->fe->ops.release(dec->fe);
+               dvb_frontend_detach(dec->fe);
        }
        dvb_unregister_adapter(&dec->adapter);
 }
index 7aefa76..d631ce4 100644 (file)
@@ -251,14 +251,17 @@ static int uvc_parse_format(struct uvc_device *dev,
                /* Find the format descriptor from its GUID. */
                fmtdesc = uvc_format_by_guid(&buffer[5]);
 
-               if (fmtdesc != NULL) {
-                       format->fcc = fmtdesc->fcc;
-               } else {
+               if (!fmtdesc) {
+                       /*
+                        * Unknown video formats are not fatal errors; the
+                        * caller will skip this descriptor.
+                        */
                        dev_info(&streaming->intf->dev,
                                 "Unknown video format %pUl\n", &buffer[5]);
-                       format->fcc = 0;
+                       return 0;
                }
 
+               format->fcc = fmtdesc->fcc;
                format->bpp = buffer[21];
 
                /*
@@ -675,7 +678,7 @@ static int uvc_parse_streaming(struct uvc_device *dev,
        interval = (u32 *)&frame[nframes];
 
        streaming->format = format;
-       streaming->nformats = nformats;
+       streaming->nformats = 0;
 
        /* Parse the format descriptors. */
        while (buflen > 2 && buffer[1] == USB_DT_CS_INTERFACE) {
@@ -689,7 +692,10 @@ static int uvc_parse_streaming(struct uvc_device *dev,
                                &interval, buffer, buflen);
                        if (ret < 0)
                                goto error;
+                       if (!ret)
+                               break;
 
+                       streaming->nformats++;
                        frame += format->nframes;
                        format++;
 
index bf0c181..22fe08f 100644 (file)
@@ -314,8 +314,7 @@ int v4l2_create_fwnode_links_to_pad(struct v4l2_subdev *src_sd,
 {
        struct fwnode_handle *endpoint;
 
-       if (!(sink->flags & MEDIA_PAD_FL_SINK) ||
-           !is_media_entity_v4l2_subdev(sink->entity))
+       if (!(sink->flags & MEDIA_PAD_FL_SINK))
                return -EINVAL;
 
        fwnode_graph_for_each_endpoint(dev_fwnode(src_sd->dev), endpoint) {
index f484669..30d4d04 100644 (file)
@@ -316,12 +316,14 @@ static void fastrpc_free_map(struct kref *ref)
        if (map->table) {
                if (map->attr & FASTRPC_ATTR_SECUREMAP) {
                        struct qcom_scm_vmperm perm;
+                       int vmid = map->fl->cctx->vmperms[0].vmid;
+                       u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
                        int err = 0;
 
                        perm.vmid = QCOM_SCM_VMID_HLOS;
                        perm.perm = QCOM_SCM_PERM_RWX;
                        err = qcom_scm_assign_mem(map->phys, map->size,
-                               &map->fl->cctx->perms, &perm, 1);
+                               &src_perms, &perm, 1);
                        if (err) {
                                dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                                map->phys, map->size, err);
@@ -787,8 +789,12 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                goto map_err;
        }
 
-       map->phys = sg_dma_address(map->table->sgl);
-       map->phys += ((u64)fl->sctx->sid << 32);
+       if (attr & FASTRPC_ATTR_SECUREMAP) {
+               map->phys = sg_phys(map->table->sgl);
+       } else {
+               map->phys = sg_dma_address(map->table->sgl);
+               map->phys += ((u64)fl->sctx->sid << 32);
+       }
        map->size = len;
        map->va = sg_virt(map->table->sgl);
        map->len = len;
@@ -798,9 +804,15 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                 * If subsystem VMIDs are defined in DTSI, then do
                 * hyp_assign from HLOS to those VM(s)
                 */
+               u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
+               struct qcom_scm_vmperm dst_perms[2] = {0};
+
+               dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
+               dst_perms[0].perm = QCOM_SCM_PERM_RW;
+               dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
+               dst_perms[1].perm = QCOM_SCM_PERM_RWX;
                map->attr = attr;
-               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
-                               fl->cctx->vmperms, fl->cctx->vmcount);
+               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
                if (err) {
                        dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
                                        map->phys, map->size, err);
@@ -1892,7 +1904,7 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
        req.vaddrout = rsp_msg.vaddr;
 
        /* Add memory to static PD pool, protection through hypervisor */
-       if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
+       if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
                struct qcom_scm_vmperm perm;
 
                perm.vmid = QCOM_SCM_VMID_HLOS;
@@ -2337,8 +2349,10 @@ static void fastrpc_notify_users(struct fastrpc_user *user)
        struct fastrpc_invoke_ctx *ctx;
 
        spin_lock(&user->lock);
-       list_for_each_entry(ctx, &user->pending, node)
+       list_for_each_entry(ctx, &user->pending, node) {
+               ctx->retval = -EPIPE;
                complete(&ctx->work);
+       }
        spin_unlock(&user->lock);
 }
 
@@ -2349,7 +2363,9 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
        struct fastrpc_user *user;
        unsigned long flags;
 
+       /* No invocations past this point */
        spin_lock_irqsave(&cctx->lock, flags);
+       cctx->rpdev = NULL;
        list_for_each_entry(user, &cctx->users, user)
                fastrpc_notify_users(user);
        spin_unlock_irqrestore(&cctx->lock, flags);
@@ -2368,7 +2384,6 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
 
        of_platform_depopulate(&rpdev->dev);
 
-       cctx->rpdev = NULL;
        fastrpc_channel_ctx_put(cctx);
 }
 
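A note on the two qcom_scm_assign_mem() call sites above: the create path hands a secure buffer from HLOS to HLOS (read/write) plus the remote subsystem VM (read/write/execute), so by the time the map is freed both VMs own it, and the free path must list both in the source mask when returning the memory to HLOS; passing a stale single-owner mask, as the removed line did, is the bug being fixed. A standalone C model of that bookkeeping, with stub types and a hypothetical assign_mem() in place of the real SCM call; the VMID and permission encodings are assumptions for illustration, not the driver's:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)    (1ULL << (n))
    #define VMID_HLOS 3        /* assumed VMID value, illustration only */
    #define PERM_RW   0x6      /* assumed read|write encoding */
    #define PERM_RWX  0x7      /* assumed read|write|exec encoding */

    struct vmperm { int vmid; int perm; };

    /* Hypothetical stand-in for qcom_scm_assign_mem(): moves ownership of
     * [phys, phys + size) from the VMs named in *src_mask to dst[].
     */
    static int assign_mem(uint64_t phys, uint64_t size, uint64_t *src_mask,
                          const struct vmperm *dst, unsigned int dst_cnt)
    {
            printf("0x%llx+0x%llx: src mask 0x%llx -> %u destination VM(s)\n",
                   (unsigned long long)phys, (unsigned long long)size,
                   (unsigned long long)*src_mask, dst_cnt);
            return 0;
    }

    int main(void)
    {
            int subsys_vmid = 5;            /* the driver reads this from DT */
            struct vmperm dst[2] = {
                    { VMID_HLOS,   PERM_RW  },
                    { subsys_vmid, PERM_RWX },
            };
            struct vmperm back = { VMID_HLOS, PERM_RWX };
            uint64_t src;

            /* map: HLOS hands the buffer to HLOS(RW) + subsystem(RWX) */
            src = BIT(VMID_HLOS);
            assign_mem(0x1000, 0x2000, &src, dst, 2);

            /* unmap: both VMs own it now, so the source mask names both */
            src = BIT(VMID_HLOS) | BIT(subsys_vmid);
            return assign_mem(0x1000, 0x2000, &src, &back, 1);
    }
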
index 00c33ed..d920c41 100644 (file)
@@ -264,6 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
                goto out_put;
        }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        blk_execute_rq(req, false);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        blk_mq_free_request(req);
@@ -651,6 +652,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
        idatas[0] = idata;
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = idatas;
        req_to_mmc_queue_req(req)->ioc_count = 1;
        blk_execute_rq(req, false);
@@ -722,6 +724,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
        }
        req_to_mmc_queue_req(req)->drv_op =
                rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = idata;
        req_to_mmc_queue_req(req)->ioc_count = n;
        blk_execute_rq(req, false);
@@ -2806,6 +2809,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
        if (IS_ERR(req))
                return PTR_ERR(req);
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        blk_execute_rq(req, false);
        ret = req_to_mmc_queue_req(req)->drv_op_result;
        if (ret >= 0) {
@@ -2844,6 +2848,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
                goto out_free;
        }
        req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
+       req_to_mmc_queue_req(req)->drv_op_result = -EIO;
        req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
        blk_execute_rq(req, false);
        err = req_to_mmc_queue_req(req)->drv_op_result;
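
Each of the five hunks above presets drv_op_result to -EIO before blk_execute_rq(): if the request completes without the driver's issue path ever running, the caller now reads a definite error instead of whatever happened to be in the field. A standalone sketch of the failure mode, with stand-in types rather than the block layer's:

    #include <errno.h>
    #include <stdio.h>

    struct mmc_queue_req { int drv_op; int drv_op_result; };

    /* model of blk_execute_rq(): the driver op may or may not get to run */
    static void execute(struct mmc_queue_req *mq, int dispatched)
    {
            if (dispatched)
                    mq->drv_op_result = 0;  /* op ran and succeeded */
            /* else: completed without the driver touching the request */
    }

    int main(void)
    {
            struct mmc_queue_req mq;

            mq.drv_op = 1;
            mq.drv_op_result = -EIO;        /* preset, as in the fix above */
            execute(&mq, 0);                /* request never dispatched */
            printf("result = %d\n", mq.drv_op_result);  /* -EIO, not junk */
            return 0;
    }
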
index 2e120ad..0c5f5e3 100644 (file)
@@ -28,7 +28,6 @@ struct mmc_pwrseq_sd8787 {
        struct mmc_pwrseq pwrseq;
        struct gpio_desc *reset_gpio;
        struct gpio_desc *pwrdn_gpio;
-       u32 reset_pwrdwn_delay_ms;
 };
 
 #define to_pwrseq_sd8787(p) container_of(p, struct mmc_pwrseq_sd8787, pwrseq)
@@ -39,7 +38,7 @@ static void mmc_pwrseq_sd8787_pre_power_on(struct mmc_host *host)
 
        gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
 
-       msleep(pwrseq->reset_pwrdwn_delay_ms);
+       msleep(300);
        gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
 }
 
@@ -51,17 +50,37 @@ static void mmc_pwrseq_sd8787_power_off(struct mmc_host *host)
        gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
 }
 
+static void mmc_pwrseq_wilc1000_pre_power_on(struct mmc_host *host)
+{
+       struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+       /* The pwrdn_gpio is really CHIP_EN, reset_gpio is RESETN */
+       gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 1);
+       msleep(5);
+       gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
+}
+
+static void mmc_pwrseq_wilc1000_power_off(struct mmc_host *host)
+{
+       struct mmc_pwrseq_sd8787 *pwrseq = to_pwrseq_sd8787(host->pwrseq);
+
+       gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
+       gpiod_set_value_cansleep(pwrseq->pwrdn_gpio, 0);
+}
+
 static const struct mmc_pwrseq_ops mmc_pwrseq_sd8787_ops = {
        .pre_power_on = mmc_pwrseq_sd8787_pre_power_on,
        .power_off = mmc_pwrseq_sd8787_power_off,
 };
 
-static const u32 sd8787_delay_ms = 300;
-static const u32 wilc1000_delay_ms = 5;
+static const struct mmc_pwrseq_ops mmc_pwrseq_wilc1000_ops = {
+       .pre_power_on = mmc_pwrseq_wilc1000_pre_power_on,
+       .power_off = mmc_pwrseq_wilc1000_power_off,
+};
 
 static const struct of_device_id mmc_pwrseq_sd8787_of_match[] = {
-       { .compatible = "mmc-pwrseq-sd8787", .data = &sd8787_delay_ms },
-       { .compatible = "mmc-pwrseq-wilc1000", .data = &wilc1000_delay_ms },
+       { .compatible = "mmc-pwrseq-sd8787", .data = &mmc_pwrseq_sd8787_ops },
+       { .compatible = "mmc-pwrseq-wilc1000", .data = &mmc_pwrseq_wilc1000_ops },
        {/* sentinel */},
 };
 MODULE_DEVICE_TABLE(of, mmc_pwrseq_sd8787_of_match);
@@ -77,7 +96,6 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        match = of_match_node(mmc_pwrseq_sd8787_of_match, pdev->dev.of_node);
-       pwrseq->reset_pwrdwn_delay_ms = *(u32 *)match->data;
 
        pwrseq->pwrdn_gpio = devm_gpiod_get(dev, "powerdown", GPIOD_OUT_LOW);
        if (IS_ERR(pwrseq->pwrdn_gpio))
@@ -88,7 +106,7 @@ static int mmc_pwrseq_sd8787_probe(struct platform_device *pdev)
                return PTR_ERR(pwrseq->reset_gpio);
 
        pwrseq->pwrseq.dev = dev;
-       pwrseq->pwrseq.ops = &mmc_pwrseq_sd8787_ops;
+       pwrseq->pwrseq.ops = match->data;
        pwrseq->pwrseq.owner = THIS_MODULE;
        platform_set_drvdata(pdev, pwrseq);
 
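The pwrseq rework swaps the per-variant delay stored in of_device_id .data for a full ops table, so probe simply assigns match->data and each compatible gets its own power-on/off sequence with no flag checks in shared code. A standalone model of the compatible-to-ops dispatch, using stub types and made-up log messages:

    #include <stdio.h>
    #include <string.h>

    struct pwrseq_ops {
            void (*pre_power_on)(void);
            void (*power_off)(void);
    };

    static void sd8787_on(void)  { puts("sd8787: reset, wait, power"); }
    static void sd8787_off(void) { puts("sd8787: power off"); }
    static void wilc_on(void)    { puts("wilc1000: chip_en, wait, reset"); }
    static void wilc_off(void)   { puts("wilc1000: reset off, chip_en off"); }

    static const struct pwrseq_ops sd8787_ops = { sd8787_on, sd8787_off };
    static const struct pwrseq_ops wilc_ops   = { wilc_on, wilc_off };

    /* model of an of_device_id table: compatible string -> ops pointer */
    static const struct {
            const char *compatible;
            const void *data;
    } of_match[] = {
            { "mmc-pwrseq-sd8787",   &sd8787_ops },
            { "mmc-pwrseq-wilc1000", &wilc_ops },
            { NULL, NULL },                 /* sentinel */
    };

    int main(void)
    {
            const char *node = "mmc-pwrseq-wilc1000";

            for (int i = 0; of_match[i].compatible; i++) {
                    if (strcmp(node, of_match[i].compatible))
                            continue;
                    const struct pwrseq_ops *ops = of_match[i].data;

                    ops->pre_power_on();    /* variant-specific, no flags */
                    ops->power_off();
            }
            return 0;
    }
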
index b24aa27..d2f6250 100644 (file)
@@ -540,9 +540,11 @@ static int sdhci_cdns_probe(struct platform_device *pdev)
 
        if (host->mmc->caps & MMC_CAP_HW_RESET) {
                priv->rst_hw = devm_reset_control_get_optional_exclusive(dev, NULL);
-               if (IS_ERR(priv->rst_hw))
-                       return dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
-                                            "reset controller error\n");
+               if (IS_ERR(priv->rst_hw)) {
+                       ret = dev_err_probe(mmc_dev(host->mmc), PTR_ERR(priv->rst_hw),
+                                           "reset controller error\n");
+                       goto free;
+               }
                if (priv->rst_hw)
                        host->mmc_host_ops.card_hw_reset = sdhci_cdns_mmc_hw_reset;
        }
index d7c0c0b..eebf946 100644 (file)
@@ -1634,6 +1634,10 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
        if (ret)
                return ret;
 
+       /* HS400/HS400ES require an 8-bit bus */
+       if (!(host->mmc->caps & MMC_CAP_8_BIT_DATA))
+               host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES);
+
        if (mmc_gpio_get_cd(host->mmc) >= 0)
                host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 
@@ -1724,10 +1728,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                host->mmc_host_ops.init_card = usdhc_init_card;
        }
 
-       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
-       if (err)
-               goto disable_ahb_clk;
-
        if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING)
                sdhci_esdhc_ops.platform_execute_tuning =
                                        esdhc_executing_tuning;
@@ -1735,15 +1735,13 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
        if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
                host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
 
-       if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
-           imx_data->socdata->flags & ESDHC_FLAG_HS400)
+       if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
                host->mmc->caps2 |= MMC_CAP2_HS400;
 
        if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23)
                host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN;
 
-       if (host->mmc->caps & MMC_CAP_8_BIT_DATA &&
-           imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
+       if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) {
                host->mmc->caps2 |= MMC_CAP2_HS400_ES;
                host->mmc_host_ops.hs400_enhanced_strobe =
                                        esdhc_hs400_enhanced_strobe;
@@ -1765,6 +1763,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
                        goto disable_ahb_clk;
        }
 
+       err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
+       if (err)
+               goto disable_ahb_clk;
+
        sdhci_esdhc_imx_hwinit(host);
 
        err = sdhci_add_host(host);
index e4c4bfa..9ec593d 100644 (file)
@@ -1713,6 +1713,9 @@ static void construct_request_response(struct vub300_mmc_host *vub300,
        int bytes = 3 & less_cmd;
        int words = less_cmd >> 2;
        u8 *r = vub300->resp.response.command_response;
+
+       if (!resp_len)
+               return;
        if (bytes == 3) {
                cmd->resp[words] = (r[1 + (words << 2)] << 24)
                        | (r[2 + (words << 2)] << 16)
index 01f1c67..8dc4f5c 100644 (file)
@@ -590,8 +590,8 @@ static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
                            (end_page - start_page + 1) * oob_per_page);
 }
 
-static int mtdchar_write_ioctl(struct mtd_info *mtd,
-               struct mtd_write_req __user *argp)
+static noinline_for_stack int
+mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
 {
        struct mtd_info *master = mtd_get_master(mtd);
        struct mtd_write_req req;
@@ -688,8 +688,8 @@ static int mtdchar_write_ioctl(struct mtd_info *mtd,
        return ret;
 }
 
-static int mtdchar_read_ioctl(struct mtd_info *mtd,
-               struct mtd_read_req __user *argp)
+static noinline_for_stack int
+mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
 {
        struct mtd_info *master = mtd_get_master(mtd);
        struct mtd_read_req req;
index 2cda439..017868f 100644 (file)
@@ -36,25 +36,25 @@ int ingenic_ecc_correct(struct ingenic_ecc *ecc,
 void ingenic_ecc_release(struct ingenic_ecc *ecc);
 struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np);
 #else /* CONFIG_MTD_NAND_INGENIC_ECC */
-int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_calculate(struct ingenic_ecc *ecc,
                          struct ingenic_ecc_params *params,
                          const u8 *buf, u8 *ecc_code)
 {
        return -ENODEV;
 }
 
-int ingenic_ecc_correct(struct ingenic_ecc *ecc,
+static inline int ingenic_ecc_correct(struct ingenic_ecc *ecc,
                        struct ingenic_ecc_params *params, u8 *buf,
                        u8 *ecc_code)
 {
        return -ENODEV;
 }
 
-void ingenic_ecc_release(struct ingenic_ecc *ecc)
+static inline void ingenic_ecc_release(struct ingenic_ecc *ecc)
 {
 }
 
-struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
+static inline struct ingenic_ecc *of_ingenic_ecc_get(struct device_node *np)
 {
        return ERR_PTR(-ENODEV);
 }
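
The header fix above adds static inline to the stubs built when CONFIG_MTD_NAND_INGENIC_ECC is disabled; without it, every translation unit that includes the header emits a global definition of each stub and the link fails with multiple-definition errors. A minimal sketch of the convention, using a hypothetical CONFIG_FEATURE macro:

    #include <stdio.h>

    /* header-style sketch: stubs for a disabled option must be static inline */
    #ifdef CONFIG_FEATURE
    int feature_do(int x);                  /* real symbol lives in feature.c */
    #else
    static inline int feature_do(int x)     /* no global symbol is emitted */
    {
            (void)x;
            return -19;                     /* mirrors the -ENODEV stubs above */
    }
    #endif

    int main(void)
    {
            printf("feature_do() -> %d\n", feature_do(1));
            return 0;
    }
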
index afb4245..30c15e4 100644 (file)
@@ -2457,6 +2457,12 @@ static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr,
                        NDTR1_WAIT_MODE;
        }
 
+       /*
+        * Reset nfc->selected_chip so the next command will cause the timing
+        * registers to be updated in marvell_nfc_select_target().
+        */
+       nfc->selected_chip = NULL;
+
        return 0;
 }
 
@@ -2894,10 +2900,6 @@ static int marvell_nfc_init(struct marvell_nfc *nfc)
                regmap_update_bits(sysctrl_base, GENCONF_CLK_GATING_CTRL,
                                   GENCONF_CLK_GATING_CTRL_ND_GATE,
                                   GENCONF_CLK_GATING_CTRL_ND_GATE);
-
-               regmap_update_bits(sysctrl_base, GENCONF_ND_CLK_CTRL,
-                                  GENCONF_ND_CLK_CTRL_EN,
-                                  GENCONF_ND_CLK_CTRL_EN);
        }
 
        /* Configure the DMA if appropriate */
index 0bb0ad1..5f29fac 100644 (file)
@@ -2018,6 +2018,7 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
 
 static const struct flash_info spi_nor_generic_flash = {
        .name = "spi-nor-generic",
+       .n_banks = 1,
        /*
         * JESD216 rev A doesn't specify the page size, therefore we need a
         * sane default.
@@ -2921,7 +2922,8 @@ static void spi_nor_late_init_params(struct spi_nor *nor)
        if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
                spi_nor_init_default_locking_ops(nor);
 
-       nor->params->bank_size = div64_u64(nor->params->size, nor->info->n_banks);
+       if (nor->info->n_banks > 1)
+               params->bank_size = div64_u64(params->size, nor->info->n_banks);
 }
 
 /**
@@ -2987,6 +2989,7 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
        /* Set SPI NOR sizes. */
        params->writesize = 1;
        params->size = (u64)info->sector_size * info->n_sectors;
+       params->bank_size = params->size;
        params->page_size = info->page_size;
 
        if (!(info->flags & SPI_NOR_NO_FR)) {
index 15f9a80..36876aa 100644 (file)
@@ -361,7 +361,7 @@ static int cypress_nor_determine_addr_mode_by_sr1(struct spi_nor *nor,
  */
 static int cypress_nor_set_addr_mode_nbytes(struct spi_nor *nor)
 {
-       struct spi_mem_op op;
+       struct spi_mem_op op = {};
        u8 addr_mode;
        int ret;
 
@@ -492,7 +492,7 @@ s25fs256t_post_bfpt_fixup(struct spi_nor *nor,
                          const struct sfdp_parameter_header *bfpt_header,
                          const struct sfdp_bfpt *bfpt)
 {
-       struct spi_mem_op op;
+       struct spi_mem_op op = {};
        int ret;
 
        ret = cypress_nor_set_addr_mode_nbytes(nor);
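
Both Cypress hunks zero-initialize an on-stack spi_mem_op before passing it to helpers that fill in only some members; the rest would otherwise carry stack garbage into the controller op. A tiny standalone illustration, where the struct and helper are stand-ins:

    #include <stdio.h>

    struct op { int cmd; int addr_nbytes; int dummy; };

    /* helper that, like the SPI-NOR code, fills in only one member */
    static void fill_cmd_only(struct op *op) { op->cmd = 0x9f; }

    int main(void)
    {
            struct op a;            /* addr_nbytes and dummy: stack garbage */
            struct op b = {0};      /* zeroed, the = {} idiom in the fix */

            fill_cmd_only(&a);      /* a is still unsafe to consume */
            fill_cmd_only(&b);
            printf("b: cmd=%#x addr_nbytes=%d dummy=%d\n",
                   b.cmd, b.addr_nbytes, b.dummy);
            return 0;
    }
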
index 3fed888..edbaa14 100644 (file)
@@ -3947,7 +3947,11 @@ static int bond_slave_netdev_event(unsigned long event,
                unblock_netpoll_tx();
                break;
        case NETDEV_FEAT_CHANGE:
-               bond_compute_features(bond);
+               if (!bond->notifier_ctx) {
+                       bond->notifier_ctx = true;
+                       bond_compute_features(bond);
+                       bond->notifier_ctx = false;
+               }
                break;
        case NETDEV_RESEND_IGMP:
                /* Propagate to master device */
@@ -6342,6 +6346,8 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
+       bond->notifier_ctx = false;
+
        spin_lock_init(&bond->stats_lock);
        netdev_lockdep_set_classes(bond_dev);
 
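bond_compute_features() can itself trigger a nested NETDEV_FEAT_CHANGE notification, so the new notifier_ctx flag latches for the duration of the computation and drops re-entrant events. A standalone model of the latch; the recursion here is capped artificially just so the demo terminates:

    #include <stdbool.h>
    #include <stdio.h>

    static bool in_notifier;        /* models bond->notifier_ctx */

    static void notifier(int depth);

    static void compute_features(int depth)
    {
            printf("compute_features (depth %d)\n", depth);
            if (depth < 3)
                    notifier(depth + 1);    /* recomputation re-notifies */
    }

    static void notifier(int depth)
    {
            if (in_notifier)                /* break the recursion */
                    return;
            in_notifier = true;
            compute_features(depth);
            in_notifier = false;
    }

    int main(void)
    {
            notifier(0);    /* prints once instead of recursing */
            return 0;
    }
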
index 3ceccaf..b190007 100644 (file)
@@ -95,7 +95,7 @@ config CAN_AT91
 
 config CAN_BXCAN
        tristate "STM32 Basic Extended CAN (bxCAN) devices"
-       depends on OF || ARCH_STM32 || COMPILE_TEST
+       depends on ARCH_STM32 || COMPILE_TEST
        depends on HAS_IOMEM
        select CAN_RX_OFFLOAD
        help
index e26ccd4..027a8a1 100644 (file)
 #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8)
 #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8)
 
-#define BXCAN_FILTER_ID(primary) (primary ? 0 : 14)
+#define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
 
 /* Filter primary register (FMR) bits */
 #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8)
@@ -135,6 +135,12 @@ enum bxcan_lec_code {
        BXCAN_LEC_UNUSED
 };
 
+enum bxcan_cfg {
+       BXCAN_CFG_SINGLE = 0,
+       BXCAN_CFG_DUAL_PRIMARY,
+       BXCAN_CFG_DUAL_SECONDARY
+};
+
 /* Structure of the message buffer */
 struct bxcan_mb {
        u32 id;                 /* can identifier */
@@ -167,7 +173,7 @@ struct bxcan_priv {
        struct regmap *gcan;
        int tx_irq;
        int sce_irq;
-       bool primary;
+       enum bxcan_cfg cfg;
        struct clk *clk;
        spinlock_t rmw_lock;    /* lock for read-modify-write operations */
        unsigned int tx_head;
@@ -202,17 +208,17 @@ static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr,
        spin_unlock_irqrestore(&priv->rmw_lock, flags);
 }
 
-static void bxcan_disable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
 {
-       unsigned int fid = BXCAN_FILTER_ID(primary);
+       unsigned int fid = BXCAN_FILTER_ID(cfg);
        u32 fmask = BIT(fid);
 
        regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0);
 }
 
-static void bxcan_enable_filters(struct bxcan_priv *priv, bool primary)
+static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg)
 {
-       unsigned int fid = BXCAN_FILTER_ID(primary);
+       unsigned int fid = BXCAN_FILTER_ID(cfg);
        u32 fmask = BIT(fid);
 
        /* Filter settings:
@@ -680,7 +686,7 @@ static int bxcan_chip_start(struct net_device *ndev)
                  BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK |
                  BXCAN_BTR_SJW_MASK, set);
 
-       bxcan_enable_filters(priv, priv->primary);
+       bxcan_enable_filters(priv, priv->cfg);
 
        /* Clear all internal status */
        priv->tx_head = 0;
@@ -806,7 +812,7 @@ static void bxcan_chip_stop(struct net_device *ndev)
                  BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 |
                  BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 |
                  BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0);
-       bxcan_disable_filters(priv, priv->primary);
+       bxcan_disable_filters(priv, priv->cfg);
        bxcan_enter_sleep_mode(priv);
        priv->can.state = CAN_STATE_STOPPED;
 }
@@ -931,7 +937,7 @@ static int bxcan_probe(struct platform_device *pdev)
        struct clk *clk = NULL;
        void __iomem *regs;
        struct regmap *gcan;
-       bool primary;
+       enum bxcan_cfg cfg;
        int err, rx_irq, tx_irq, sce_irq;
 
        regs = devm_platform_ioremap_resource(pdev, 0);
@@ -946,7 +952,13 @@ static int bxcan_probe(struct platform_device *pdev)
                return PTR_ERR(gcan);
        }
 
-       primary = of_property_read_bool(np, "st,can-primary");
+       if (of_property_read_bool(np, "st,can-primary"))
+               cfg = BXCAN_CFG_DUAL_PRIMARY;
+       else if (of_property_read_bool(np, "st,can-secondary"))
+               cfg = BXCAN_CFG_DUAL_SECONDARY;
+       else
+               cfg = BXCAN_CFG_SINGLE;
+
        clk = devm_clk_get(dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(dev, "failed to get clock\n");
@@ -992,7 +1004,7 @@ static int bxcan_probe(struct platform_device *pdev)
        priv->clk = clk;
        priv->tx_irq = tx_irq;
        priv->sce_irq = sce_irq;
-       priv->primary = primary;
+       priv->cfg = cfg;
        priv->can.clock.freq = clk_get_rate(clk);
        spin_lock_init(&priv->rmw_lock);
        priv->tx_head = 0;
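
The bxcan changes replace the primary bool with a three-way configuration so that a lone controller, carrying neither st,can-primary nor st,can-secondary, keeps filter bank 0 instead of being treated as the secondary of a dual setup. A standalone sketch of the property-to-config mapping and the resulting filter-bank choice:

    #include <stdbool.h>
    #include <stdio.h>

    enum bxcan_cfg { CFG_SINGLE, CFG_DUAL_PRIMARY, CFG_DUAL_SECONDARY };

    /* only the secondary half of a dual setup owns filter bank 14 */
    #define FILTER_ID(cfg) ((cfg) == CFG_DUAL_SECONDARY ? 14 : 0)

    static enum bxcan_cfg cfg_from_dt(bool primary_prop, bool secondary_prop)
    {
            if (primary_prop)
                    return CFG_DUAL_PRIMARY;
            if (secondary_prop)
                    return CFG_DUAL_SECONDARY;
            return CFG_SINGLE;      /* neither property present */
    }

    int main(void)
    {
            enum bxcan_cfg single = cfg_from_dt(false, false);
            enum bxcan_cfg sec = cfg_from_dt(false, true);

            printf("single -> filter %d, secondary -> filter %d\n",
                   FILTER_ID(single), FILTER_ID(sec));
            return 0;
    }
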
index 241ec63..f6d05b3 100644 (file)
@@ -54,7 +54,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
        /* check flag whether this packet has to be looped back */
        if (!(dev->flags & IFF_ECHO) ||
            (skb->protocol != htons(ETH_P_CAN) &&
-            skb->protocol != htons(ETH_P_CANFD))) {
+            skb->protocol != htons(ETH_P_CANFD) &&
+            skb->protocol != htons(ETH_P_CANXL))) {
                kfree_skb(skb);
                return 0;
        }
index 53e8a91..be189ed 100644 (file)
@@ -71,10 +71,12 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 #define KVASER_PCIEFD_SYSID_BUILD_REG (KVASER_PCIEFD_SYSID_BASE + 0x14)
 /* Shared receive buffer registers */
 #define KVASER_PCIEFD_SRB_BASE 0x1f200
+#define KVASER_PCIEFD_SRB_FIFO_LAST_REG (KVASER_PCIEFD_SRB_BASE + 0x1f4)
 #define KVASER_PCIEFD_SRB_CMD_REG (KVASER_PCIEFD_SRB_BASE + 0x200)
 #define KVASER_PCIEFD_SRB_IEN_REG (KVASER_PCIEFD_SRB_BASE + 0x204)
 #define KVASER_PCIEFD_SRB_IRQ_REG (KVASER_PCIEFD_SRB_BASE + 0x20c)
 #define KVASER_PCIEFD_SRB_STAT_REG (KVASER_PCIEFD_SRB_BASE + 0x210)
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG (KVASER_PCIEFD_SRB_BASE + 0x214)
 #define KVASER_PCIEFD_SRB_CTRL_REG (KVASER_PCIEFD_SRB_BASE + 0x218)
 /* EPCS flash controller registers */
 #define KVASER_PCIEFD_SPI_BASE 0x1fc00
@@ -111,6 +113,9 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
 /* DMA support */
 #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24)
 
+/* SRB current packet level */
+#define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK 0xff
+
 /* DMA Enable */
 #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0)
 
@@ -526,7 +531,7 @@ static int kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
              KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD |
              KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL |
              KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP |
-             KVASER_PCIEFD_KCAN_IRQ_TAR | KVASER_PCIEFD_KCAN_IRQ_TFD;
+             KVASER_PCIEFD_KCAN_IRQ_TAR;
 
        iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
@@ -554,6 +559,8 @@ static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can)
 
        if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
                mode |= KVASER_PCIEFD_KCAN_MODE_LOM;
+       else
+               mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM;
 
        mode |= KVASER_PCIEFD_KCAN_MODE_EEN;
        mode |= KVASER_PCIEFD_KCAN_MODE_EPEN;
@@ -572,7 +579,7 @@ static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can)
 
        spin_lock_irqsave(&can->lock, irq);
        iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
-       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
        status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG);
@@ -615,7 +622,7 @@ static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can)
        iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
        iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
 
-       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TFD,
+       iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                  can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
        mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG);
@@ -719,6 +726,7 @@ static int kvaser_pciefd_stop(struct net_device *netdev)
                iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
                del_timer(&can->bec_poll_timer);
        }
+       can->can.state = CAN_STATE_STOPPED;
        close_candev(netdev);
 
        return ret;
@@ -1007,8 +1015,7 @@ static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie)
                SET_NETDEV_DEV(netdev, &pcie->pci->dev);
 
                iowrite32(-1, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG);
-               iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD |
-                         KVASER_PCIEFD_KCAN_IRQ_TFD,
+               iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD,
                          can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
 
                pcie->can[i] = can;
@@ -1058,6 +1065,7 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
 {
        int i;
        u32 srb_status;
+       u32 srb_packet_count;
        dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT];
 
        /* Disable the DMA */
@@ -1085,6 +1093,15 @@ static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie)
                  KVASER_PCIEFD_SRB_CMD_RDB1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
 
+       /* Empty Rx FIFO */
+       srb_packet_count = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG) &
+                          KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK;
+       while (srb_packet_count) {
+               /* Drop current packet in FIFO */
+               ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_FIFO_LAST_REG);
+               srb_packet_count--;
+       }
+
        srb_status = ioread32(pcie->reg_base + KVASER_PCIEFD_SRB_STAT_REG);
        if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) {
                dev_err(&pcie->pci->dev, "DMA not idle before enabling\n");
@@ -1425,9 +1442,6 @@ static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie,
                cmd = KVASER_PCIEFD_KCAN_CMD_AT;
                cmd |= ++can->cmd_seq << KVASER_PCIEFD_KCAN_CMD_SEQ_SHIFT;
                iowrite32(cmd, can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG);
-
-               iowrite32(KVASER_PCIEFD_KCAN_IRQ_TFD,
-                         can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG);
        } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET &&
                   p->header[0] & KVASER_PCIEFD_SPACK_IRM &&
                   cmdseq == (p->header[1] & KVASER_PCIEFD_PACKET_SEQ_MSK) &&
@@ -1714,15 +1728,6 @@ static int kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
        if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF)
                netdev_err(can->can.dev, "Tx FIFO overflow\n");
 
-       if (irq & KVASER_PCIEFD_KCAN_IRQ_TFD) {
-               u8 count = ioread32(can->reg_base +
-                                   KVASER_PCIEFD_KCAN_TX_NPACKETS_REG) & 0xff;
-
-               if (count == 0)
-                       iowrite32(KVASER_PCIEFD_KCAN_CTRL_EFLUSH,
-                                 can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG);
-       }
-
        if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP)
                netdev_err(can->can.dev,
                           "Fail to change bittiming, when not in reset mode\n");
@@ -1824,6 +1829,11 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        if (err)
                goto err_teardown_can_ctrls;
 
+       err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+                         IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+       if (err)
+               goto err_teardown_can_ctrls;
+
        iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_IRQ_REG);
 
@@ -1844,11 +1854,6 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
                  pcie->reg_base + KVASER_PCIEFD_SRB_CMD_REG);
 
-       err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
-                         IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
-       if (err)
-               goto err_teardown_can_ctrls;
-
        err = kvaser_pciefd_reg_candev(pcie);
        if (err)
                goto err_free_irq;
@@ -1856,6 +1861,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
        return 0;
 
 err_free_irq:
+       /* Disable PCI interrupts */
+       iowrite32(0, pcie->reg_base + KVASER_PCIEFD_IEN_REG);
        free_irq(pcie->pci->irq, pcie);
 
 err_teardown_can_ctrls:
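
kvaser_pciefd_setup_dma() now empties the shared receive buffer before enabling DMA: it reads the packet count from the new RX_NR_PACKETS register and pops one FIFO word per stale packet. A standalone model of the drain loop; the register reads are stubbed, and the 0xff mask mirrors KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK above:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_PACKETS_MASK 0xff

    /* stubs for the two ioread32() targets used in the hunk */
    static uint32_t fifo_level = 5;
    static uint32_t read_nr_packets(void) { return 0xab00 | fifo_level; }
    static void pop_fifo(void) { if (fifo_level) fifo_level--; }

    int main(void)
    {
            /* upper register bits hold unrelated fields; mask them off */
            uint32_t count = read_nr_packets() & NR_PACKETS_MASK;

            while (count--)
                    pop_fifo();     /* each read drops one stale packet */

            printf("fifo level after drain: %u\n", fifo_level);
            return 0;
    }
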
index cbe8318..c0215a8 100644 (file)
@@ -1188,8 +1188,6 @@ static int lan9303_port_fdb_add(struct dsa_switch *ds, int port,
        struct lan9303 *chip = ds->priv;
 
        dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
-       if (vid)
-               return -EOPNOTSUPP;
 
        return lan9303_alr_add_port(chip, addr, port, false);
 }
@@ -1201,8 +1199,6 @@ static int lan9303_port_fdb_del(struct dsa_switch *ds, int port,
        struct lan9303 *chip = ds->priv;
 
        dev_dbg(chip->dev, "%s(%d, %pM, %d)\n", __func__, port, addr, vid);
-       if (vid)
-               return -EOPNOTSUPP;
        lan9303_alr_del_port(chip, addr, port);
 
        return 0;
index 64a2f2f..08a46ff 100644 (file)
@@ -7170,7 +7170,7 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
                goto out;
        }
        if (chip->reset)
-               usleep_range(1000, 2000);
+               usleep_range(10000, 20000);
 
        /* Detect if the device is configured in single chip addressing mode,
         * otherwise continue with address specific smi init/detection.
index aec9d4f..d19b630 100644 (file)
 /* Offset 0x10: Extended Port Control Command */
 #define MV88E6393X_PORT_EPC_CMD                0x10
 #define MV88E6393X_PORT_EPC_CMD_BUSY   0x8000
-#define MV88E6393X_PORT_EPC_CMD_WRITE  0x0300
+#define MV88E6393X_PORT_EPC_CMD_WRITE  0x3000
 #define MV88E6393X_PORT_EPC_INDEX_PORT_ETYPE   0x02
 
 /* Offset 0x11: Extended Port Control Data */
index 4347b42..de9da46 100644 (file)
@@ -20,6 +20,7 @@ config NET_DSA_QCA8K_LEDS_SUPPORT
        bool "Qualcomm Atheros QCA8K Ethernet switch family LEDs support"
        depends on NET_DSA_QCA8K
        depends on LEDS_CLASS=y || LEDS_CLASS=NET_DSA_QCA8K
+       depends on LEDS_TRIGGERS
        help
          This enables support for LEDs present on the Qualcomm Atheros
          QCA8K Ethernet switch chips.
index 919027c..c37d2e5 100644 (file)
@@ -120,6 +120,22 @@ static void a5psw_port_mgmtfwd_set(struct a5psw *a5psw, int port, bool enable)
        a5psw_port_pattern_set(a5psw, port, A5PSW_PATTERN_MGMTFWD, enable);
 }
 
+static void a5psw_port_tx_enable(struct a5psw *a5psw, int port, bool enable)
+{
+       u32 mask = A5PSW_PORT_ENA_TX(port);
+       u32 reg = enable ? mask : 0;
+
+       /* Even though the port TX is disabled through TXENA bit in the
+        * PORT_ENA register, it can still send BPDUs. This depends on the tag
+        * configuration added when sending packets from the CPU port to the
+        * switch port. Indeed, when using forced forwarding without filtering,
+        * even disabled ports will be able to send packets that are tagged.
+        * This allows implementing STP support when ports are in a state where
+        * forwarding traffic should be stopped but BPDUs should still be sent.
+        */
+       a5psw_reg_rmw(a5psw, A5PSW_PORT_ENA, mask, reg);
+}
+
 static void a5psw_port_enable_set(struct a5psw *a5psw, int port, bool enable)
 {
        u32 port_ena = 0;
@@ -292,6 +308,22 @@ static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
        return 0;
 }
 
+static void a5psw_port_learning_set(struct a5psw *a5psw, int port, bool learn)
+{
+       u32 mask = A5PSW_INPUT_LEARN_DIS(port);
+       u32 reg = !learn ? mask : 0;
+
+       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
+static void a5psw_port_rx_block_set(struct a5psw *a5psw, int port, bool block)
+{
+       u32 mask = A5PSW_INPUT_LEARN_BLOCK(port);
+       u32 reg = block ? mask : 0;
+
+       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+}
+
 static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
                                          bool set)
 {
@@ -308,6 +340,14 @@ static void a5psw_flooding_set_resolution(struct a5psw *a5psw, int port,
                a5psw_reg_writel(a5psw, offsets[i], a5psw->bridged_ports);
 }
 
+static void a5psw_port_set_standalone(struct a5psw *a5psw, int port,
+                                     bool standalone)
+{
+       a5psw_port_learning_set(a5psw, port, !standalone);
+       a5psw_flooding_set_resolution(a5psw, port, !standalone);
+       a5psw_port_mgmtfwd_set(a5psw, port, standalone);
+}
+
 static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
                                  struct dsa_bridge bridge,
                                  bool *tx_fwd_offload,
@@ -323,8 +363,7 @@ static int a5psw_port_bridge_join(struct dsa_switch *ds, int port,
        }
 
        a5psw->br_dev = bridge.dev;
-       a5psw_flooding_set_resolution(a5psw, port, true);
-       a5psw_port_mgmtfwd_set(a5psw, port, false);
+       a5psw_port_set_standalone(a5psw, port, false);
 
        return 0;
 }
@@ -334,8 +373,7 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 {
        struct a5psw *a5psw = ds->priv;
 
-       a5psw_flooding_set_resolution(a5psw, port, false);
-       a5psw_port_mgmtfwd_set(a5psw, port, true);
+       a5psw_port_set_standalone(a5psw, port, true);
 
        /* No more ports bridged */
        if (a5psw->bridged_ports == BIT(A5PSW_CPU_PORT))
@@ -344,28 +382,35 @@ static void a5psw_port_bridge_leave(struct dsa_switch *ds, int port,
 
 static void a5psw_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
 {
-       u32 mask = A5PSW_INPUT_LEARN_DIS(port) | A5PSW_INPUT_LEARN_BLOCK(port);
+       bool learning_enabled, rx_enabled, tx_enabled;
        struct a5psw *a5psw = ds->priv;
-       u32 reg = 0;
 
        switch (state) {
        case BR_STATE_DISABLED:
        case BR_STATE_BLOCKING:
-               reg |= A5PSW_INPUT_LEARN_DIS(port);
-               reg |= A5PSW_INPUT_LEARN_BLOCK(port);
-               break;
        case BR_STATE_LISTENING:
-               reg |= A5PSW_INPUT_LEARN_DIS(port);
+               rx_enabled = false;
+               tx_enabled = false;
+               learning_enabled = false;
                break;
        case BR_STATE_LEARNING:
-               reg |= A5PSW_INPUT_LEARN_BLOCK(port);
+               rx_enabled = false;
+               tx_enabled = false;
+               learning_enabled = true;
                break;
        case BR_STATE_FORWARDING:
-       default:
+               rx_enabled = true;
+               tx_enabled = true;
+               learning_enabled = true;
                break;
+       default:
+               dev_err(ds->dev, "invalid STP state: %d\n", state);
+               return;
        }
 
-       a5psw_reg_rmw(a5psw, A5PSW_INPUT_LEARN, mask, reg);
+       a5psw_port_learning_set(a5psw, port, learning_enabled);
+       a5psw_port_rx_block_set(a5psw, port, !rx_enabled);
+       a5psw_port_tx_enable(a5psw, port, tx_enabled);
 }
 
 static void a5psw_port_fast_age(struct dsa_switch *ds, int port)
@@ -673,7 +718,7 @@ static int a5psw_setup(struct dsa_switch *ds)
        }
 
        /* Configure management port */
-       reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_DISCARD;
+       reg = A5PSW_CPU_PORT | A5PSW_MGMT_CFG_ENABLE;
        a5psw_reg_writel(a5psw, A5PSW_MGMT_CFG, reg);
 
        /* Set pattern 0 to forward all frame to mgmt port */
@@ -722,13 +767,15 @@ static int a5psw_setup(struct dsa_switch *ds)
                if (dsa_port_is_unused(dp))
                        continue;
 
-               /* Enable egress flooding for CPU port */
-               if (dsa_port_is_cpu(dp))
+               /* Enable egress flooding and learning for CPU port */
+               if (dsa_port_is_cpu(dp)) {
                        a5psw_flooding_set_resolution(a5psw, port, true);
+                       a5psw_port_learning_set(a5psw, port, true);
+               }
 
-               /* Enable management forward only for user ports */
+               /* Enable standalone mode for user ports */
                if (dsa_port_is_user(dp))
-                       a5psw_port_mgmtfwd_set(a5psw, port, true);
+                       a5psw_port_set_standalone(a5psw, port, true);
        }
 
        return 0;
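
The a5psw STP rework decomposes each bridge port state into three independent controls (learning, RX blocking, TX enable) applied through dedicated helpers, and logs rather than forwards on an unknown state. A standalone sketch of the state-to-flags mapping:

    #include <stdbool.h>
    #include <stdio.h>

    enum stp { DISABLED, BLOCKING, LISTENING, LEARNING, FORWARDING };

    static void stp_to_flags(enum stp s, bool *learn, bool *rx, bool *tx)
    {
            switch (s) {
            case DISABLED:
            case BLOCKING:
            case LISTENING:
                    *learn = false; *rx = false; *tx = false;
                    break;
            case LEARNING:
                    *learn = true;  *rx = false; *tx = false;
                    break;
            case FORWARDING:
                    *learn = true;  *rx = true;  *tx = true;
                    break;
            }
    }

    int main(void)
    {
            bool learn, rx, tx;

            stp_to_flags(LEARNING, &learn, &rx, &tx);
            printf("LEARNING: learn=%d rx=%d tx=%d\n", learn, rx, tx);
            return 0;
    }
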
index c67abd4..b869192 100644 (file)
@@ -19,6 +19,7 @@
 #define A5PSW_PORT_OFFSET(port)                (0x400 * (port))
 
 #define A5PSW_PORT_ENA                 0x8
+#define A5PSW_PORT_ENA_TX(port)                BIT(port)
 #define A5PSW_PORT_ENA_RX_SHIFT                16
 #define A5PSW_PORT_ENA_TX_RX(port)     (BIT((port) + A5PSW_PORT_ENA_RX_SHIFT) | \
                                         BIT(port))
@@ -36,7 +37,7 @@
 #define A5PSW_INPUT_LEARN_BLOCK(p)     BIT(p)
 
 #define A5PSW_MGMT_CFG                 0x20
-#define A5PSW_MGMT_CFG_DISCARD         BIT(7)
+#define A5PSW_MGMT_CFG_ENABLE          BIT(6)
 
 #define A5PSW_MODE_CFG                 0x24
 #define A5PSW_MODE_STATS_RESET         BIT(31)
index d2f4358..ba3e7aa 100644 (file)
@@ -66,8 +66,10 @@ static int max_interrupt_work = 20;
 #include <linux/timer.h>
 #include <linux/ethtool.h>
 #include <linux/bitops.h>
-
 #include <linux/uaccess.h>
+
+#include <net/Space.h>
+
 #include <asm/io.h>
 #include <asm/dma.h>
 
index 82f94b1..5267e9d 100644 (file)
@@ -195,6 +195,7 @@ static int tc589_probe(struct pcmcia_device *link)
 {
        struct el3_private *lp;
        struct net_device *dev;
+       int ret;
 
        dev_dbg(&link->dev, "3c589_attach()\n");
 
@@ -218,7 +219,15 @@ static int tc589_probe(struct pcmcia_device *link)
 
        dev->ethtool_ops = &netdev_ethtool_ops;
 
-       return tc589_config(link);
+       ret = tc589_config(link);
+       if (ret)
+               goto err_free_netdev;
+
+       return 0;
+
+err_free_netdev:
+       free_netdev(dev);
+       return ret;
 }
 
 static void tc589_detach(struct pcmcia_device *link)
index 0a9118b..bc9c81d 100644 (file)
@@ -52,6 +52,7 @@ static const char version2[] =
 #include <linux/etherdevice.h>
 #include <linux/jiffies.h>
 #include <linux/platform_device.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 
index 6e62c37..7465650 100644 (file)
@@ -66,6 +66,7 @@ static const char version[] =
 #include <linux/isapnp.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
index 5b00c45..119021d 100644 (file)
@@ -37,6 +37,7 @@ static const char version[] =
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 
index 8971665..6cf3818 100644 (file)
@@ -59,6 +59,7 @@ static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@c
 #include <linux/skbuff.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
+#include <net/Space.h>
 
 #include <asm/io.h>
 #include <asm/dma.h>
index f7c597e..debe521 100644 (file)
@@ -68,9 +68,15 @@ bool pdsc_is_fw_running(struct pdsc *pdsc)
 
 bool pdsc_is_fw_good(struct pdsc *pdsc)
 {
-       u8 gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+       bool fw_running = pdsc_is_fw_running(pdsc);
+       u8 gen;
 
-       return pdsc_is_fw_running(pdsc) && gen == pdsc->fw_generation;
+       /* Make sure to update the cached fw_status by calling
+        * pdsc_is_fw_running() before getting the generation
+        */
+       gen = pdsc->fw_status & PDS_CORE_FW_STS_F_GENERATION;
+
+       return fw_running && gen == pdsc->fw_generation;
 }
 
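The pds_core fix relies on pdsc_is_fw_running() refreshing the cached fw_status as a side effect, so it must run before the generation bits are sampled from that cache; the original sampled first. A standalone model of the ordering bug; the register value and bit layout are invented for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { unsigned char fw_status; unsigned char fw_generation; };

    /* invented register value: bit 0 = running, bit 6 = generation */
    static unsigned char hw_read_status(void) { return 0x41; }

    static bool fw_running(struct dev *d)
    {
            d->fw_status = hw_read_status();    /* side effect: refresh cache */
            return d->fw_status & 0x01;
    }

    static bool fw_good(struct dev *d)
    {
            bool running = fw_running(d);       /* refresh before sampling */
            unsigned char gen = d->fw_status & 0x40;

            return running && gen == d->fw_generation;
    }

    int main(void)
    {
            struct dev d = { .fw_status = 0x00, .fw_generation = 0x40 };

            /* sampling d.fw_status before fw_running() would see stale 0x00 */
            printf("fw_good = %d\n", fw_good(&d));
            return 0;
    }
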
 static u8 pdsc_devcmd_status(struct pdsc *pdsc)
index 33a9574..32d2c6f 100644 (file)
@@ -1329,7 +1329,7 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
        return pdata->phy_if.phy_impl.an_outcome(pdata);
 }
 
-static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+static bool xgbe_phy_status_result(struct xgbe_prv_data *pdata)
 {
        struct ethtool_link_ksettings *lks = &pdata->phy.lks;
        enum xgbe_mode mode;
@@ -1367,8 +1367,13 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
 
        pdata->phy.duplex = DUPLEX_FULL;
 
-       if (xgbe_set_mode(pdata, mode) && pdata->an_again)
+       if (!xgbe_set_mode(pdata, mode))
+               return false;
+
+       if (pdata->an_again)
                xgbe_phy_reconfig_aneg(pdata);
+
+       return true;
 }
 
 static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1398,7 +1403,8 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
                        return;
                }
 
-               xgbe_phy_status_result(pdata);
+               if (xgbe_phy_status_result(pdata))
+                       return;
 
                if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
                        clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
index 38d0cda..bf1611c 100644 (file)
@@ -2531,9 +2531,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        priv->irq0 = platform_get_irq(pdev, 0);
        if (!priv->is_lite) {
                priv->irq1 = platform_get_irq(pdev, 1);
-               priv->wol_irq = platform_get_irq(pdev, 2);
+               priv->wol_irq = platform_get_irq_optional(pdev, 2);
        } else {
-               priv->wol_irq = platform_get_irq(pdev, 1);
+               priv->wol_irq = platform_get_irq_optional(pdev, 1);
        }
        if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
                ret = -EINVAL;
index dcd9367..b499bc9 100644 (file)
@@ -692,7 +692,7 @@ next_tx_int:
 
        __netif_txq_completed_wake(txq, nr_pkts, tx_bytes,
                                   bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
-                                  READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING);
+                                  READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
 }
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
@@ -2365,6 +2365,9 @@ static int bnxt_async_event_process(struct bnxt *bp,
                                struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
                                u64 ns;
 
+                               if (!ptp)
+                                       goto async_event_process_exit;
+
                                spin_lock_bh(&ptp->ptp_lock);
                                bnxt_ptp_update_current_time(bp);
                                ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
@@ -4763,6 +4766,9 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
                if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
                    !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
                        continue;
+               if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE &&
+                   !bp->ptp_cfg)
+                       continue;
                __set_bit(bnxt_async_events_arr[i], async_events_bmap);
        }
        if (bmap && bmap_size) {
@@ -5350,6 +5356,7 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
        if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_QCFG))
                return;
 
+       req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
        /* all contexts configured to same hash_type, zero always exists */
        req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
        resp = hwrm_req_hold(bp, req);
@@ -8812,6 +8819,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
                goto err_out;
        }
 
+       if (BNXT_VF(bp))
+               bnxt_hwrm_func_qcfg(bp);
+
        rc = bnxt_setup_vnic(bp, 0);
        if (rc)
                goto err_out;
@@ -11598,6 +11608,7 @@ static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
 static void bnxt_fw_health_check(struct bnxt *bp)
 {
        struct bnxt_fw_health *fw_health = bp->fw_health;
+       struct pci_dev *pdev = bp->pdev;
        u32 val;
 
        if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
@@ -11611,7 +11622,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
        }
 
        val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
-       if (val == fw_health->last_fw_heartbeat) {
+       if (val == fw_health->last_fw_heartbeat && pci_device_is_present(pdev)) {
                fw_health->arrests++;
                goto fw_reset;
        }
@@ -11619,7 +11630,7 @@ static void bnxt_fw_health_check(struct bnxt *bp)
        fw_health->last_fw_heartbeat = val;
 
        val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
-       if (val != fw_health->last_fw_reset_cnt) {
+       if (val != fw_health->last_fw_reset_cnt && pci_device_is_present(pdev)) {
                fw_health->discoveries++;
                goto fw_reset;
        }
@@ -13025,26 +13036,37 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
 
 #endif /* CONFIG_RFS_ACCEL */
 
-static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
+static int bnxt_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+                                   unsigned int entry, struct udp_tunnel_info *ti)
 {
        struct bnxt *bp = netdev_priv(netdev);
-       struct udp_tunnel_info ti;
        unsigned int cmd;
 
-       udp_tunnel_nic_get_port(netdev, table, 0, &ti);
-       if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
+       if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
        else
                cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
 
-       if (ti.port)
-               return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
+       return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti->port, cmd);
+}
+
+static int bnxt_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+                                     unsigned int entry, struct udp_tunnel_info *ti)
+{
+       struct bnxt *bp = netdev_priv(netdev);
+       unsigned int cmd;
+
+       if (ti->type == UDP_TUNNEL_TYPE_VXLAN)
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
+       else
+               cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
 
        return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
 }
 
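The bnxt changes migrate from the table-wide sync_table callback to per-entry set_port/unset_port callbacks, which receive the udp_tunnel_info directly instead of re-reading the table with udp_tunnel_nic_get_port(). A standalone model of the callback shape, with simplified stand-in types:

    #include <stdio.h>

    struct tunnel_info { int type; int port; };

    /* per-entry callbacks: the core says exactly which entry changed */
    struct tunnel_ops {
            int (*set_port)(const struct tunnel_info *ti);
            int (*unset_port)(const struct tunnel_info *ti);
    };

    static int set_port(const struct tunnel_info *ti)
    {
            printf("program port %d (type %d)\n", ti->port, ti->type);
            return 0;
    }

    static int unset_port(const struct tunnel_info *ti)
    {
            printf("free port %d (type %d)\n", ti->port, ti->type);
            return 0;
    }

    static const struct tunnel_ops ops = { set_port, unset_port };

    int main(void)
    {
            struct tunnel_info vxlan = { .type = 0, .port = 4789 };

            ops.set_port(&vxlan);   /* add: no table re-read needed */
            ops.unset_port(&vxlan); /* remove: the entry is passed in */
            return 0;
    }
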
 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
-       .sync_table     = bnxt_udp_tunnel_sync,
+       .set_port       = bnxt_udp_tunnel_set_port,
+       .unset_port     = bnxt_udp_tunnel_unset_port,
        .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
                          UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
        .tables         = {
index 2dd8ee4..8fd5071 100644 (file)
@@ -3831,7 +3831,7 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
                }
        }
 
-       if (req & BNXT_FW_RESET_AP) {
+       if (!BNXT_CHIP_P4_PLUS(bp) && (req & BNXT_FW_RESET_AP)) {
                /* This feature is not supported in older firmware versions */
                if (bp->hwrm_spec_code >= 0x10803) {
                        if (!bnxt_firmware_reset_ap(dev)) {
index e466891..f388671 100644 (file)
@@ -952,6 +952,7 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
                bnxt_ptp_timecounter_init(bp, true);
                bnxt_ptp_adjfine_rtc(bp, 0);
        }
+       bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
 
        ptp->ptp_info = bnxt_ptp_caps;
        if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) {
index f28ffc3..2b5761a 100644 (file)
@@ -1272,7 +1272,8 @@ static void bcmgenet_get_ethtool_stats(struct net_device *dev,
        }
 }
 
-static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+                            bool tx_lpi_enabled)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
@@ -1292,7 +1293,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 
        /* Enable EEE and switch to a 27Mhz clock automatically */
        reg = bcmgenet_readl(priv->base + off);
-       if (enable)
+       if (tx_lpi_enabled)
                reg |= TBUF_EEE_EN | TBUF_PM_EN;
        else
                reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
@@ -1313,6 +1314,7 @@ static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
 
        priv->eee.eee_enabled = enable;
        priv->eee.eee_active = enable;
+       priv->eee.tx_lpi_enabled = tx_lpi_enabled;
 }
 
 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
@@ -1328,6 +1330,7 @@ static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
 
        e->eee_enabled = p->eee_enabled;
        e->eee_active = p->eee_active;
+       e->tx_lpi_enabled = p->tx_lpi_enabled;
        e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
 
        return phy_ethtool_get_eee(dev->phydev, e);
@@ -1337,7 +1340,6 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct ethtool_eee *p = &priv->eee;
-       int ret = 0;
 
        if (GENET_IS_V1(priv))
                return -EOPNOTSUPP;
@@ -1348,16 +1350,11 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
        p->eee_enabled = e->eee_enabled;
 
        if (!p->eee_enabled) {
-               bcmgenet_eee_enable_set(dev, false);
+               bcmgenet_eee_enable_set(dev, false, false);
        } else {
-               ret = phy_init_eee(dev->phydev, false);
-               if (ret) {
-                       netif_err(priv, hw, dev, "EEE initialization failed\n");
-                       return ret;
-               }
-
+               p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
                bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
-               bcmgenet_eee_enable_set(dev, true);
+               bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
        }
 
        return phy_ethtool_set_eee(dev->phydev, e);
@@ -3450,7 +3447,7 @@ err_clk_disable:
        return ret;
 }
 
-static void bcmgenet_netif_stop(struct net_device *dev)
+static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -3465,6 +3462,8 @@ static void bcmgenet_netif_stop(struct net_device *dev)
        /* Disable MAC transmit. TX DMA disabled must be done before this */
        umac_enable_set(priv, CMD_TX_EN, false);
 
+       if (stop_phy)
+               phy_stop(dev->phydev);
        bcmgenet_disable_rx_napi(priv);
        bcmgenet_intr_disable(priv);
 
@@ -3485,7 +3484,7 @@ static int bcmgenet_close(struct net_device *dev)
 
        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
 
-       bcmgenet_netif_stop(dev);
+       bcmgenet_netif_stop(dev, false);
 
        /* Really kill the PHY state machine and disconnect from it */
        phy_disconnect(dev->phydev);
@@ -4277,9 +4276,6 @@ static int bcmgenet_resume(struct device *d)
        if (!device_may_wakeup(d))
                phy_resume(dev->phydev);
 
-       if (priv->eee.eee_enabled)
-               bcmgenet_eee_enable_set(dev, true);
-
        bcmgenet_netif_start(dev);
 
        netif_device_attach(dev);
@@ -4303,7 +4299,7 @@ static int bcmgenet_suspend(struct device *d)
 
        netif_device_detach(dev);
 
-       bcmgenet_netif_stop(dev);
+       bcmgenet_netif_stop(dev, true);
 
        if (!device_may_wakeup(d))
                phy_suspend(dev->phydev);
index 946f6e2..1985c0e 100644 (file)
@@ -703,4 +703,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
 void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
                               enum bcmgenet_power_mode mode);
 
+void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
+                            bool tx_lpi_enabled);
+
 #endif /* __BCMGENET_H__ */
index be04290..c15ed0a 100644 (file)
@@ -87,6 +87,11 @@ static void bcmgenet_mac_config(struct net_device *dev)
                reg |= CMD_TX_EN | CMD_RX_EN;
        }
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       priv->eee.eee_active = phy_init_eee(phydev, 0) >= 0;
+       bcmgenet_eee_enable_set(dev,
+                               priv->eee.eee_enabled && priv->eee.eee_active,
+                               priv->eee.tx_lpi_enabled);
 }
 
 /* setup netdev link state when PHY link status change and
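
In the GENET EEE rework, eee_active now records whether phy_init_eee() actually succeeded at link time rather than echoing the requested enable, and tx_lpi_enabled is tracked as its own knob. A standalone sketch of that split; phy_init_eee() is stubbed with an arbitrary negative errno for the unsupported case:

    #include <stdbool.h>
    #include <stdio.h>

    struct eee { bool eee_enabled; bool eee_active; bool tx_lpi_enabled; };

    /* stand-in for phy_init_eee(): < 0 means EEE cannot be negotiated */
    static int phy_init_eee_stub(bool partner_has_eee)
    {
            return partner_has_eee ? 0 : -1;
    }

    static void link_up(struct eee *p, bool partner_has_eee)
    {
            /* requested (eee_enabled) and negotiated (eee_active) differ */
            p->eee_active = phy_init_eee_stub(partner_has_eee) >= 0;
    }

    int main(void)
    {
            struct eee p = { .eee_enabled = true, .tx_lpi_enabled = true };

            link_up(&p, false);
            printf("enabled=%d active=%d tx_lpi=%d\n",
                   p.eee_enabled, p.eee_active, p.tx_lpi_enabled);
            return 0;
    }
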
index 06a0c00..276c32c 100644 (file)
@@ -72,6 +72,8 @@
 #include <linux/gfp.h>
 #include <linux/io.h>
 
+#include <net/Space.h>
+
 #include <asm/irq.h>
 #include <linux/atomic.h>
 #if ALLOW_DMA
index 3c4fa26..9e1b253 100644 (file)
@@ -1229,7 +1229,13 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
                if (!skb)
                        break;
 
-               rx_byte_cnt += skb->len;
+               /* When set, the outer VLAN header is extracted and reported
+                * in the receive buffer descriptor, so rx_byte_cnt must also
+                * include the length of the extracted VLAN header.
+                */
+               if (bd_status & ENETC_RXBD_FLAG_VLAN)
+                       rx_byte_cnt += VLAN_HLEN;
+               rx_byte_cnt += skb->len + ETH_HLEN;
                rx_frm_cnt++;
 
                napi_gro_receive(napi, skb);
@@ -1565,6 +1571,14 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
                enetc_build_xdp_buff(rx_ring, bd_status, &rxbd, &i,
                                     &cleaned_cnt, &xdp_buff);
 
+               /* When set, the outer VLAN header is extracted and reported
+                * in the receive buffer descriptor, so rx_byte_cnt must also
+                * include the length of the extracted VLAN header.
+                */
+               if (bd_status & ENETC_RXBD_FLAG_VLAN)
+                       rx_byte_cnt += VLAN_HLEN;
+               rx_byte_cnt += xdp_get_buff_len(&xdp_buff);
+
                xdp_act = bpf_prog_run_xdp(prog, &xdp_buff);
 
                switch (xdp_act) {
index 42ec6ca..38e5b5a 100644 (file)
@@ -3798,7 +3798,6 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        entries_free = fec_enet_get_free_txdesc_num(txq);
        if (entries_free < MAX_SKB_FRAGS + 1) {
                netdev_err(fep->netdev, "NOT enough BD for SG!\n");
-               xdp_return_frame(frame);
                return NETDEV_TX_BUSY;
        }
 
@@ -3835,6 +3834,11 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        index = fec_enet_get_bd_index(last_bdp, &txq->bd);
        txq->tx_skbuff[index] = NULL;
 
+       /* Make sure the updates to the rest of the descriptor are performed
+        * before transferring ownership.
+        */
+       dma_wmb();
+
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
@@ -3844,8 +3848,14 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
 
+       /* Make sure the update to bdp is performed before txq->bd.cur. */
+       dma_wmb();
+
        txq->bd.cur = bdp;
 
+       /* Trigger transmission start */
+       writel(0, txq->bd.reg_desc_active);
+
        return 0;
 }
 
@@ -3874,12 +3884,6 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
                sent_frames++;
        }
 
-       /* Make sure the update to bdp and tx_skbuff are performed. */
-       wmb();
-
-       /* Trigger transmission start */
-       writel(0, txq->bd.reg_desc_active);
-
        __netif_tx_unlock(nq);
 
        return sent_frames;
@@ -4478,9 +4482,11 @@ fec_drv_remove(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        int ret;
 
-       ret = pm_runtime_resume_and_get(&pdev->dev);
+       ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
-               return ret;
+               dev_err(&pdev->dev,
+                       "Failed to resume device in remove callback (%pe)\n",
+                       ERR_PTR(ret));
 
        cancel_work_sync(&fep->tx_timeout_work);
        fec_ptp_stop(pdev);
@@ -4493,8 +4499,13 @@ fec_drv_remove(struct platform_device *pdev)
                of_phy_deregister_fixed_link(np);
        of_node_put(fep->phy_node);
 
-       clk_disable_unprepare(fep->clk_ahb);
-       clk_disable_unprepare(fep->clk_ipg);
+       /* After pm_runtime_get_sync() failed, the clks are still off, so skip
+        * disabling them again.
+        */
+       if (ret >= 0) {
+               clk_disable_unprepare(fep->clk_ahb);
+               clk_disable_unprepare(fep->clk_ipg);
+       }
        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
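The fec hunks replace one batched wmb() plus doorbell with dma_wmb() at the two publication points, after filling the descriptor body and before advancing txq->bd.cur, and ring the doorbell per frame. A compiler-barrier model of publish-after-populate; real device-visible ordering needs dma_wmb(), not the plain barrier used here:

    #include <stdint.h>
    #include <stdio.h>

    #define DESC_READY 0x8000u

    struct desc { uint32_t addr; uint32_t len; volatile uint32_t status; };

    /* GCC/Clang compiler barrier standing in for dma_wmb() */
    #define wmb_model() __asm__ __volatile__("" ::: "memory")

    static void post(struct desc *d, uint32_t addr, uint32_t len)
    {
            d->addr = addr;
            d->len = len;
            wmb_model();            /* body visible before ownership flips */
            d->status = DESC_READY; /* the device may consume from here on */
    }

    int main(void)
    {
            struct desc d = {0};

            post(&d, 0x1000, 64);
            printf("status = %#x\n", d.status);
            return 0;
    }
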
index cbbab5b..b85c412 100644 (file)
@@ -331,9 +331,25 @@ static int hclge_comm_cmd_csq_done(struct hclge_comm_hw *hw)
        return head == hw->cmq.csq.next_to_use;
 }
 
-static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
+static u32 hclge_get_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
+{
+       static const struct hclge_cmdq_tx_timeout_map cmdq_tx_timeout_map[] = {
+               {HCLGE_OPC_CFG_RST_TRIGGER, HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS},
+       };
+       u32 i;
+
+       for (i = 0; i < ARRAY_SIZE(cmdq_tx_timeout_map); i++)
+               if (cmdq_tx_timeout_map[i].opcode == opcode)
+                       return cmdq_tx_timeout_map[i].tx_timeout;
+
+       return tx_timeout;
+}
+
+static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw, u16 opcode,
                                     bool *is_completed)
 {
+       u32 cmdq_tx_timeout = hclge_get_cmdq_tx_timeout(opcode,
+                                                       hw->cmq.tx_timeout);
        u32 timeout = 0;
 
        do {
@@ -343,7 +359,7 @@ static void hclge_comm_wait_for_resp(struct hclge_comm_hw *hw,
                }
                udelay(1);
                timeout++;
-       } while (timeout < hw->cmq.tx_timeout);
+       } while (timeout < cmdq_tx_timeout);
 }
 
 static int hclge_comm_cmd_convert_err_code(u16 desc_ret)
@@ -407,7 +423,8 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
         * if multi descriptors to be sent, use the first one to check
         */
        if (HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
-               hclge_comm_wait_for_resp(hw, &is_completed);
+               hclge_comm_wait_for_resp(hw, le16_to_cpu(desc->opcode),
+                                        &is_completed);
 
        if (!is_completed)
                ret = -EBADE;
@@ -529,7 +546,7 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
        cmdq->crq.desc_num = HCLGE_COMM_NIC_CMQ_DESC_NUM;
 
        /* Setup Tx write back timeout */
-       cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT;
+       cmdq->tx_timeout = HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT;
 
        /* Setup queue rings */
        ret = hclge_comm_alloc_cmd_queue(hw, HCLGE_COMM_TYPE_CSQ);
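
The hclge change replaces the single queue-wide command timeout with a per-opcode lookup that falls back to the default for unlisted opcodes. A self-contained sketch of that table-with-fallback lookup, using invented opcode and timeout values in place of the real hclge_comm_cmd.h constants:

    #include <stdint.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* Hypothetical values; the real ones live in hclge_comm_cmd.h. */
    #define OPC_CFG_RST_TRIGGER 0x1001u
    #define TIMEOUT_DEFAULT_US  30000u
    #define TIMEOUT_500MS_US    500000u

    struct timeout_map { uint32_t opcode; uint32_t tx_timeout; };

    static uint32_t get_cmdq_tx_timeout(uint16_t opcode, uint32_t fallback)
    {
            static const struct timeout_map map[] = {
                    { OPC_CFG_RST_TRIGGER, TIMEOUT_500MS_US },
            };

            for (size_t i = 0; i < ARRAY_SIZE(map); i++)
                    if (map[i].opcode == opcode)
                            return map[i].tx_timeout;

            return fallback;   /* unlisted opcodes keep the queue-wide default */
    }

    int main(void)
    {
            printf("%u\n", get_cmdq_tx_timeout(OPC_CFG_RST_TRIGGER, TIMEOUT_DEFAULT_US));
            printf("%u\n", get_cmdq_tx_timeout(0x0001, TIMEOUT_DEFAULT_US));
            return 0;
    }
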
index de72ecb..18f1b4b 100644 (file)
@@ -54,7 +54,8 @@
 #define HCLGE_COMM_NIC_SW_RST_RDY              BIT(HCLGE_COMM_NIC_SW_RST_RDY_B)
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM_S          3
 #define HCLGE_COMM_NIC_CMQ_DESC_NUM            1024
-#define HCLGE_COMM_CMDQ_TX_TIMEOUT             30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_DEFAULT     30000
+#define HCLGE_COMM_CMDQ_TX_TIMEOUT_500MS       500000
 
 enum hclge_opcode_type {
        /* Generic commands */
@@ -360,6 +361,11 @@ struct hclge_comm_caps_bit_map {
        u16 local_bit;
 };
 
+struct hclge_cmdq_tx_timeout_map {
+       u32 opcode;
+       u32 tx_timeout;
+};
+
 struct hclge_comm_firmware_compat_cmd {
        __le32 compat;
        u8 rsv[20];
index 4c3e90a..d385ffc 100644 (file)
@@ -130,7 +130,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
                .name = "tx_bd_queue",
                .cmd = HNAE3_DBG_CMD_TX_BD,
                .dentry = HNS3_DBG_DENTRY_TX_BD,
-               .buf_len = HNS3_DBG_READ_LEN_4MB,
+               .buf_len = HNS3_DBG_READ_LEN_5MB,
                .init = hns3_dbg_bd_file_init,
        },
        {
index 97578ea..4a5ef8a 100644 (file)
@@ -10,6 +10,7 @@
 #define HNS3_DBG_READ_LEN_128KB        0x20000
 #define HNS3_DBG_READ_LEN_1MB  0x100000
 #define HNS3_DBG_READ_LEN_4MB  0x400000
+#define HNS3_DBG_READ_LEN_5MB  0x500000
 #define HNS3_DBG_WRITE_LEN     1024
 
 #define HNS3_DBG_DATA_STR_LEN  32
index 4fb5406..2689b10 100644 (file)
@@ -8053,12 +8053,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        /* If this is not a PF reset or FLR, the firmware will disable the MAC,
         * so we only need to stop the PHY here.
         */
-       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
-           hdev->reset_type != HNAE3_FUNC_RESET &&
-           hdev->reset_type != HNAE3_FLR_RESET) {
-               hclge_mac_stop_phy(hdev);
-               hclge_update_link_status(hdev);
-               return;
+       if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+               hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+                                      HCLGE_PFC_DISABLE);
+               if (hdev->reset_type != HNAE3_FUNC_RESET &&
+                   hdev->reset_type != HNAE3_FLR_RESET) {
+                       hclge_mac_stop_phy(hdev);
+                       hclge_update_link_status(hdev);
+                       return;
+               }
        }
 
        hclge_reset_tqp(handle);
index 4a33f65..922c0da 100644 (file)
@@ -171,8 +171,8 @@ int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
        return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
 
-static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
-                                 u8 pfc_bitmap)
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+                          u8 pfc_bitmap)
 {
        struct hclge_desc desc;
        struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
index 68f28a9..dd6f1fd 100644 (file)
@@ -164,6 +164,9 @@ struct hclge_bp_to_qs_map_cmd {
        u32 rsvd1;
 };
 
+#define HCLGE_PFC_DISABLE      0
+#define HCLGE_PFC_TX_RX_DISABLE        0
+
 struct hclge_pfc_en_cmd {
        u8 tx_rx_en_bitmap;
        u8 pri_en_bitmap;
@@ -235,6 +238,8 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
 void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+                          u8 pfc_bitmap);
 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
index f240462..dd08989 100644 (file)
@@ -1436,7 +1436,10 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
         * might happen in case the reset assertion was made by the PF. Yes, this
         * also means we might end up waiting a bit longer, even for a VF reset.
         */
-       msleep(5000);
+       if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+               msleep(5000);
+       else
+               msleep(500);
 
        return 0;
 }
index 9afbbda..7c0578b 100644 (file)
@@ -2238,11 +2238,6 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
                iavf_process_config(adapter);
                adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES;
 
-               /* Request VLAN offload settings */
-               if (VLAN_V2_ALLOWED(adapter))
-                       iavf_set_vlan_offload_features(adapter, 0,
-                                                      netdev->features);
-
                iavf_set_queue_vlan_tag_loc(adapter);
 
                was_mac_changed = !ether_addr_equal(netdev->dev_addr,
index 0157f6e..eb2dc09 100644 (file)
@@ -5160,7 +5160,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
  */
 int
 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
-                u16 bus_addr, __le16 addr, u8 params, u8 *data,
+                u16 bus_addr, __le16 addr, u8 params, const u8 *data,
                 struct ice_sq_cd *cd)
 {
        struct ice_aq_desc desc = { 0 };
index 8ba5f93..81961a7 100644 (file)
@@ -229,7 +229,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
                struct ice_sq_cd *cd);
 int
 ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
-                u16 bus_addr, __le16 addr, u8 params, u8 *data,
+                u16 bus_addr, __le16 addr, u8 params, const u8 *data,
                 struct ice_sq_cd *cd);
 bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
 #endif /* _ICE_COMMON_H_ */
index c6d4926..850db8e 100644 (file)
@@ -932,10 +932,9 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
        if ((first->tx_flags & ICE_TX_FLAGS_HW_VLAN ||
             first->tx_flags & ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN) ||
            skb->priority != TC_PRIO_CONTROL) {
-               first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
+               first->vid &= ~VLAN_PRIO_MASK;
                /* Mask the lower 3 bits to set the 802.1p priority */
-               first->tx_flags |= (skb->priority & 0x7) <<
-                                  ICE_TX_FLAGS_VLAN_PR_S;
+               first->vid |= (skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
                /* if this is not already set it means a VLAN 0 + priority needs
                 * to be offloaded
                 */
index 2ea8a2b..bd0ed15 100644 (file)
@@ -16,8 +16,8 @@
  * * number of bytes written - success
  * * negative - error code
  */
-static unsigned int
-ice_gnss_do_write(struct ice_pf *pf, unsigned char *buf, unsigned int size)
+static int
+ice_gnss_do_write(struct ice_pf *pf, const unsigned char *buf, unsigned int size)
 {
        struct ice_aqc_link_topo_addr link_topo;
        struct ice_hw *hw = &pf->hw;
@@ -72,39 +72,7 @@ err_out:
        dev_err(ice_pf_to_dev(pf), "GNSS failed to write, offset=%u, size=%u, err=%d\n",
                offset, size, err);
 
-       return offset;
-}
-
-/**
- * ice_gnss_write_pending - Write all pending data to internal GNSS
- * @work: GNSS write work structure
- */
-static void ice_gnss_write_pending(struct kthread_work *work)
-{
-       struct gnss_serial *gnss = container_of(work, struct gnss_serial,
-                                               write_work);
-       struct ice_pf *pf = gnss->back;
-
-       if (!pf)
-               return;
-
-       if (!test_bit(ICE_FLAG_GNSS, pf->flags))
-               return;
-
-       if (!list_empty(&gnss->queue)) {
-               struct gnss_write_buf *write_buf = NULL;
-               unsigned int bytes;
-
-               write_buf = list_first_entry(&gnss->queue,
-                                            struct gnss_write_buf, queue);
-
-               bytes = ice_gnss_do_write(pf, write_buf->buf, write_buf->size);
-               dev_dbg(ice_pf_to_dev(pf), "%u bytes written to GNSS\n", bytes);
-
-               list_del(&write_buf->queue);
-               kfree(write_buf->buf);
-               kfree(write_buf);
-       }
+       return err;
 }
 
 /**
@@ -220,8 +188,6 @@ static struct gnss_serial *ice_gnss_struct_init(struct ice_pf *pf)
        pf->gnss_serial = gnss;
 
        kthread_init_delayed_work(&gnss->read_work, ice_gnss_read);
-       INIT_LIST_HEAD(&gnss->queue);
-       kthread_init_work(&gnss->write_work, ice_gnss_write_pending);
        kworker = kthread_create_worker(0, "ice-gnss-%s", dev_name(dev));
        if (IS_ERR(kworker)) {
                kfree(gnss);
@@ -281,7 +247,6 @@ static void ice_gnss_close(struct gnss_device *gdev)
        if (!gnss)
                return;
 
-       kthread_cancel_work_sync(&gnss->write_work);
        kthread_cancel_delayed_work_sync(&gnss->read_work);
 }
 
@@ -300,10 +265,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
               size_t count)
 {
        struct ice_pf *pf = gnss_get_drvdata(gdev);
-       struct gnss_write_buf *write_buf;
        struct gnss_serial *gnss;
-       unsigned char *cmd_buf;
-       int err = count;
 
        /* We cannot write a single byte using our I2C implementation. */
        if (count <= 1 || count > ICE_GNSS_TTY_WRITE_BUF)
@@ -319,24 +281,7 @@ ice_gnss_write(struct gnss_device *gdev, const unsigned char *buf,
        if (!gnss)
                return -ENODEV;
 
-       cmd_buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);
-       if (!cmd_buf)
-               return -ENOMEM;
-
-       memcpy(cmd_buf, buf, count);
-       write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
-       if (!write_buf) {
-               kfree(cmd_buf);
-               return -ENOMEM;
-       }
-
-       write_buf->buf = cmd_buf;
-       write_buf->size = count;
-       INIT_LIST_HEAD(&write_buf->queue);
-       list_add_tail(&write_buf->queue, &gnss->queue);
-       kthread_queue_work(gnss->kworker, &gnss->write_work);
-
-       return err;
+       return ice_gnss_do_write(pf, buf, count);
 }
 
 static const struct gnss_operations ice_gnss_ops = {
@@ -432,7 +377,6 @@ void ice_gnss_exit(struct ice_pf *pf)
        if (pf->gnss_serial) {
                struct gnss_serial *gnss = pf->gnss_serial;
 
-               kthread_cancel_work_sync(&gnss->write_work);
                kthread_cancel_delayed_work_sync(&gnss->read_work);
                kthread_destroy_worker(gnss->kworker);
                gnss->kworker = NULL;
index b8bb8b6..75e567a 100644 (file)
  */
 #define ICE_GNSS_UBX_WRITE_BYTES       (ICE_MAX_I2C_WRITE_BYTES + 1)
 
-struct gnss_write_buf {
-       struct list_head queue;
-       unsigned int size;
-       unsigned char *buf;
-};
-
 /**
  * struct gnss_serial - data used to initialize GNSS TTY port
  * @back: back pointer to PF
  * @kworker: kwork thread for handling periodic work
  * @read_work: read_work function for handling GNSS reads
- * @write_work: write_work function for handling GNSS writes
- * @queue: write buffers queue
  */
 struct gnss_serial {
        struct ice_pf *back;
        struct kthread_worker *kworker;
        struct kthread_delayed_work read_work;
-       struct kthread_work write_work;
-       struct list_head queue;
 };
 
 #if IS_ENABLED(CONFIG_GNSS)
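
The GNSS rework above removes the kthread write queue entirely: ice_gnss_write() now calls the synchronous writer directly rather than copying the buffer, queueing it, and waking a worker that must later be cancelled. A sketch of the resulting control flow, where do_write() is a hypothetical stand-in for ice_gnss_do_write():

    #include <errno.h>
    #include <stddef.h>

    /* Hypothetical transport; models ice_gnss_do_write(), which now returns
     * the number of bytes written on success or a negative errno. */
    static int do_write(const unsigned char *buf, size_t count)
    {
            (void)buf;
            return (int)count;            /* pretend the write succeeded */
    }

    /* After the rework the .write_raw hook validates and writes in place:
     * no allocation, no list, no worker thread to cancel on close. */
    static int gnss_write(const unsigned char *buf, size_t count)
    {
            if (count <= 1)               /* single-byte I2C writes impossible */
                    return -EINVAL;

            return do_write(buf, count);
    }

    int main(void)
    {
            unsigned char msg[] = { 0x01, 0x02, 0x03 };

            return gnss_write(msg, sizeof(msg)) < 0;
    }
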
index 450317d..11ae0e4 100644 (file)
@@ -2745,6 +2745,8 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                        goto unroll_vector_base;
 
                ice_vsi_map_rings_to_vectors(vsi);
+               vsi->stat_offsets_loaded = false;
+
                if (ice_is_xdp_ena_vsi(vsi)) {
                        ret = ice_vsi_determine_xdp_res(vsi);
                        if (ret)
@@ -2793,6 +2795,9 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
                ret = ice_vsi_alloc_ring_stats(vsi);
                if (ret)
                        goto unroll_vector_base;
+
+               vsi->stat_offsets_loaded = false;
+
                /* Do not exit if configuring RSS had an issue; at least allow
                 * traffic to be received on the first queue. Hence there is no
                 * need to capture the return value.
index f1dca59..588ad86 100644 (file)
@@ -1171,7 +1171,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
        if (!vf)
                return -EINVAL;
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1286,7 +1286,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
                goto out_put_vf;
        }
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1340,7 +1340,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
                return -EOPNOTSUPP;
        }
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
@@ -1653,7 +1653,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
        if (!vf)
                return -EINVAL;
 
-       ret = ice_check_vf_ready_for_cfg(vf);
+       ret = ice_check_vf_ready_for_reset(vf);
        if (ret)
                goto out_put_vf;
 
index 4fcf2d0..52d0a12 100644 (file)
@@ -1152,11 +1152,11 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
        unsigned int offset = rx_ring->rx_offset;
        struct xdp_buff *xdp = &rx_ring->xdp;
+       u32 cached_ntc = rx_ring->first_desc;
        struct ice_tx_ring *xdp_ring = NULL;
        struct bpf_prog *xdp_prog = NULL;
        u32 ntc = rx_ring->next_to_clean;
        u32 cnt = rx_ring->count;
-       u32 cached_ntc = ntc;
        u32 xdp_xmit = 0;
        u32 cached_ntu;
        bool failure;
@@ -1664,8 +1664,7 @@ ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
 
        if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
                td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
-               td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
-                         ICE_TX_FLAGS_VLAN_S;
+               td_tag = first->vid;
        }
 
        dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -1998,7 +1997,7 @@ ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
         * VLAN offloads exclusively so we only care about the VLAN ID here
         */
        if (skb_vlan_tag_present(skb)) {
-               first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
+               first->vid = skb_vlan_tag_get(skb);
                if (tx_ring->flags & ICE_TX_FLAGS_RING_VLAN_L2TAG2)
                        first->tx_flags |= ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN;
                else
@@ -2388,8 +2387,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
                offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
                                        (ICE_TX_CTX_DESC_IL2TAG2 <<
                                        ICE_TXD_CTX_QW1_CMD_S));
-               offload.cd_l2tag2 = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
-                       ICE_TX_FLAGS_VLAN_S;
+               offload.cd_l2tag2 = first->vid;
        }
 
        /* set up TSO offload */
index fff0efe..166413f 100644 (file)
@@ -127,10 +127,6 @@ static inline int ice_skb_pad(void)
 #define ICE_TX_FLAGS_IPV6      BIT(6)
 #define ICE_TX_FLAGS_TUNNEL    BIT(7)
 #define ICE_TX_FLAGS_HW_OUTER_SINGLE_VLAN      BIT(8)
-#define ICE_TX_FLAGS_VLAN_M    0xffff0000
-#define ICE_TX_FLAGS_VLAN_PR_M 0xe0000000
-#define ICE_TX_FLAGS_VLAN_PR_S 29
-#define ICE_TX_FLAGS_VLAN_S    16
 
 #define ICE_XDP_PASS           0
 #define ICE_XDP_CONSUMED       BIT(0)
@@ -182,8 +178,9 @@ struct ice_tx_buf {
                unsigned int gso_segs;
                unsigned int nr_frags;  /* used for mbuf XDP */
        };
-       u32 type:16;                    /* &ice_tx_buf_type */
-       u32 tx_flags:16;
+       u32 tx_flags:12;
+       u32 type:4;                     /* &ice_tx_buf_type */
+       u32 vid:16;
        DEFINE_DMA_UNMAP_LEN(len);
        DEFINE_DMA_UNMAP_ADDR(dma);
 };
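
These ice hunks stop packing the VLAN tag into the upper 16 bits of tx_flags and give struct ice_tx_buf a dedicated vid field instead, shrinking tx_flags and type to make room. A before/after sketch, reusing the mask values deleted above; the struct layout here is illustrative only:

    #include <stdint.h>

    /* Old scheme (removed above): VLAN ID packed into the top of tx_flags. */
    #define TX_FLAGS_VLAN_S   16
    #define TX_FLAGS_VLAN_M   0xffff0000u

    static uint16_t old_get_vid(uint32_t tx_flags)
    {
            return (tx_flags & TX_FLAGS_VLAN_M) >> TX_FLAGS_VLAN_S;
    }

    /* New scheme: a dedicated bitfield, no shifting or masking at use sites. */
    struct tx_buf {
            uint32_t tx_flags : 12;
            uint32_t type     : 4;
            uint32_t vid      : 16;
    };

    static uint16_t new_get_vid(const struct tx_buf *b)
    {
            return b->vid;
    }
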
index 89fd698..bf74a2f 100644 (file)
@@ -186,6 +186,25 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
 }
 
 /**
+ * ice_check_vf_ready_for_reset - check if VF is ready to be reset
+ * @vf: VF to check if it's ready to be reset
+ *
+ * The purpose of this function is to ensure that the VF is not in reset or
+ * disabled, and is both initialized and active, so it is safe to initiate
+ * another reset.
+ */
+int ice_check_vf_ready_for_reset(struct ice_vf *vf)
+{
+       int ret;
+
+       ret = ice_check_vf_ready_for_cfg(vf);
+       if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
+               ret = -EAGAIN;
+
+       return ret;
+}
+
+/**
  * ice_trigger_vf_reset - Reset a VF on HW
  * @vf: pointer to the VF structure
  * @is_vflr: true if VFLR was issued, false if not
index e3cda6f..a38ef00 100644 (file)
@@ -215,6 +215,7 @@ u16 ice_get_num_vfs(struct ice_pf *pf);
 struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
 bool ice_is_vf_disabled(struct ice_vf *vf);
 int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+int ice_check_vf_ready_for_reset(struct ice_vf *vf);
 void ice_set_vf_state_dis(struct ice_vf *vf);
 bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf);
 void
index 97243c6..f4a524f 100644 (file)
@@ -3955,6 +3955,7 @@ error_handler:
                ice_vc_notify_vf_link_state(vf);
                break;
        case VIRTCHNL_OP_RESET_VF:
+               clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
                ops->reset_vf(vf);
                break;
        case VIRTCHNL_OP_ADD_ETH_ADDR:
index 205d577..caf91c6 100644 (file)
@@ -426,7 +426,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
 static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
 {
        u32 hash_value, hash_mask;
-       u8 bit_shift = 0;
+       u8 bit_shift = 1;
 
        /* Register count multiplied by bits per register */
        hash_mask = (hw->mac.mta_reg_count * 32) - 1;
@@ -434,7 +434,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
        /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
         * where 0xFF would still fall within the hash mask.
         */
-       while (hash_mask >> bit_shift != 0xFF)
+       while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
                bit_shift++;
 
        /* The portion of the address that is used for the hash table
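
The igb fix starts bit_shift at 1 and bounds the scan, so a malformed hash_mask can no longer leave the shift at zero or let the loop spin. A compact model of the hash computation after the fix, for the default mc_filter_type of 0; the register count below is an invented example:

    #include <stdint.h>
    #include <stdio.h>

    /* Models igb_hash_mc_addr() for mc_filter_type 0 after the fix. */
    static uint32_t hash_mc_addr(const uint8_t mc_addr[6], uint32_t mta_reg_count)
    {
            uint32_t hash_mask = (mta_reg_count * 32) - 1;
            uint8_t bit_shift = 1;        /* was 0; now always at least 1 */

            while (hash_mask >> bit_shift != 0xFF && bit_shift < 4)
                    bit_shift++;

            return hash_mask & (((uint32_t)mc_addr[4] >> (8 - bit_shift)) |
                                ((uint32_t)mc_addr[5] << bit_shift));
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

            /* 128 registers * 32 bits => hash_mask 0xFFF, bit_shift stops at 4 */
            printf("hash = 0x%x\n", hash_mc_addr(mac, 128));
            return 0;
    }
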
index 5d83c88..1726297 100644 (file)
@@ -1256,7 +1256,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        if (!__netif_txq_completed_wake(txq, total_packets, total_bytes,
                                        ixgbe_desc_unused(tx_ring),
                                        TX_WAKE_THRESHOLD,
-                                       netif_carrier_ok(tx_ring->netdev) &&
+                                       !netif_carrier_ok(tx_ring->netdev) ||
                                        test_bit(__IXGBE_DOWN, &adapter->state)))
                ++tx_ring->tx_stats.restart_queue;
 
index 7045fed..7af223b 100644 (file)
@@ -652,9 +652,7 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
                                htons(ext->lso_sb - skb_network_offset(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        ext->lso_format = pfvf->hw.lso_tsov6_idx;
-
-                       ipv6_hdr(skb)->payload_len =
-                               htons(ext->lso_sb - skb_network_offset(skb));
+                       ipv6_hdr(skb)->payload_len = htons(tcp_hdrlen(skb));
                } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
                        __be16 l3_proto = vlan_get_protocol(skb);
                        struct udphdr *udph = udp_hdr(skb);
index a75fd07..834c644 100644 (file)
@@ -3269,18 +3269,14 @@ static int mtk_open(struct net_device *dev)
                        eth->dsa_meta[i] = md_dst;
                }
        } else {
-               /* Hardware special tag parsing needs to be disabled if at least
-                * one MAC does not use DSA.
+               /* Hardware DSA untagging and VLAN RX offloading need to be
+                * disabled if at least one MAC does not use DSA.
                 */
                u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
 
                val &= ~MTK_CDMP_STAG_EN;
                mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
 
-               val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
-               val &= ~MTK_CDMQ_STAG_EN;
-               mtk_w32(eth, val, MTK_CDMQ_IG_CTRL);
-
                mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
        }
 
index d53de39..d532883 100644 (file)
@@ -1920,9 +1920,10 @@ static void mlx5_cmd_err_trace(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod
 static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                           u32 syndrome, int err)
 {
+       const char *namep = mlx5_command_str(opcode);
        struct mlx5_cmd_stats *stats;
 
-       if (!err)
+       if (!err || !(strcmp(namep, "unknown command opcode")))
                return;
 
        stats = &dev->cmd.stats[opcode];
index f404978..7c0f2ad 100644 (file)
@@ -490,7 +490,7 @@ static void poll_trace(struct mlx5_fw_tracer *tracer,
                                (u64)timestamp_low;
                break;
        default:
-               if (tracer_event->event_id >= tracer->str_db.first_string_trace ||
+               if (tracer_event->event_id >= tracer->str_db.first_string_trace &&
                    tracer_event->event_id <= tracer->str_db.first_string_trace +
                                              tracer->str_db.num_string_trace) {
                        tracer_event->type = TRACER_EVENT_TYPE_STRING;
index b8987a4..8e999f2 100644 (file)
@@ -327,6 +327,7 @@ struct mlx5e_params {
        unsigned int sw_mtu;
        int hard_mtu;
        bool ptp_rx;
+       __be32 terminate_lkey_be;
 };
 
 static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
index 7ac1ad9..7e8e96c 100644 (file)
@@ -51,7 +51,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
        if (err)
                goto out;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
                port_buffer->buffer[i].lossy =
                        MLX5_GET(bufferx_reg, buffer, lossy);
@@ -73,14 +73,24 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
                          port_buffer->buffer[i].lossy);
        }
 
-       port_buffer->headroom_size = total_used;
+       port_buffer->internal_buffers_size = 0;
+       for (i = MLX5E_MAX_NETWORK_BUFFER; i < MLX5E_TOTAL_BUFFERS; i++) {
+               buffer = MLX5_ADDR_OF(pbmc_reg, out, buffer[i]);
+               port_buffer->internal_buffers_size +=
+                       MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
+       }
+
        port_buffer->port_buffer_size =
                MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
-       port_buffer->spare_buffer_size =
-               port_buffer->port_buffer_size - total_used;
-
-       mlx5e_dbg(HW, priv, "total buffer size=%d, spare buffer size=%d\n",
-                 port_buffer->port_buffer_size,
+       port_buffer->headroom_size = total_used;
+       port_buffer->spare_buffer_size = port_buffer->port_buffer_size -
+                                        port_buffer->internal_buffers_size -
+                                        port_buffer->headroom_size;
+
+       mlx5e_dbg(HW, priv,
+                 "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
+                 port_buffer->port_buffer_size, port_buffer->headroom_size,
+                 port_buffer->internal_buffers_size,
                  port_buffer->spare_buffer_size);
 out:
        kfree(out);
@@ -206,11 +216,11 @@ static int port_update_pool_cfg(struct mlx5_core_dev *mdev,
        if (!MLX5_CAP_GEN(mdev, sbcam_reg))
                return 0;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
                lossless_buff_count += ((port_buffer->buffer[i].size) &&
                                       (!(port_buffer->buffer[i].lossy)));
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count);
                err = mlx5e_port_set_sbcm(mdev, 0, i,
                                          MLX5_INGRESS_DIR,
@@ -293,7 +303,7 @@ static int port_set_buffer(struct mlx5e_priv *priv,
        if (err)
                goto out;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
                u64 size = port_buffer->buffer[i].size;
                u64 xoff = port_buffer->buffer[i].xoff;
@@ -351,7 +361,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
 {
        int i;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                if (port_buffer->buffer[i].lossy) {
                        port_buffer->buffer[i].xoff = 0;
                        port_buffer->buffer[i].xon  = 0;
@@ -408,7 +418,7 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
        int err;
        int i;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                prio_count = 0;
                lossy_count = 0;
 
@@ -432,11 +442,11 @@ static int update_buffer_lossy(struct mlx5_core_dev *mdev,
        }
 
        if (changed) {
-               err = port_update_pool_cfg(mdev, port_buffer);
+               err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
                if (err)
                        return err;
 
-               err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
+               err = port_update_pool_cfg(mdev, port_buffer);
                if (err)
                        return err;
 
@@ -515,7 +525,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
        if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
                update_prio2buffer = true;
-               for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+               for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
                        mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
                                  __func__, i, prio2buffer[i]);
 
@@ -530,7 +540,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
        }
 
        if (change & MLX5E_PORT_BUFFER_SIZE) {
-               for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+               for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                        mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
                        if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
                                mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
@@ -544,7 +554,9 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
 
                mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
 
-               if (total_used > port_buffer.port_buffer_size)
+               if (total_used > port_buffer.headroom_size &&
+                   (total_used - port_buffer.headroom_size) >
+                           port_buffer.spare_buffer_size)
                        return -EINVAL;
 
                update_buffer = true;
index a6ef118..f4a19ff 100644 (file)
@@ -35,7 +35,8 @@
 #include "en.h"
 #include "port.h"
 
-#define MLX5E_MAX_BUFFER 8
+#define MLX5E_MAX_NETWORK_BUFFER 8
+#define MLX5E_TOTAL_BUFFERS 10
 #define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
 
 #define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
@@ -60,8 +61,9 @@ struct mlx5e_bufferx_reg {
 struct mlx5e_port_buffer {
        u32                       port_buffer_size;
        u32                       spare_buffer_size;
-       u32                       headroom_size;
-       struct mlx5e_bufferx_reg  buffer[MLX5E_MAX_BUFFER];
+       u32                       headroom_size;          /* Buffers 0-7 */
+       u32                       internal_buffers_size;  /* Buffers 8-9 */
+       struct mlx5e_bufferx_reg  buffer[MLX5E_MAX_NETWORK_BUFFER];
 };
 
 int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
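
With buffers 8-9 now tracked as internal_buffers_size, the spare space is what remains of the port buffer after subtracting both the headroom (buffers 0-7) and the internal buffers, and a manual configuration is rejected only when the headroom requested beyond the current headroom cannot be covered by the spare space. A small arithmetic sketch with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Illustrative values; the real ones come from the PBMC register. */
            uint32_t port_buffer_size      = 1024 * 1024;  /* total            */
            uint32_t headroom_size         =  600 * 1024;  /* buffers 0-7      */
            uint32_t internal_buffers_size =   64 * 1024;  /* buffers 8-9      */

            uint32_t spare = port_buffer_size - internal_buffers_size -
                             headroom_size;

            uint32_t total_used = 700 * 1024;              /* requested headroom */
            int ok = !(total_used > headroom_size &&
                       total_used - headroom_size > spare);

            printf("spare=%u, request %s\n", spare, ok ? "fits" : "rejected");
            return 0;
    }
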
index eb5abd0..3cbebfb 100644 (file)
@@ -175,6 +175,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
        /* ensure cq space is freed before enabling more cqes */
        wmb();
 
+       mlx5e_txqsq_wake(&ptpsq->txqsq);
+
        return work_done == budget;
 }
 
index fc923a9..0380a04 100644 (file)
@@ -84,7 +84,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
 
 int
 mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
-                       struct flow_action *flow_action,
+                       struct flow_action *flow_action, int from, int to,
                        struct mlx5_flow_attr *attr,
                        enum mlx5_flow_namespace_type ns_type)
 {
@@ -96,6 +96,11 @@ mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
        priv = parse_state->flow->priv;
 
        flow_action_for_each(i, act, flow_action) {
+               if (i < from)
+                       continue;
+               else if (i > to)
+                       break;
+
                tc_act = mlx5e_tc_act_get(act->id, ns_type);
                if (!tc_act || !tc_act->post_parse)
                        continue;
index 0e6e187..d6c12d0 100644 (file)
@@ -112,7 +112,7 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
 
 int
 mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
-                       struct flow_action *flow_action,
+                       struct flow_action *flow_action, int from, int to,
                        struct mlx5_flow_attr *attr,
                        enum mlx5_flow_namespace_type ns_type);
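
mlx5e_tc_act_post_parse() now takes a [from, to] window, and parse_tc_actions() advances i_split past each split point, so every action is post-parsed exactly once even when one flow_action array is split across several attrs. A self-contained model of the windowed loop:

    #include <stdio.h>

    static void post_parse(int id) { printf("post-parse action %d\n", id); }

    /* Models the new range-limited loop in mlx5e_tc_act_post_parse(): only
     * actions with index in [from, to] are visited, so a flow split into
     * several attrs post-parses each action exactly once. */
    static void post_parse_range(const int *ids, int n, int from, int to)
    {
            for (int i = 0; i < n; i++) {
                    if (i < from)
                            continue;
                    if (i > to)
                            break;
                    post_parse(ids[i]);
            }
    }

    int main(void)
    {
            int ids[] = { 10, 11, 12, 13 };

            post_parse_range(ids, 4, 1, 2);   /* visits 11 and 12 only */
            return 0;
    }
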
 
index 20c2d2e..f0c3464 100644 (file)
@@ -492,6 +492,19 @@ void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
        mlx5e_encap_dealloc(priv, e);
 }
 
+static void mlx5e_encap_put_locked(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+       lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
+       if (!refcount_dec_and_test(&e->refcnt))
+               return;
+       list_del(&e->route_list);
+       hash_del_rcu(&e->encap_hlist);
+       mlx5e_encap_dealloc(priv, e);
+}
+
 static void mlx5e_decap_put(struct mlx5e_priv *priv, struct mlx5e_decap_entry *d)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -816,6 +829,8 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        uintptr_t hash_key;
        int err = 0;
 
+       lockdep_assert_held(&esw->offloads.encap_tbl_lock);
+
        parse_attr = attr->parse_attr;
        tun_info = parse_attr->tun_info[out_index];
        mpls_info = &parse_attr->mpls_info[out_index];
@@ -829,7 +844,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
        hash_key = hash_encap_info(&key);
 
-       mutex_lock(&esw->offloads.encap_tbl_lock);
        e = mlx5e_encap_get(priv, &key, hash_key);
 
        /* must verify if encap is valid or not */
@@ -840,15 +854,6 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
                        goto out_err;
                }
 
-               mutex_unlock(&esw->offloads.encap_tbl_lock);
-               wait_for_completion(&e->res_ready);
-
-               /* Protect against concurrent neigh update. */
-               mutex_lock(&esw->offloads.encap_tbl_lock);
-               if (e->compl_result < 0) {
-                       err = -EREMOTEIO;
-                       goto out_err;
-               }
                goto attach_flow;
        }
 
@@ -877,15 +882,12 @@ int mlx5e_attach_encap(struct mlx5e_priv *priv,
        INIT_LIST_HEAD(&e->flows);
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
        tbl_time_before = mlx5e_route_tbl_get_last_update(priv);
-       mutex_unlock(&esw->offloads.encap_tbl_lock);
 
        if (family == AF_INET)
                err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
        else if (family == AF_INET6)
                err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
 
-       /* Protect against concurrent neigh update. */
-       mutex_lock(&esw->offloads.encap_tbl_lock);
        complete_all(&e->res_ready);
        if (err) {
                e->compl_result = err;
@@ -920,18 +922,15 @@ attach_flow:
        } else {
                flow_flag_set(flow, SLOW);
        }
-       mutex_unlock(&esw->offloads.encap_tbl_lock);
 
        return err;
 
 out_err:
-       mutex_unlock(&esw->offloads.encap_tbl_lock);
        if (e)
-               mlx5e_encap_put(priv, e);
+               mlx5e_encap_put_locked(priv, e);
        return err;
 
 out_err_init:
-       mutex_unlock(&esw->offloads.encap_tbl_lock);
        kfree(tun_info);
        kfree(e);
        return err;
@@ -1016,6 +1015,93 @@ out_err:
        return err;
 }
 
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+                                struct mlx5e_tc_flow *flow,
+                                struct mlx5_flow_attr *attr,
+                                struct netlink_ext_ack *extack,
+                                bool *vf_tun)
+{
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       struct mlx5_esw_flow_attr *esw_attr;
+       struct net_device *encap_dev = NULL;
+       struct mlx5e_rep_priv *rpriv;
+       struct mlx5e_priv *out_priv;
+       struct mlx5_eswitch *esw;
+       int out_index;
+       int err = 0;
+
+       if (!mlx5e_is_eswitch_flow(flow))
+               return 0;
+
+       parse_attr = attr->parse_attr;
+       esw_attr = attr->esw_attr;
+       *vf_tun = false;
+
+       esw = priv->mdev->priv.eswitch;
+       mutex_lock(&esw->offloads.encap_tbl_lock);
+       for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+               struct net_device *out_dev;
+               int mirred_ifindex;
+
+               if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+                       continue;
+
+               mirred_ifindex = parse_attr->mirred_ifindex[out_index];
+               out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
+               if (!out_dev) {
+                       NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
+                       err = -ENODEV;
+                       goto out;
+               }
+               err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
+                                        extack, &encap_dev);
+               dev_put(out_dev);
+               if (err)
+                       goto out;
+
+               if (esw_attr->dests[out_index].flags &
+                   MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+                   !esw_attr->dest_int_port)
+                       *vf_tun = true;
+
+               out_priv = netdev_priv(encap_dev);
+               rpriv = out_priv->ppriv;
+               esw_attr->dests[out_index].rep = rpriv->rep;
+               esw_attr->dests[out_index].mdev = out_priv->mdev;
+       }
+
+       if (*vf_tun && esw_attr->out_count > 1) {
+               NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
+               err = -EOPNOTSUPP;
+               goto out;
+       }
+
+out:
+       mutex_unlock(&esw->offloads.encap_tbl_lock);
+       return err;
+}
+
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+                                   struct mlx5e_tc_flow *flow,
+                                   struct mlx5_flow_attr *attr)
+{
+       struct mlx5_esw_flow_attr *esw_attr;
+       int out_index;
+
+       if (!mlx5e_is_eswitch_flow(flow))
+               return;
+
+       esw_attr = attr->esw_attr;
+
+       for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
+               if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+                       continue;
+
+               mlx5e_detach_encap(flow->priv, flow, attr, out_index);
+               kfree(attr->parse_attr->tun_info[out_index]);
+       }
+}
+
 static int cmp_route_info(struct mlx5e_route_key *a,
                          struct mlx5e_route_key *b)
 {
@@ -1369,11 +1455,13 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow *flow;
 
        list_for_each_entry(flow, encap_flows, tmp_list) {
-               struct mlx5_flow_attr *attr = flow->attr;
                struct mlx5_esw_flow_attr *esw_attr;
+               struct mlx5_flow_attr *attr;
 
                if (!mlx5e_is_offloaded_flow(flow))
                        continue;
+
+               attr = mlx5e_tc_get_encap_attr(flow);
                esw_attr = attr->esw_attr;
 
                if (flow_flag_test(flow, SLOW))
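
The encap rework inverts the locking scheme: mlx5e_attach_encap() no longer takes encap_tbl_lock itself and merely asserts it, while the new mlx5e_tc_tun_encap_dests_set() holds the lock across the whole destination loop, closing the race window the old unlock/wait_for_completion()/relock dance left open. A toy model of that caller-holds-lock convention, with a bool standing in for the mutex and assert() for lockdep_assert_held():

    #include <assert.h>
    #include <stdbool.h>

    /* The flag stands in for esw->offloads.encap_tbl_lock. */
    static bool encap_tbl_locked;

    static void attach_encap_locked(void)
    {
            assert(encap_tbl_locked);     /* lockdep_assert_held() analogue  */
            /* ... hash lookup and attach, all under the caller's lock ... */
    }

    static void encap_dests_set(int n)
    {
            encap_tbl_locked = true;      /* mutex_lock(&...encap_tbl_lock)   */
            for (int i = 0; i < n; i++)
                    attach_encap_locked();
            encap_tbl_locked = false;     /* mutex_unlock(...)                */
    }

    int main(void)
    {
            encap_dests_set(2);
            return 0;
    }
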
index 8ad273d..5d7d676 100644 (file)
@@ -30,6 +30,15 @@ int mlx5e_attach_decap_route(struct mlx5e_priv *priv,
 void mlx5e_detach_decap_route(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow);
 
+int mlx5e_tc_tun_encap_dests_set(struct mlx5e_priv *priv,
+                                struct mlx5e_tc_flow *flow,
+                                struct mlx5_flow_attr *attr,
+                                struct netlink_ext_ack *extack,
+                                bool *vf_tun);
+void mlx5e_tc_tun_encap_dests_unset(struct mlx5e_priv *priv,
+                                   struct mlx5e_tc_flow *flow,
+                                   struct mlx5_flow_attr *attr);
+
 struct ip_tunnel_info *mlx5e_dup_tun_info(const struct ip_tunnel_info *tun_info);
 
 int mlx5e_tc_set_attr_rx_tun(struct mlx5e_tc_flow *flow,
index 47381e9..879d698 100644 (file)
@@ -193,6 +193,8 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
        return pi;
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq);
+
 static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
        return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
index 1f90594..41c396e 100644 (file)
@@ -150,10 +150,8 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
 
        inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
        in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in) {
-               err = -ENOMEM;
-               goto out;
-       }
+       if (!in)
+               return -ENOMEM;
 
        if (enable_uc_lb)
                lb_flags = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
@@ -171,14 +169,13 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
                tirn = tir->tirn;
                err = mlx5_core_modify_tir(mdev, tirn, in);
                if (err)
-                       goto out;
+                       break;
        }
+       mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
 
-out:
        kvfree(in);
        if (err)
                netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
-       mutex_unlock(&mdev->mlx5e_res.hw_objs.td.list_lock);
 
        return err;
 }
index 89de92d..ebee52a 100644 (file)
@@ -926,9 +926,10 @@ static int mlx5e_dcbnl_getbuffer(struct net_device *dev,
        if (err)
                return err;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++)
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
                dcb_buffer->buffer_size[i] = port_buffer.buffer[i].size;
-       dcb_buffer->total_size = port_buffer.port_buffer_size;
+       dcb_buffer->total_size = port_buffer.port_buffer_size -
+                                port_buffer.internal_buffers_size;
 
        return 0;
 }
@@ -970,7 +971,7 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
        if (err)
                return err;
 
-       for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
+       for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
                if (port_buffer.buffer[i].size != dcb_buffer->buffer_size[i]) {
                        changed |= MLX5E_PORT_BUFFER_SIZE;
                        buffer_size = dcb_buffer->buffer_size;
index 2944691..a7c526e 100644 (file)
@@ -727,26 +727,6 @@ static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
        mlx5e_rq_shampo_hd_free(rq);
 }
 
-static __be32 mlx5e_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
-{
-       u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
-       int res;
-
-       if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
-               return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
-       MLX5_SET(query_special_contexts_in, in, opcode,
-                MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-       res = mlx5_cmd_exec_inout(dev, query_special_contexts, in, out);
-       if (res)
-               return MLX5_TERMINATE_SCATTER_LIST_LKEY;
-
-       res = MLX5_GET(query_special_contexts_out, out,
-                      terminate_scatter_list_mkey);
-       return cpu_to_be32(res);
-}
-
 static int mlx5e_alloc_rq(struct mlx5e_params *params,
                          struct mlx5e_xsk_param *xsk,
                          struct mlx5e_rq_param *rqp,
@@ -908,7 +888,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
                        /* check if num_frags is not a pow of two */
                        if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
                                wqe->data[f].byte_count = 0;
-                               wqe->data[f].lkey = mlx5e_get_terminate_scatter_list_mkey(mdev);
+                               wqe->data[f].lkey = params->terminate_lkey_be;
                                wqe->data[f].addr = 0;
                        }
                }
@@ -5007,6 +4987,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
        /* RQ */
        mlx5e_build_rq_params(mdev, params);
 
+       params->terminate_lkey_be = mlx5_core_get_terminate_scatter_list_mkey(mdev);
+
        params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
        /* CQ moderation params */
@@ -5279,12 +5261,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
 
        mlx5e_timestamp_init(priv);
 
+       priv->dfs_root = debugfs_create_dir("nic",
+                                           mlx5_debugfs_get_dev_root(mdev));
+
        fs = mlx5e_fs_init(priv->profile, mdev,
                           !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
                           priv->dfs_root);
        if (!fs) {
                err = -ENOMEM;
                mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+               debugfs_remove_recursive(priv->dfs_root);
                return err;
        }
        priv->fs = fs;
@@ -5305,6 +5291,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
        mlx5e_health_destroy_reporters(priv);
        mlx5e_ktls_cleanup(priv);
        mlx5e_fs_cleanup(priv->fs);
+       debugfs_remove_recursive(priv->dfs_root);
        priv->fs = NULL;
 }
 
@@ -5851,8 +5838,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
 }
 
 static int
-mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
-                           const struct mlx5e_profile *new_profile, void *new_ppriv)
+mlx5e_netdev_init_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+                         const struct mlx5e_profile *new_profile, void *new_ppriv)
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
        int err;
@@ -5868,6 +5855,25 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
        err = new_profile->init(priv->mdev, priv->netdev);
        if (err)
                goto priv_cleanup;
+
+       return 0;
+
+priv_cleanup:
+       mlx5e_priv_cleanup(priv);
+       return err;
+}
+
+static int
+mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mdev,
+                           const struct mlx5e_profile *new_profile, void *new_ppriv)
+{
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       int err;
+
+       err = mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+       if (err)
+               return err;
+
        err = mlx5e_attach_netdev(priv);
        if (err)
                goto profile_cleanup;
@@ -5875,7 +5881,6 @@ mlx5e_netdev_attach_profile(struct net_device *netdev, struct mlx5_core_dev *mde
 
 profile_cleanup:
        new_profile->cleanup(priv);
-priv_cleanup:
        mlx5e_priv_cleanup(priv);
        return err;
 }
@@ -5894,6 +5899,12 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
        priv->profile->cleanup(priv);
        mlx5e_priv_cleanup(priv);
 
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+               mlx5e_netdev_init_profile(netdev, mdev, new_profile, new_ppriv);
+               set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+               return -EIO;
+       }
+
        err = mlx5e_netdev_attach_profile(netdev, mdev, new_profile, new_ppriv);
        if (err) { /* roll back to original profile */
                netdev_warn(netdev, "%s: new profile init failed, %d\n", __func__, err);
@@ -5955,8 +5966,11 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (!netif_device_present(netdev))
+       if (!netif_device_present(netdev)) {
+               if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
+                       mlx5e_destroy_mdev_resources(mdev);
                return -ENODEV;
+       }
 
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_mdev_resources(mdev);
@@ -6002,9 +6016,6 @@ static int mlx5e_probe(struct auxiliary_device *adev,
        priv->profile = profile;
        priv->ppriv = NULL;
 
-       priv->dfs_root = debugfs_create_dir("nic",
-                                           mlx5_debugfs_get_dev_root(priv->mdev));
-
        err = profile->init(mdev, netdev);
        if (err) {
                mlx5_core_err(mdev, "mlx5e_nic_profile init failed, %d\n", err);
@@ -6033,7 +6044,6 @@ err_resume:
 err_profile_cleanup:
        profile->cleanup(priv);
 err_destroy_netdev:
-       debugfs_remove_recursive(priv->dfs_root);
        mlx5e_destroy_netdev(priv);
 err_devlink_port_unregister:
        mlx5e_devlink_port_unregister(mlx5e_dev);
@@ -6053,7 +6063,6 @@ static void mlx5e_remove(struct auxiliary_device *adev)
        unregister_netdev(priv->netdev);
        mlx5e_suspend(adev, state);
        priv->profile->cleanup(priv);
-       debugfs_remove_recursive(priv->dfs_root);
        mlx5e_destroy_netdev(priv);
        mlx5e_devlink_port_unregister(mlx5e_dev);
        mlx5e_destroy_devlink(mlx5e_dev);
index 1fc386e..3e7041b 100644 (file)
@@ -30,6 +30,7 @@
  * SOFTWARE.
  */
 
+#include <linux/debugfs.h>
 #include <linux/mlx5/fs.h>
 #include <net/switchdev.h>
 #include <net/pkt_cls.h>
@@ -812,11 +813,15 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
 {
        struct mlx5e_priv *priv = netdev_priv(netdev);
 
+       priv->dfs_root = debugfs_create_dir("nic",
+                                           mlx5_debugfs_get_dev_root(mdev));
+
        priv->fs = mlx5e_fs_init(priv->profile, mdev,
                                 !test_bit(MLX5E_STATE_DESTROYING, &priv->state),
                                 priv->dfs_root);
        if (!priv->fs) {
                netdev_err(priv->netdev, "FS allocation failed\n");
+               debugfs_remove_recursive(priv->dfs_root);
                return -ENOMEM;
        }
 
@@ -829,6 +834,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
 {
        mlx5e_fs_cleanup(priv->fs);
+       debugfs_remove_recursive(priv->dfs_root);
        priv->fs = NULL;
 }
 
index 728b82c..8a5a870 100644 (file)
@@ -1665,11 +1665,9 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
 int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
 {
        struct mlx5e_priv *out_priv, *route_priv;
-       struct mlx5_devcom *devcom = NULL;
        struct mlx5_core_dev *route_mdev;
        struct mlx5_eswitch *esw;
        u16 vhca_id;
-       int err;
 
        out_priv = netdev_priv(out_dev);
        esw = out_priv->mdev->priv.eswitch;
@@ -1678,6 +1676,9 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 
        vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
        if (mlx5_lag_is_active(out_priv->mdev)) {
+               struct mlx5_devcom *devcom;
+               int err;
+
                /* In the LAG case we may get devices from different eswitch
                 * instances. If we failed to get the vport num, it most likely
                 * means we are on the wrong eswitch.
@@ -1686,101 +1687,16 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
                if (err != -ENOENT)
                        return err;
 
+               rcu_read_lock();
                devcom = out_priv->mdev->priv.devcom;
-               esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-               if (!esw)
-                       return -ENODEV;
-       }
-
-       err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
-       if (devcom)
-               mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
-       return err;
-}
-
-static int
-set_encap_dests(struct mlx5e_priv *priv,
-               struct mlx5e_tc_flow *flow,
-               struct mlx5_flow_attr *attr,
-               struct netlink_ext_ack *extack,
-               bool *vf_tun)
-{
-       struct mlx5e_tc_flow_parse_attr *parse_attr;
-       struct mlx5_esw_flow_attr *esw_attr;
-       struct net_device *encap_dev = NULL;
-       struct mlx5e_rep_priv *rpriv;
-       struct mlx5e_priv *out_priv;
-       int out_index;
-       int err = 0;
-
-       if (!mlx5e_is_eswitch_flow(flow))
-               return 0;
-
-       parse_attr = attr->parse_attr;
-       esw_attr = attr->esw_attr;
-       *vf_tun = false;
-
-       for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
-               struct net_device *out_dev;
-               int mirred_ifindex;
-
-               if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
-                       continue;
-
-               mirred_ifindex = parse_attr->mirred_ifindex[out_index];
-               out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
-               if (!out_dev) {
-                       NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
-                       err = -ENODEV;
-                       goto out;
-               }
-               err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
-                                        extack, &encap_dev);
-               dev_put(out_dev);
-               if (err)
-                       goto out;
+               esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+               err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
+               rcu_read_unlock();
 
-               if (esw_attr->dests[out_index].flags &
-                   MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
-                   !esw_attr->dest_int_port)
-                       *vf_tun = true;
-
-               out_priv = netdev_priv(encap_dev);
-               rpriv = out_priv->ppriv;
-               esw_attr->dests[out_index].rep = rpriv->rep;
-               esw_attr->dests[out_index].mdev = out_priv->mdev;
-       }
-
-       if (*vf_tun && esw_attr->out_count > 1) {
-               NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
-               err = -EOPNOTSUPP;
-               goto out;
+               return err;
        }
 
-out:
-       return err;
-}
-
-static void
-clean_encap_dests(struct mlx5e_priv *priv,
-                 struct mlx5e_tc_flow *flow,
-                 struct mlx5_flow_attr *attr)
-{
-       struct mlx5_esw_flow_attr *esw_attr;
-       int out_index;
-
-       if (!mlx5e_is_eswitch_flow(flow))
-               return;
-
-       esw_attr = attr->esw_attr;
-
-       for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
-               if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
-                       continue;
-
-               mlx5e_detach_encap(priv, flow, attr, out_index);
-               kfree(attr->parse_attr->tun_info[out_index]);
-       }
+       return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
 }
 
 static int
@@ -1819,7 +1735,7 @@ post_process_attr(struct mlx5e_tc_flow *flow,
        if (err)
                goto err_out;
 
-       err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
+       err = mlx5e_tc_tun_encap_dests_set(flow->priv, flow, attr, extack, &vf_tun);
        if (err)
                goto err_out;
 
@@ -3943,8 +3859,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
        struct mlx5_flow_attr *prev_attr;
        struct flow_action_entry *act;
        struct mlx5e_tc_act *tc_act;
+       int err, i, i_split = 0;
        bool is_missable;
-       int err, i;
 
        ns_type = mlx5e_get_flow_namespace(flow);
        list_add(&attr->list, &flow->attrs);
@@ -3985,7 +3901,8 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
                    i < flow_action->num_entries - 1)) {
                        is_missable = tc_act->is_missable ? tc_act->is_missable(act) : false;
 
-                       err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+                       err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr,
+                                                     ns_type);
                        if (err)
                                goto out_free_post_acts;
 
@@ -3995,6 +3912,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
                                goto out_free_post_acts;
                        }
 
+                       i_split = i + 1;
                        list_add(&attr->list, &flow->attrs);
                }
 
@@ -4009,7 +3927,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
                }
        }
 
-       err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
+       err = mlx5e_tc_act_post_parse(parse_state, flow_action, i_split, i, attr, ns_type);
        if (err)
                goto out_free_post_acts;
 
@@ -4323,7 +4241,7 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
        if (attr->post_act_handle)
                mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);
 
-       clean_encap_dests(flow->priv, flow, attr);
+       mlx5e_tc_tun_encap_dests_unset(flow->priv, flow, attr);
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                mlx5_fc_destroy(counter_dev, attr->counter);
@@ -5301,6 +5219,8 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
                goto err_action_counter;
        }
 
+       mlx5_esw_offloads_devcom_init(esw);
+
        return 0;
 
 err_action_counter:
@@ -5329,7 +5249,7 @@ void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
        priv = netdev_priv(rpriv->netdev);
        esw = priv->mdev->priv.eswitch;
 
-       mlx5e_tc_clean_fdb_peer_flows(esw);
+       mlx5_esw_offloads_devcom_cleanup(esw);
 
        mlx5e_tc_tun_cleanup(uplink_priv->encap);
 
@@ -5643,22 +5563,43 @@ bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
                                   0, NULL);
 }
 
+static struct mapping_ctx *
+mlx5e_get_priv_obj_mapping(struct mlx5e_priv *priv)
+{
+       struct mlx5e_tc_table *tc;
+       struct mlx5_eswitch *esw;
+       struct mapping_ctx *ctx;
+
+       if (is_mdev_switchdev_mode(priv->mdev)) {
+               esw = priv->mdev->priv.eswitch;
+               ctx = esw->offloads.reg_c0_obj_pool;
+       } else {
+               tc = mlx5e_fs_get_tc(priv->fs);
+               ctx = tc->mapping;
+       }
+
+       return ctx;
+}
+
 int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
                                     u64 act_miss_cookie, u32 *act_miss_mapping)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_mapped_obj mapped_obj = {};
+       struct mlx5_eswitch *esw;
        struct mapping_ctx *ctx;
        int err;
 
-       ctx = esw->offloads.reg_c0_obj_pool;
-
+       ctx = mlx5e_get_priv_obj_mapping(priv);
        mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
        mapped_obj.act_miss_cookie = act_miss_cookie;
        err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
        if (err)
                return err;
 
+       if (!is_mdev_switchdev_mode(priv->mdev))
+               return 0;
+
+       esw = priv->mdev->priv.eswitch;
        attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
        if (IS_ERR(attr->act_id_restore_rule))
                goto err_rule;
@@ -5673,10 +5614,9 @@ err_rule:
 void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
                                      u32 act_miss_mapping)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mapping_ctx *ctx;
+       struct mapping_ctx *ctx = mlx5e_get_priv_obj_mapping(priv);
 
-       ctx = esw->offloads.reg_c0_obj_pool;
-       mlx5_del_flow_rules(attr->act_id_restore_rule);
+       if (is_mdev_switchdev_mode(priv->mdev))
+               mlx5_del_flow_rules(attr->act_id_restore_rule);
        mapping_remove(ctx, act_miss_mapping);
 }
index df5e780..c7eb6b2 100644 (file)
@@ -762,6 +762,17 @@ static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_t
        }
 }
 
+void mlx5e_txqsq_wake(struct mlx5e_txqsq *sq)
+{
+       if (netif_tx_queue_stopped(sq->txq) &&
+           mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
+           mlx5e_ptpsq_fifo_has_room(sq) &&
+           !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
+               netif_tx_wake_queue(sq->txq);
+               sq->stats->wake++;
+       }
+}
+
 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 {
        struct mlx5e_sq_stats *stats;
@@ -861,13 +872,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 
-       if (netif_tx_queue_stopped(sq->txq) &&
-           mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
-           mlx5e_ptpsq_fifo_has_room(sq) &&
-           !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
-               netif_tx_wake_queue(sq->txq);
-               stats->wake++;
-       }
+       mlx5e_txqsq_wake(sq);
 
        return (i == MLX5E_TX_CQ_POLL_BUDGET);
 }
index a50bfda..fbb2d96 100644 (file)
@@ -161,20 +161,22 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
                }
        }
 
+       /* budget=0 means we may be in IRQ context, do as little as possible */
+       if (unlikely(!budget))
+               goto out;
+
        busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
 
        if (c->xdp)
                busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
 
-       if (likely(budget)) { /* budget=0 means: don't poll rx rings */
-               if (xsk_open)
-                       work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
+       if (xsk_open)
+               work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
 
-               if (likely(budget - work_done))
-                       work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
+       if (likely(budget - work_done))
+               work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
 
-               busy |= work_done == budget;
-       }
+       busy |= work_done == budget;
 
        mlx5e_poll_ico_cq(&c->icosq.cq);
        if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
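
The hunk above hoists the budget check to the top of the handler: a budget of 0 can mean the poll is running in IRQ context (netpoll), so all XDP and RX polling is now skipped, not just the RX rings. A runnable toy model of that control flow, not the mlx5e code itself:

#include <stdio.h>

static int poll_tx(void)  { return 0; }             /* placeholder cleanup   */
static int poll_rx(int b) { return b < 3 ? b : 3; } /* pretend 3 pkts ready  */

static int napi_poll(int budget)
{
        int work_done = 0;

        poll_tx();                   /* TX completions are always safe */

        if (budget == 0)             /* may be IRQ context: stop here  */
                return 0;

        work_done = poll_rx(budget); /* RX only with a real budget     */
        return work_done;
}

int main(void)
{
        printf("budget 0  -> %d\n", napi_poll(0));
        printf("budget 64 -> %d\n", napi_poll(64));
        return 0;
}
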
index 1c35d72..3db4866 100644 (file)
@@ -824,7 +824,7 @@ static int comp_irqs_request_pci(struct mlx5_core_dev *dev)
        ncomp_eqs = table->num_comp_eqs;
        cpus = kcalloc(ncomp_eqs, sizeof(*cpus), GFP_KERNEL);
        if (!cpus)
-               ret = -ENOMEM;
+               return -ENOMEM;
 
        i = 0;
        rcu_read_lock();
@@ -1104,7 +1104,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev)
        struct mlx5_eq_table *table = dev->priv.eq_table;
 
        mutex_lock(&table->lock); /* sync with create/destroy_async_eq */
-       mlx5_irq_table_destroy(dev);
+       mlx5_irq_table_free_irqs(dev);
        mutex_unlock(&table->lock);
 }
 
index 1a042c9..add6cfa 100644 (file)
@@ -342,6 +342,7 @@ struct mlx5_eswitch {
                u32             large_group_num;
        }  params;
        struct blocking_notifier_head n_head;
+       bool paired[MLX5_MAX_PORTS];
 };
 
 void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -369,6 +370,8 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs);
 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf);
 void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw);
 void mlx5_eswitch_disable(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw);
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw);
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
                               u16 vport, const u8 *mac);
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
@@ -767,6 +770,8 @@ static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {}
 static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { return 0; }
 static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf) {}
 static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw) {}
+static inline void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) {}
 static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
 static inline
 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
index 69215ff..8d19c20 100644 (file)
@@ -2742,6 +2742,9 @@ static int mlx5_esw_offloads_devcom_event(int event,
                    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
                        break;
 
+               if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+                       break;
+
                err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
                if (err)
                        goto err_out;
@@ -2753,14 +2756,18 @@ static int mlx5_esw_offloads_devcom_event(int event,
                if (err)
                        goto err_pair;
 
+               esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
+               peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
                break;
 
        case ESW_OFFLOADS_DEVCOM_UNPAIR:
-               if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+               if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
                        break;
 
                mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+               esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
+               peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
                mlx5_esw_offloads_unpair(peer_esw);
                mlx5_esw_offloads_unpair(esw);
                mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
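
The paired[] flags added above make duplicate PAIR/UNPAIR devcom events harmless: a second PAIR for the same peer, or an UNPAIR for a peer that was never paired, is simply ignored. A minimal userspace sketch of the same idempotency guard, with illustrative names rather than the driver's API:

#include <stdbool.h>
#include <stdio.h>

#define MAX_PORTS 2

struct esw { bool paired[MAX_PORTS]; int idx; };

static void pair(struct esw *esw, struct esw *peer)
{
        if (esw->paired[peer->idx])  /* already paired: nothing to do */
                return;
        /* ... the actual pairing work would happen here ... */
        esw->paired[peer->idx] = true;
        peer->paired[esw->idx] = true;
}

static void unpair(struct esw *esw, struct esw *peer)
{
        if (!esw->paired[peer->idx]) /* never paired: nothing to undo */
                return;
        esw->paired[peer->idx] = false;
        peer->paired[esw->idx] = false;
        /* ... the actual unpairing work would happen here ... */
}

int main(void)
{
        struct esw a = { .idx = 0 }, b = { .idx = 1 };

        pair(&a, &b);
        pair(&a, &b);           /* duplicate event is ignored */
        unpair(&a, &b);
        unpair(&a, &b);         /* duplicate event is ignored */
        printf("paired: %d\n", a.paired[1]);
        return 0;
}
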
@@ -2779,7 +2786,7 @@ err_out:
        return err;
 }
 
-static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 {
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -2802,7 +2809,7 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
                               ESW_OFFLOADS_DEVCOM_PAIR, esw);
 }
 
-static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
+void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 {
        struct mlx5_devcom *devcom = esw->dev->priv.devcom;
 
@@ -3250,8 +3257,6 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
        if (err)
                goto err_vports;
 
-       esw_offloads_devcom_init(esw);
-
        return 0;
 
 err_vports:
@@ -3292,7 +3297,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_disable(struct mlx5_eswitch *esw)
 {
-       esw_offloads_devcom_cleanup(esw);
        mlx5_eswitch_disable_pf_vf_vports(esw);
        esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
        esw_set_passing_vport_metadata(esw, false);
index adefde3..b7d779d 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <linux/mlx5/vport.h>
 #include "lib/devcom.h"
+#include "mlx5_core.h"
 
 static LIST_HEAD(devcom_list);
 
@@ -13,7 +14,7 @@ static LIST_HEAD(devcom_list);
 
 struct mlx5_devcom_component {
        struct {
-               void *data;
+               void __rcu *data;
        } device[MLX5_DEVCOM_PORTS_SUPPORTED];
 
        mlx5_devcom_event_handler_t handler;
@@ -77,6 +78,7 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
        if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
                return NULL;
 
+       mlx5_dev_list_lock();
        sguid0 = mlx5_query_nic_system_image_guid(dev);
        list_for_each_entry(iter, &devcom_list, list) {
                struct mlx5_core_dev *tmp_dev = NULL;
@@ -102,8 +104,10 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
 
        if (!priv) {
                priv = mlx5_devcom_list_alloc();
-               if (!priv)
-                       return ERR_PTR(-ENOMEM);
+               if (!priv) {
+                       devcom = ERR_PTR(-ENOMEM);
+                       goto out;
+               }
 
                idx = 0;
                new_priv = true;
@@ -112,13 +116,16 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
        priv->devs[idx] = dev;
        devcom = mlx5_devcom_alloc(priv, idx);
        if (!devcom) {
-               kfree(priv);
-               return ERR_PTR(-ENOMEM);
+               if (new_priv)
+                       kfree(priv);
+               devcom = ERR_PTR(-ENOMEM);
+               goto out;
        }
 
        if (new_priv)
                list_add(&priv->list, &devcom_list);
-
+out:
+       mlx5_dev_list_unlock();
        return devcom;
 }
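
The hunk above serializes registration under mlx5_dev_list_lock() and rewrites the early returns as goto out so every path releases the lock at a single exit point. The same single-unlock idiom, sketched with a pthread mutex (illustrative only):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void *register_device(int fail)
{
        void *handle = NULL;

        pthread_mutex_lock(&list_lock);

        if (fail)                    /* error path still reaches 'out' */
                goto out;

        handle = malloc(16);         /* the real work, under the lock  */
out:
        pthread_mutex_unlock(&list_lock);
        return handle;
}

int main(void)
{
        void *h = register_device(0);

        printf("%s\n", h ? "registered" : "failed");
        free(h);
        return 0;
}
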
 
@@ -131,6 +138,7 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
        if (IS_ERR_OR_NULL(devcom))
                return;
 
+       mlx5_dev_list_lock();
        priv = devcom->priv;
        priv->devs[devcom->idx] = NULL;
 
@@ -141,10 +149,12 @@ void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom)
                        break;
 
        if (i != MLX5_DEVCOM_PORTS_SUPPORTED)
-               return;
+               goto out;
 
        list_del(&priv->list);
        kfree(priv);
+out:
+       mlx5_dev_list_unlock();
 }
 
 void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
@@ -162,7 +172,7 @@ void mlx5_devcom_register_component(struct mlx5_devcom *devcom,
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
        comp->handler = handler;
-       comp->device[devcom->idx].data = data;
+       rcu_assign_pointer(comp->device[devcom->idx].data, data);
        up_write(&comp->sem);
 }
 
@@ -176,8 +186,9 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
-       comp->device[devcom->idx].data = NULL;
+       RCU_INIT_POINTER(comp->device[devcom->idx].data, NULL);
        up_write(&comp->sem);
+       synchronize_rcu();
 }
 
 int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
@@ -193,12 +204,15 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_write(&comp->sem);
-       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
-               if (i != devcom->idx && comp->device[i].data) {
-                       err = comp->handler(event, comp->device[i].data,
-                                           event_data);
+       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
+               void *data = rcu_dereference_protected(comp->device[i].data,
+                                                      lockdep_is_held(&comp->sem));
+
+               if (i != devcom->idx && data) {
+                       err = comp->handler(event, data, event_data);
                        break;
                }
+       }
 
        up_write(&comp->sem);
        return err;
@@ -213,7 +227,7 @@ void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
        comp = &devcom->priv->components[id];
        WARN_ON(!rwsem_is_locked(&comp->sem));
 
-       comp->paired = paired;
+       WRITE_ONCE(comp->paired, paired);
 }
 
 bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
@@ -222,7 +236,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
        if (IS_ERR_OR_NULL(devcom))
                return false;
 
-       return devcom->priv->components[id].paired;
+       return READ_ONCE(devcom->priv->components[id].paired);
 }
 
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
@@ -236,7 +250,7 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
 
        comp = &devcom->priv->components[id];
        down_read(&comp->sem);
-       if (!comp->paired) {
+       if (!READ_ONCE(comp->paired)) {
                up_read(&comp->sem);
                return NULL;
        }
@@ -245,7 +259,29 @@ void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
                if (i != devcom->idx)
                        break;
 
-       return comp->device[i].data;
+       return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+}
+
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+{
+       struct mlx5_devcom_component *comp;
+       int i;
+
+       if (IS_ERR_OR_NULL(devcom))
+               return NULL;
+
+       for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
+               if (i != devcom->idx)
+                       break;
+
+       comp = &devcom->priv->components[id];
+       /* This can change concurrently; however, the 'data' pointer will
+        * remain valid for the duration of the RCU read section.
+        */
+       if (!READ_ONCE(comp->paired))
+               return NULL;
+
+       return rcu_dereference(comp->device[i].data);
 }
 
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
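
mlx5_devcom_get_peer_data_rcu() above depends on the publish/read contract established earlier in the file: writers publish with rcu_assign_pointer() and retire with synchronize_rcu(), while lockless readers use rcu_dereference() inside an RCU read-side section. A rough userspace analogue of the pointer-publication ordering, using C11 release/acquire atomics in place of kernel RCU (this models the memory ordering only, not RCU grace periods):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) peer_data;   /* ~ comp->device[i].data */

static void publish(int *p)
{
        /* ~ rcu_assign_pointer(): release store orders prior init */
        atomic_store_explicit(&peer_data, p, memory_order_release);
}

static int *read_peer(void)
{
        /* ~ rcu_dereference(): acquire load pairs with the release */
        return atomic_load_explicit(&peer_data, memory_order_acquire);
}

int main(void)
{
        static int data = 42;

        publish(&data);
        int *p = read_peer();
        printf("%d\n", p ? *p : -1);
        return 0;
}
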
index 94313c1..9a496f4 100644 (file)
@@ -41,6 +41,7 @@ bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
 
 void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
                                enum mlx5_devcom_components id);
+void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
 void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
                                   enum mlx5_devcom_components id);
 
index 995eb2d..d6ee016 100644 (file)
@@ -923,7 +923,6 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
        }
 
        mlx5_pci_vsc_init(dev);
-       dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
        return 0;
 
 err_clr_master:
@@ -1049,7 +1048,7 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 
        dev->dm = mlx5_dm_create(dev);
        if (IS_ERR(dev->dm))
-               mlx5_core_warn(dev, "Failed to init device memory%d\n", err);
+               mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));
 
        dev->tracer = mlx5_fw_tracer_create(dev);
        dev->hv_vhca = mlx5_hv_vhca_create(dev);
@@ -1155,6 +1154,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
                goto err_cmd_cleanup;
        }
 
+       dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
 
        mlx5_start_health_poll(dev);
@@ -1802,15 +1802,16 @@ static void remove_one(struct pci_dev *pdev)
        struct devlink *devlink = priv_to_devlink(dev);
 
        set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
-       /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain
-        * fw_reset before unregistering the devlink.
+       /* mlx5_drain_fw_reset() and mlx5_drain_health_wq() are using
+        * devlink notify APIs.
+        * Hence, we must drain them before unregistering the devlink.
         */
        mlx5_drain_fw_reset(dev);
+       mlx5_drain_health_wq(dev);
        devlink_unregister(devlink);
        mlx5_sriov_disable(pdev);
        mlx5_thermal_uninit(dev);
        mlx5_crdump_disable(dev);
-       mlx5_drain_health_wq(dev);
        mlx5_uninit_one(dev);
        mlx5_pci_close(dev);
        mlx5_mdev_uninit(dev);
index efd0c29..aa403a5 100644 (file)
@@ -15,6 +15,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev);
 void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
 int mlx5_irq_table_create(struct mlx5_core_dev *dev);
 void mlx5_irq_table_destroy(struct mlx5_core_dev *dev);
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev);
 int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table);
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table);
 struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev);
index 9d735c3..678f0be 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/kernel.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
 #include "mlx5_core.h"
 
 int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
@@ -122,3 +123,23 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
        return mlx5_cmd_exec_in(dev, destroy_psv, in);
 }
 EXPORT_SYMBOL(mlx5_core_destroy_psv);
+
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev)
+{
+       u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
+       u32 mkey;
+
+       if (!MLX5_CAP_GEN(dev, terminate_scatter_list_mkey))
+               return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+       MLX5_SET(query_special_contexts_in, in, opcode,
+                MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
+       if (mlx5_cmd_exec_inout(dev, query_special_contexts, in, out))
+               return MLX5_TERMINATE_SCATTER_LIST_LKEY;
+
+       mkey = MLX5_GET(query_special_contexts_out, out,
+                       terminate_scatter_list_mkey);
+       return cpu_to_be32(mkey);
+}
+EXPORT_SYMBOL(mlx5_core_get_terminate_scatter_list_mkey);
index 2245d3b..843da89 100644 (file)
@@ -32,6 +32,7 @@ struct mlx5_irq {
        struct mlx5_irq_pool *pool;
        int refcount;
        struct msi_map map;
+       u32 pool_index;
 };
 
 struct mlx5_irq_table {
@@ -132,7 +133,7 @@ static void irq_release(struct mlx5_irq *irq)
        struct cpu_rmap *rmap;
 #endif
 
-       xa_erase(&pool->irqs, irq->map.index);
+       xa_erase(&pool->irqs, irq->pool_index);
        /* free_irq requires that affinity_hint and rmap be cleared before
         * calling it. To satisfy this requirement, we call
         * irq_cpu_rmap_remove() to remove the notifier
@@ -140,7 +141,7 @@ static void irq_release(struct mlx5_irq *irq)
        irq_update_affinity_hint(irq->map.virq, NULL);
 #ifdef CONFIG_RFS_ACCEL
        rmap = mlx5_eq_table_get_rmap(pool->dev);
-       if (rmap && irq->map.index)
+       if (rmap)
                irq_cpu_rmap_remove(rmap, irq->map.virq);
 #endif
 
@@ -231,12 +232,13 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
        if (!irq)
                return ERR_PTR(-ENOMEM);
        if (!i || !pci_msix_can_alloc_dyn(dev->pdev)) {
-               /* The vector at index 0 was already allocated.
-                * Just get the irq number. If dynamic irq is not supported
-                * vectors have also been allocated.
+               /* The vector at index 0 is always statically allocated. If
+                * dynamic irq is not supported, all vectors are statically
+                * allocated. In both cases, just get the irq number and set
+                * the index.
                 */
                irq->map.virq = pci_irq_vector(dev->pdev, i);
-               irq->map.index = 0;
+               irq->map.index = i;
        } else {
                irq->map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, af_desc);
                if (!irq->map.virq) {
@@ -276,11 +278,11 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
        }
        irq->pool = pool;
        irq->refcount = 1;
-       irq->map.index = i;
-       err = xa_err(xa_store(&pool->irqs, irq->map.index, irq, GFP_KERNEL));
+       irq->pool_index = i;
+       err = xa_err(xa_store(&pool->irqs, irq->pool_index, irq, GFP_KERNEL));
        if (err) {
                mlx5_core_err(dev, "Failed to alloc xa entry for irq(%u). err = %d\n",
-                             irq->map.index, err);
+                             irq->pool_index, err);
                goto err_xa;
        }
        return irq;
@@ -567,13 +569,13 @@ int mlx5_irqs_request_vectors(struct mlx5_core_dev *dev, u16 *cpus, int nirqs,
        struct mlx5_irq *irq;
        int i;
 
-       af_desc.is_managed = 1;
+       af_desc.is_managed = false;
        for (i = 0; i < nirqs; i++) {
+               cpumask_clear(&af_desc.mask);
                cpumask_set_cpu(cpus[i], &af_desc.mask);
                irq = mlx5_irq_request(dev, i + 1, &af_desc, rmap);
                if (IS_ERR(irq))
                        break;
-               cpumask_clear(&af_desc.mask);
                irqs[i] = irq;
        }
 
@@ -691,6 +693,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table)
        irq_pool_free(table->pcif_pool);
 }
 
+static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool)
+{
+       struct mlx5_irq *irq;
+       unsigned long index;
+
+       xa_for_each(&pool->irqs, index, irq)
+               free_irq(irq->map.virq, &irq->nh);
+}
+
+static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table)
+{
+       if (table->sf_ctrl_pool) {
+               mlx5_irq_pool_free_irqs(table->sf_comp_pool);
+               mlx5_irq_pool_free_irqs(table->sf_ctrl_pool);
+       }
+       mlx5_irq_pool_free_irqs(table->pcif_pool);
+}
+
 /* irq_table API */
 
 int mlx5_irq_table_init(struct mlx5_core_dev *dev)
@@ -774,6 +794,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev)
        pci_free_irq_vectors(dev->pdev);
 }
 
+void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev)
+{
+       struct mlx5_irq_table *table = dev->priv.irq_table;
+
+       if (mlx5_core_is_sf(dev))
+               return;
+
+       mlx5_irq_pools_free_irqs(table);
+       pci_free_irq_vectors(dev->pdev);
+}
+
 int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table)
 {
        if (table->sf_comp_pool)
index e2f26d0..0692363 100644 (file)
@@ -63,6 +63,7 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
        struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
        struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
 
+       mlx5_drain_health_wq(sf_dev->mdev);
        devlink_unregister(devlink);
        mlx5_uninit_one(sf_dev->mdev);
        iounmap(sf_dev->mdev->iseg);
index 3835ba3..1aa525e 100644 (file)
@@ -117,6 +117,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
        caps->gvmi              = MLX5_CAP_GEN(mdev, vhca_id);
        caps->flex_protocols    = MLX5_CAP_GEN(mdev, flex_parser_protocols);
        caps->sw_format_ver     = MLX5_CAP_GEN(mdev, steering_format_version);
+       caps->roce_caps.fl_rc_qp_when_roce_disabled =
+               MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
 
        if (MLX5_CAP_GEN(mdev, roce)) {
                err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
@@ -124,7 +126,7 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
                        return err;
 
                caps->roce_caps.roce_en = roce_en;
-               caps->roce_caps.fl_rc_qp_when_roce_disabled =
+               caps->roce_caps.fl_rc_qp_when_roce_disabled |=
                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
                caps->roce_caps.fl_rc_qp_when_roce_enabled =
                        MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
index 13e06a6..d6947fe 100644 (file)
@@ -213,6 +213,8 @@ struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
        }
 
        INIT_LIST_HEAD(&mgr->ptrn_list);
+       mutex_init(&mgr->modify_hdr_mutex);
+
        return mgr;
 
 free_mgr:
@@ -237,5 +239,6 @@ void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
        }
 
        mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
+       mutex_destroy(&mgr->modify_hdr_mutex);
        kfree(mgr);
 }
index 9413aaf..e94fbb0 100644 (file)
@@ -15,7 +15,8 @@ static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
 {
        u32 crc = crc32(0, input_data, length);
 
-       return (__force u32)htonl(crc);
+       return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+                           ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
 }
 
 bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
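
The fix above replaces htonl() with an unconditional byte reversal, so the result no longer varies with host endianness (htonl is a no-op on big-endian hosts). A quick userspace check that the shift/mask expression is an ordinary 32-bit byte reversal:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32_open_coded(uint32_t crc)
{
        return ((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
               ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
}

int main(void)
{
        uint32_t v = 0x11223344;

        assert(swab32_open_coded(v) == 0x44332211);
        assert(swab32_open_coded(swab32_open_coded(v)) == v); /* involution */
        printf("0x%08x -> 0x%08x\n", v, swab32_open_coded(v));
        return 0;
}
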
index afa3b92..0d5a41a 100644 (file)
@@ -245,12 +245,6 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
 
                skb = priv->rx_skb[rx_pi_rem];
 
-               skb_put(skb, datalen);
-
-               skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
-
-               skb->protocol = eth_type_trans(skb, netdev);
-
                /* Alloc another RX SKB for this same index */
                rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
                                              &rx_buf_dma, DMA_FROM_DEVICE);
@@ -259,6 +253,13 @@ static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
                priv->rx_skb[rx_pi_rem] = rx_skb;
                dma_unmap_single(priv->dev, *rx_wqe_addr,
                                 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+
+               skb_put(skb, datalen);
+
+               skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
+
+               skb->protocol = eth_type_trans(skb, netdev);
+
                *rx_wqe_addr = rx_buf_dma;
        } else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
                priv->stats.rx_mac_errors++;
index 2b6e046..ee26986 100644 (file)
@@ -1039,6 +1039,16 @@ static int lan966x_reset_switch(struct lan966x *lan966x)
 
        reset_control_reset(switch_reset);
 
+       /* Don't reinitialize the switch core if it is already initialized. If
+        * it is initialized twice, some pointers inside the HW queue system
+        * get corrupted, and after a while the queue system fills up and no
+        * traffic passes through the switch. The issue is seen when loading
+        * and unloading the driver and sending traffic through the switch.
+        */
+       if (lan_rd(lan966x, SYS_RESET_CFG) & SYS_RESET_CFG_CORE_ENA)
+               return 0;
+
        lan_wr(SYS_RESET_CFG_CORE_ENA_SET(0), lan966x, SYS_RESET_CFG);
        lan_wr(SYS_RAM_INIT_RAM_INIT_SET(1), lan966x, SYS_RAM_INIT);
        ret = readx_poll_timeout(lan966x_ram_init, lan966x,
index 06d6292..d907727 100644 (file)
@@ -1279,8 +1279,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
        if (comp_read < 1)
                return;
 
-       apc->eth_stats.tx_cqes = comp_read;
-
        for (i = 0; i < comp_read; i++) {
                struct mana_tx_comp_oob *cqe_oob;
 
@@ -1363,8 +1361,6 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
                WARN_ON_ONCE(1);
 
        cq->work_done = pkt_transmitted;
-
-       apc->eth_stats.tx_cqes -= pkt_transmitted;
 }
 
 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@ -1626,15 +1622,11 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
 {
        struct gdma_comp *comp = cq->gdma_comp_buf;
        struct mana_rxq *rxq = cq->rxq;
-       struct mana_port_context *apc;
        int comp_read, i;
 
-       apc = netdev_priv(rxq->ndev);
-
        comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
        WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
 
-       apc->eth_stats.rx_cqes = comp_read;
        rxq->xdp_flush = false;
 
        for (i = 0; i < comp_read; i++) {
@@ -1646,8 +1638,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
                        return;
 
                mana_process_rx_cqe(rxq, cq, &comp[i]);
-
-               apc->eth_stats.rx_cqes--;
        }
 
        if (rxq->xdp_flush)
index a64c814..0dc7867 100644 (file)
@@ -13,11 +13,9 @@ static const struct {
 } mana_eth_stats[] = {
        {"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
        {"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
-       {"tx_cqes", offsetof(struct mana_ethtool_stats, tx_cqes)},
        {"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
        {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
                                        tx_cqe_unknown_type)},
-       {"rx_cqes", offsetof(struct mana_ethtool_stats, rx_cqes)},
        {"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
                                        rx_coalesced_err)},
        {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
index 094374d..38b8b10 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifdef CONFIG_DCB
 /* DCB feature definitions */
-#define NFP_NET_MAX_DSCP       4
+#define NFP_NET_MAX_DSCP       64
 #define NFP_NET_MAX_TC         IEEE_8021QAZ_MAX_TCS
 #define NFP_NET_MAX_PRIO       8
 #define NFP_DCB_CFG_STRIDE     256
index 0605d1e..7a549b8 100644 (file)
@@ -6138,6 +6138,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
        return 0;
 
 out_error:
+       nv_mgmt_release_sema(dev);
        if (phystate_orig)
                writel(phystate|NVREG_ADAPTCTL_RUNNING, base + NvRegAdapterControl);
 out_freering:
index 2edd6bf..7776d3b 100644 (file)
@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
 {
        u32 i;
 
-       if (!cdev) {
+       if (!cdev || cdev->recov_in_prog) {
                memset(stats, 0, sizeof(*stats));
                return;
        }
index f9931ec..4d83cee 100644 (file)
@@ -269,6 +269,10 @@ struct qede_dev {
 #define QEDE_ERR_WARN                  3
 
        struct qede_dump_info           dump_info;
+       struct delayed_work             periodic_task;
+       unsigned long                   stats_coal_ticks;
+       u32                             stats_coal_usecs;
+       spinlock_t                      stats_lock; /* lock for vport stats access */
 };
 
 enum QEDE_STATE {
index 374a86b..95820cf 100644 (file)
@@ -429,6 +429,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
                }
        }
 
+       spin_lock(&edev->stats_lock);
+
        for (i = 0; i < QEDE_NUM_STATS; i++) {
                if (qede_is_irrelevant_stat(edev, i))
                        continue;
@@ -438,6 +440,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
                buf++;
        }
 
+       spin_unlock(&edev->stats_lock);
+
        __qede_unlock(edev);
 }
 
@@ -829,6 +833,7 @@ out:
 
        coal->rx_coalesce_usecs = rx_coal;
        coal->tx_coalesce_usecs = tx_coal;
+       coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;
 
        return rc;
 }
@@ -842,6 +847,19 @@ int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
        int i, rc = 0;
        u16 rxc, txc;
 
+       if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
+               edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
+               if (edev->stats_coal_usecs) {
+                       edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
+                       schedule_delayed_work(&edev->periodic_task, 0);
+
+                       DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
+                               edev->stats_coal_ticks);
+               } else {
+                       cancel_delayed_work_sync(&edev->periodic_task);
+               }
+       }
+
        if (!netif_running(dev)) {
                DP_INFO(edev, "Interface is down\n");
                return -EINVAL;
@@ -2252,7 +2270,8 @@ out:
 }
 
 static const struct ethtool_ops qede_ethtool_ops = {
-       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
+       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS |
+                                         ETHTOOL_COALESCE_STATS_BLOCK_USECS,
        .get_link_ksettings             = qede_get_link_ksettings,
        .set_link_ksettings             = qede_set_link_ksettings,
        .get_drvinfo                    = qede_get_drvinfo,
@@ -2303,7 +2322,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
 };
 
 static const struct ethtool_ops qede_vf_ethtool_ops = {
-       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS,
+       .supported_coalesce_params      = ETHTOOL_COALESCE_USECS |
+                                         ETHTOOL_COALESCE_STATS_BLOCK_USECS,
        .get_link_ksettings             = qede_get_link_ksettings,
        .get_drvinfo                    = qede_get_drvinfo,
        .get_msglevel                   = qede_get_msglevel,
index 4c6c685..4b004a7 100644 (file)
@@ -307,6 +307,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
 
        edev->ops->get_vport_stats(edev->cdev, &stats);
 
+       spin_lock(&edev->stats_lock);
+
        p_common->no_buff_discards = stats.common.no_buff_discards;
        p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
        p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -404,6 +406,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
                p_ah->tx_1519_to_max_byte_packets =
                    stats.ah.tx_1519_to_max_byte_packets;
        }
+
+       spin_unlock(&edev->stats_lock);
 }
 
 static void qede_get_stats64(struct net_device *dev,
@@ -412,9 +416,10 @@ static void qede_get_stats64(struct net_device *dev,
        struct qede_dev *edev = netdev_priv(dev);
        struct qede_stats_common *p_common;
 
-       qede_fill_by_demand_stats(edev);
        p_common = &edev->stats.common;
 
+       spin_lock(&edev->stats_lock);
+
        stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
                            p_common->rx_bcast_pkts;
        stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -434,6 +439,8 @@ static void qede_get_stats64(struct net_device *dev,
                stats->collisions = edev->stats.bb.tx_total_collisions;
        stats->rx_crc_errors = p_common->rx_crc_errors;
        stats->rx_frame_errors = p_common->rx_align_errors;
+
+       spin_unlock(&edev->stats_lock);
 }
 
 #ifdef CONFIG_QED_SRIOV
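
The hunks above put both the writer (qede_fill_by_demand_stats()) and the readers of edev->stats under stats_lock, so ethtool and ndo_get_stats64 see a coherent snapshot instead of racing the periodic refresh. The classic snapshot-under-a-lock shape, sketched with pthreads:

#include <pthread.h>
#include <stdio.h>

struct stats { unsigned long rx, tx; };

static struct stats cur;
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;

static void fill_stats(void)        /* writer: periodic worker */
{
        pthread_mutex_lock(&stats_lock);
        cur.rx += 10;
        cur.tx += 5;
        pthread_mutex_unlock(&stats_lock);
}

static struct stats get_stats(void) /* reader: coherent snapshot */
{
        pthread_mutex_lock(&stats_lock);
        struct stats snap = cur;    /* rx and tx from the same update */
        pthread_mutex_unlock(&stats_lock);
        return snap;
}

int main(void)
{
        fill_stats();
        struct stats s = get_stats();
        printf("rx=%lu tx=%lu\n", s.rx, s.tx);
        return 0;
}
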
@@ -1063,6 +1070,23 @@ static void qede_unlock(struct qede_dev *edev)
        rtnl_unlock();
 }
 
+static void qede_periodic_task(struct work_struct *work)
+{
+       struct qede_dev *edev = container_of(work, struct qede_dev,
+                                            periodic_task.work);
+
+       qede_fill_by_demand_stats(edev);
+       schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
+}
+
+static void qede_init_periodic_task(struct qede_dev *edev)
+{
+       INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
+       spin_lock_init(&edev->stats_lock);
+       edev->stats_coal_usecs = USEC_PER_SEC;
+       edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
+}
+
 static void qede_sp_task(struct work_struct *work)
 {
        struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1082,6 +1106,7 @@ static void qede_sp_task(struct work_struct *work)
         */
 
        if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
+               cancel_delayed_work_sync(&edev->periodic_task);
 #ifdef CONFIG_QED_SRIOV
                /* SRIOV must be disabled outside the lock to avoid a deadlock.
                 * The recovery of the active VFs is currently not supported.
@@ -1272,6 +1297,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
                 */
                INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
                mutex_init(&edev->qede_lock);
+               qede_init_periodic_task(edev);
 
                rc = register_netdev(edev->ndev);
                if (rc) {
@@ -1296,6 +1322,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
        edev->rx_copybreak = QEDE_RX_HDR_SIZE;
 
        qede_log_probe(edev);
+
+       /* retain user config (for example, after recovery) */
+       if (edev->stats_coal_usecs)
+               schedule_delayed_work(&edev->periodic_task, 0);
+
        return 0;
 
 err4:
@@ -1364,6 +1395,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
                unregister_netdev(ndev);
 
                cancel_delayed_work_sync(&edev->sp_task);
+               cancel_delayed_work_sync(&edev->periodic_task);
 
                edev->ops->common->set_power_state(cdev, PCI_D0);
 
index a7e376e..4b19803 100644 (file)
@@ -616,10 +616,10 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
-       spinlock_t config25_lock;
-       spinlock_t mac_ocp_lock;
+       raw_spinlock_t config25_lock;
+       raw_spinlock_t mac_ocp_lock;
 
-       spinlock_t cfg9346_usage_lock;
+       raw_spinlock_t cfg9346_usage_lock;
        int cfg9346_usage_count;
 
        unsigned supports_gmii:1;
@@ -671,20 +671,20 @@ static void rtl_lock_config_regs(struct rtl8169_private *tp)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+       raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
        if (!--tp->cfg9346_usage_count)
                RTL_W8(tp, Cfg9346, Cfg9346_Lock);
-       spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
 }
 
 static void rtl_unlock_config_regs(struct rtl8169_private *tp)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
+       raw_spin_lock_irqsave(&tp->cfg9346_usage_lock, flags);
        if (!tp->cfg9346_usage_count++)
                RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
-       spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->cfg9346_usage_lock, flags);
 }
 
 static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -698,10 +698,10 @@ static void rtl_mod_config2(struct rtl8169_private *tp, u8 clear, u8 set)
        unsigned long flags;
        u8 val;
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        val = RTL_R8(tp, Config2);
        RTL_W8(tp, Config2, (val & ~clear) | set);
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 }
 
 static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
@@ -709,10 +709,10 @@ static void rtl_mod_config5(struct rtl8169_private *tp, u8 clear, u8 set)
        unsigned long flags;
        u8 val;
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        val = RTL_R8(tp, Config5);
        RTL_W8(tp, Config5, (val & ~clear) | set);
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 }
 
 static bool rtl_is_8125(struct rtl8169_private *tp)
@@ -899,9 +899,9 @@ static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
 {
        unsigned long flags;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        __r8168_mac_ocp_write(tp, reg, data);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 }
 
 static u16 __r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
@@ -919,9 +919,9 @@ static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
        unsigned long flags;
        u16 val;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        val = __r8168_mac_ocp_read(tp, reg);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 
        return val;
 }
@@ -932,10 +932,10 @@ static void r8168_mac_ocp_modify(struct rtl8169_private *tp, u32 reg, u16 mask,
        unsigned long flags;
        u16 data;
 
-       spin_lock_irqsave(&tp->mac_ocp_lock, flags);
+       raw_spin_lock_irqsave(&tp->mac_ocp_lock, flags);
        data = __r8168_mac_ocp_read(tp, reg);
        __r8168_mac_ocp_write(tp, reg, (data & ~mask) | set);
-       spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->mac_ocp_lock, flags);
 }
 
 /* Work around a hw issue with RTL8168g PHY, the quirk disables
@@ -1420,14 +1420,14 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
                        r8168_mac_ocp_modify(tp, 0xc0b6, BIT(0), 0);
        }
 
-       spin_lock_irqsave(&tp->config25_lock, flags);
+       raw_spin_lock_irqsave(&tp->config25_lock, flags);
        for (i = 0; i < tmp; i++) {
                options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
                if (wolopts & cfg[i].opt)
                        options |= cfg[i].mask;
                RTL_W8(tp, cfg[i].reg, options);
        }
-       spin_unlock_irqrestore(&tp->config25_lock, flags);
+       raw_spin_unlock_irqrestore(&tp->config25_lock, flags);
 
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
@@ -5179,9 +5179,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->eee_adv = -1;
        tp->ocp_base = OCP_STD_PHY_BASE;
 
-       spin_lock_init(&tp->cfg9346_usage_lock);
-       spin_lock_init(&tp->config25_lock);
-       spin_lock_init(&tp->mac_ocp_lock);
+       raw_spin_lock_init(&tp->cfg9346_usage_lock);
+       raw_spin_lock_init(&tp->config25_lock);
+       raw_spin_lock_init(&tp->mac_ocp_lock);
 
        dev->tstats = devm_netdev_alloc_pcpu_stats(&pdev->dev,
                                                   struct pcpu_sw_netstats);
index 29afadd..aace871 100644 (file)
@@ -1485,7 +1485,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
 
        if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
                netif_stop_subqueue(ndev, 0);
-               return ret;
+               return NETDEV_TX_BUSY;
        }
 
        if (skb_put_padto(skb, ETH_ZLEN))
index d916877..be395cd 100644 (file)
@@ -378,7 +378,9 @@ int ef100_probe_netdev(struct efx_probe_data *probe_data)
        efx->net_dev = net_dev;
        SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
 
-       net_dev->features |= efx->type->offload_features;
+       /* enable all supported features except rx-fcs and rx-all */
+       net_dev->features |= efx->type->offload_features &
+                            ~(NETIF_F_RXFCS | NETIF_F_RXALL);
        net_dev->hw_features |= efx->type->offload_features;
        net_dev->hw_enc_features |= efx->type->offload_features;
        net_dev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_SG |
index 381b805..ef9971c 100644 (file)
@@ -171,9 +171,14 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
 
        rc = efx_mcdi_nvram_metadata(efx, partition_type, NULL, version, NULL,
                                     0);
+
+       /* If the partition does not exist, that is not an error. */
+       if (rc == -ENOENT)
+               return 0;
+
        if (rc) {
-               netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed\n",
-                         version_name);
+               netif_err(efx, drv, efx->net_dev, "mcdi nvram %s: failed (rc=%d)\n",
+                         version_name, rc);
                return rc;
        }
 
@@ -187,36 +192,33 @@ static int efx_devlink_info_nvram_partition(struct efx_nic *efx,
 static int efx_devlink_info_stored_versions(struct efx_nic *efx,
                                            struct devlink_info_req *req)
 {
-       int rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_BUNDLE,
-                                             DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_MC_FIRMWARE,
-                                             DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
-                                             EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
-       if (rc)
-               return rc;
-
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_EXPANSION_ROM,
-                                             EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
-       if (rc)
-               return rc;
+       int err;
 
-       rc = efx_devlink_info_nvram_partition(efx, req,
-                                             NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
-                                             EFX_DEVLINK_INFO_VERSION_FW_UEFI);
-       return rc;
+       /* We do not care about the specific error here, only whether one
+        * happened. Each call below reports its own error through system
+        * messages; if any of them failed, we report that through extack.
+        */
+       err = efx_devlink_info_nvram_partition(efx, req,
+                                              NVRAM_PARTITION_TYPE_BUNDLE,
+                                              DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_MC_FIRMWARE,
+                                               DEVLINK_INFO_VERSION_GENERIC_FW_MGMT);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_SUC_FIRMWARE,
+                                               EFX_DEVLINK_INFO_VERSION_FW_MGMT_SUC);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_EXPANSION_ROM,
+                                               EFX_DEVLINK_INFO_VERSION_FW_EXPROM);
+
+       err |= efx_devlink_info_nvram_partition(efx, req,
+                                               NVRAM_PARTITION_TYPE_EXPANSION_UEFI,
+                                               EFX_DEVLINK_INFO_VERSION_FW_UEFI);
+       return err;
 }
 
 #define EFX_VER_FLAG(_f)       \
@@ -587,27 +589,20 @@ static int efx_devlink_info_get(struct devlink *devlink,
 {
        struct efx_devlink *devlink_private = devlink_priv(devlink);
        struct efx_nic *efx = devlink_private->efx;
-       int rc;
+       int err;
 
-       /* Several different MCDI commands are used. We report first error
-        * through extack returning at that point. Specific error
-        * information via system messages.
+       /* Several different MCDI commands are used. If any of them fails,
+        * we report it through extack; the specific error information is
+        * logged via system messages inside the calls.
         */
-       rc = efx_devlink_info_board_cfg(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting board info failed");
-               return rc;
-       }
-       rc = efx_devlink_info_stored_versions(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting stored versions failed");
-               return rc;
-       }
-       rc = efx_devlink_info_running_versions(efx, req);
-       if (rc) {
-               NL_SET_ERR_MSG_MOD(extack, "Getting running versions failed");
-               return rc;
-       }
+       err = efx_devlink_info_board_cfg(efx, req);
+
+       err |= efx_devlink_info_stored_versions(efx, req);
+
+       err |= efx_devlink_info_running_versions(efx, req);
+
+       if (err)
+               NL_SET_ERR_MSG_MOD(extack, "Errors when getting device info. Check system messages");
 
        return 0;
 }
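
The rewrite above accumulates failures with err |= instead of returning on the first one, so every version query still runs; since negative errnos are ORed together, only zero versus nonzero is meaningful, and the precise codes are left to the system log. A toy version of the aggregate-then-report pattern:

#include <stdio.h>

static int query(int rc) { return rc; }  /* stand-in for one MCDI call */

int main(void)
{
        int err = 0;

        err |= query(0);
        err |= query(-2);  /* one failure taints the aggregate...      */
        err |= query(0);

        /* ...so only "did anything fail?" survives, not which call    */
        if (err)
                fprintf(stderr, "errors occurred; check system messages\n");
        return err ? 1 : 0;
}
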
index 0327639..c004443 100644 (file)
@@ -624,13 +624,12 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
        if (!found) { /* We don't care. */
                netif_dbg(efx, drv, efx->net_dev,
                          "Ignoring foreign filter that doesn't egdev us\n");
-               rc = -EOPNOTSUPP;
-               goto release;
+               return -EOPNOTSUPP;
        }
 
        rc = efx_mae_match_check_caps(efx, &match.mask, NULL);
        if (rc)
-               goto release;
+               return rc;
 
        if (efx_tc_match_is_encap(&match.mask)) {
                enum efx_encap_type type;
@@ -639,8 +638,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
                if (type == EFX_ENCAP_TYPE_NONE) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Egress encap match on unsupported tunnel device");
-                       rc = -EOPNOTSUPP;
-                       goto release;
+                       return -EOPNOTSUPP;
                }
 
                rc = efx_mae_check_encap_type_supported(efx, type);
@@ -648,25 +646,24 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
                        NL_SET_ERR_MSG_FMT_MOD(extack,
                                               "Firmware reports no support for %s encap match",
                                               efx_tc_encap_type_name(type));
-                       goto release;
+                       return rc;
                }
 
                rc = efx_tc_flower_record_encap_match(efx, &match, type,
                                                      extack);
                if (rc)
-                       goto release;
+                       return rc;
        } else {
                /* This is not a tunnel decap rule, ignore it */
                netif_dbg(efx, drv, efx->net_dev,
                          "Ignoring foreign filter without encap match\n");
-               rc = -EOPNOTSUPP;
-               goto release;
+               return -EOPNOTSUPP;
        }
 
        rule = kzalloc(sizeof(*rule), GFP_USER);
        if (!rule) {
                rc = -ENOMEM;
-               goto release;
+               goto out_free;
        }
        INIT_LIST_HEAD(&rule->acts.list);
        rule->cookie = tc->cookie;
@@ -678,7 +675,7 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
                          "Ignoring already-offloaded rule (cookie %lx)\n",
                          tc->cookie);
                rc = -EEXIST;
-               goto release;
+               goto out_free;
        }
 
        act = kzalloc(sizeof(*act), GFP_USER);
@@ -843,6 +840,7 @@ release:
                                       efx_tc_match_action_ht_params);
                efx_tc_free_action_set_list(efx, &rule->acts, false);
        }
+out_free:
        kfree(rule);
        if (match.encap)
                efx_tc_flower_release_encap_match(efx, match.encap);
@@ -899,8 +897,7 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
                return rc;
        if (efx_tc_match_is_encap(&match.mask)) {
                NL_SET_ERR_MSG_MOD(extack, "Ingress enc_key matches not supported");
-               rc = -EOPNOTSUPP;
-               goto release;
+               return -EOPNOTSUPP;
        }
 
        if (tc->common.chain_index) {
@@ -924,9 +921,9 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
        if (old) {
                netif_dbg(efx, drv, efx->net_dev,
                          "Already offloaded rule (cookie %lx)\n", tc->cookie);
-               rc = -EEXIST;
                NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
-               goto release;
+               kfree(rule);
+               return -EEXIST;
        }
 
        /* Parse actions */
index 16a8c36..f07905f 100644 (file)
@@ -644,7 +644,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
        plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
        plat_dat->dump_debug_regs = rgmii_dump;
        plat_dat->has_gmac4 = 1;
-       plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
+       if (ethqos->has_emac3)
+               plat_dat->dwmac4_addrs = &data->dwmac4_addrs;
        plat_dat->pmt = 1;
        plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
        if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
index 0fca815..52cab9d 100644 (file)
@@ -7233,8 +7233,7 @@ int stmmac_dvr_probe(struct device *device,
        ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                            NETIF_F_RXCSUM;
        ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
-                            NETDEV_XDP_ACT_XSK_ZEROCOPY |
-                            NETDEV_XDP_ACT_NDO_XMIT;
+                            NETDEV_XDP_ACT_XSK_ZEROCOPY;
 
        ret = stmmac_tc_init(priv, priv);
        if (!ret) {
index 9d4d8c3..aa6f16d 100644 (file)
@@ -117,6 +117,9 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
                return -EOPNOTSUPP;
        }
 
+       if (!prog)
+               xdp_features_clear_redirect_target(dev);
+
        need_update = !!priv->xdp_prog != !!prog;
        if (if_running && need_update)
                stmmac_xdp_release(dev);
@@ -131,5 +134,8 @@ int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
        if (if_running && need_update)
                stmmac_xdp_open(dev);
 
+       if (prog)
+               xdp_features_set_redirect_target(dev, false);
+
        return 0;
 }
index 4ef05ba..d61dfa2 100644 (file)
@@ -5077,6 +5077,8 @@ err_out_iounmap:
                cas_shutdown(cp);
        mutex_unlock(&cp->pm_mutex);
 
+       vfree(cp->fw_data);
+
        pci_iounmap(pdev, cp->regs);
 
 
index 2ee80ed..afa1d56 100644 (file)
@@ -119,7 +119,7 @@ enum ipa_status_field_id {
 };
 
 /* Size in bytes of an IPA packet status structure */
-#define IPA_STATUS_SIZE                        sizeof(__le32[4])
+#define IPA_STATUS_SIZE                        sizeof(__le32[8])
 
 /* IPA status structure decoder; looks up field values for a structure */
 static u32 ipa_status_extract(struct ipa *ipa, const void *data,
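
The one-line fix above grows IPA_STATUS_SIZE from 16 to 32 bytes: sizeof applied to an array type such as __le32[8] yields the size of the whole array, not of one element. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* sizeof(type[N]) is N * sizeof(type): 4*4=16 vs 8*4=32 bytes */
        printf("%zu %zu\n", sizeof(uint32_t[4]), sizeof(uint32_t[8]));
        return 0;
}
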
index 1e0c206..da2001e 100644 (file)
@@ -291,7 +291,8 @@ static int i2c_rollball_mii_cmd(struct mii_bus *bus, int bus_addr, u8 cmd,
        return i2c_transfer_rollball(i2c, msgs, ARRAY_SIZE(msgs));
 }
 
-static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
+static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int devad,
+                                int reg)
 {
        u8 buf[4], res[6];
        int bus_addr, ret;
@@ -302,7 +303,7 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
                return 0xffff;
 
        buf[0] = ROLLBALL_DATA_ADDR;
-       buf[1] = (reg >> 16) & 0x1f;
+       buf[1] = devad;
        buf[2] = (reg >> 8) & 0xff;
        buf[3] = reg & 0xff;
 
@@ -322,8 +323,8 @@ static int i2c_mii_read_rollball(struct mii_bus *bus, int phy_id, int reg)
        return val;
 }
 
-static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
-                                 u16 val)
+static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int devad,
+                                 int reg, u16 val)
 {
        int bus_addr, ret;
        u8 buf[6];
@@ -333,7 +334,7 @@ static int i2c_mii_write_rollball(struct mii_bus *bus, int phy_id, int reg,
                return 0;
 
        buf[0] = ROLLBALL_DATA_ADDR;
-       buf[1] = (reg >> 16) & 0x1f;
+       buf[1] = devad;
        buf[2] = (reg >> 8) & 0xff;
        buf[3] = reg & 0xff;
        buf[4] = val >> 8;
@@ -405,8 +406,8 @@ struct mii_bus *mdio_i2c_alloc(struct device *parent, struct i2c_adapter *i2c,
                        return ERR_PTR(ret);
                }
 
-               mii->read = i2c_mii_read_rollball;
-               mii->write = i2c_mii_write_rollball;
+               mii->read_c45 = i2c_mii_read_rollball;
+               mii->write_c45 = i2c_mii_write_rollball;
                break;
        default:
                mii->read = i2c_mii_read_default_c22;
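
The rollball accessors move from the packed clause-45 encoding (MMD device in bits 20:16 of reg) to the dedicated read_c45/write_c45 ops, which take devad as its own parameter. A small sketch contrasting the two encodings; the values are examples, not a real register map:

    #include <stdio.h>

    int main(void)
    {
            int devad = 3, reg = 0x0007;
            int packed = (devad << 16) | reg;       /* old c22-op encoding */

            printf("old: devad=%d reg=0x%04x\n",
                   (packed >> 16) & 0x1f, packed & 0xffff);
            printf("new: devad=%d reg=0x%04x\n", devad, reg & 0xffff);
            return 0;
    }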
index f19d48c..72f25e7 100644 (file)
@@ -873,7 +873,7 @@ int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
 
        switch (compat->an_mode) {
        case DW_AN_C73:
-               if (phylink_autoneg_inband(mode)) {
+               if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising)) {
                        ret = xpcs_config_aneg_c73(xpcs, compat);
                        if (ret)
                                return ret;
index d75f526..76f5a24 100644 (file)
@@ -44,6 +44,7 @@
 #define DP83867_STRAP_STS1     0x006E
 #define DP83867_STRAP_STS2     0x006f
 #define DP83867_RGMIIDCTL      0x0086
+#define DP83867_DSP_FFE_CFG    0x012c
 #define DP83867_RXFCFG         0x0134
 #define DP83867_RXFPMD1        0x0136
 #define DP83867_RXFPMD2        0x0137
@@ -941,8 +942,27 @@ static int dp83867_phy_reset(struct phy_device *phydev)
 
        usleep_range(10, 20);
 
-       return phy_modify(phydev, MII_DP83867_PHYCTRL,
+       err = phy_modify(phydev, MII_DP83867_PHYCTRL,
                         DP83867_PHYCR_FORCE_LINK_GOOD, 0);
+       if (err < 0)
+               return err;
+
+       /* Configure the DSP Feedforward Equalizer Configuration register to
+        * improve short cable (< 1 meter) performance. This will not affect
+        * long cable performance.
+        */
+       err = phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_DSP_FFE_CFG,
+                           0x0e81);
+       if (err < 0)
+               return err;
+
+       err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
+       if (err < 0)
+               return err;
+
+       usleep_range(10, 20);
+
+       return 0;
 }
 
 static void dp83867_link_change_notify(struct phy_device *phydev)
index a50235f..defe5cc 100644 (file)
@@ -179,6 +179,7 @@ enum rgmii_clock_delay {
 #define VSC8502_RGMII_CNTL               20
 #define VSC8502_RGMII_RX_DELAY_MASK      0x0070
 #define VSC8502_RGMII_TX_DELAY_MASK      0x0007
+#define VSC8502_RGMII_RX_CLK_DISABLE     0x0800
 
 #define MSCC_PHY_WOL_LOWER_MAC_ADDR      21
 #define MSCC_PHY_WOL_MID_MAC_ADDR        22
@@ -276,6 +277,7 @@ enum rgmii_clock_delay {
 /* Microsemi PHY ID's
  *   Code assumes lowest nibble is 0
  */
+#define PHY_ID_VSC8501                   0x00070530
 #define PHY_ID_VSC8502                   0x00070630
 #define PHY_ID_VSC8504                   0x000704c0
 #define PHY_ID_VSC8514                   0x00070670
index 62bf99e..28df8a2 100644 (file)
@@ -519,16 +519,27 @@ out_unlock:
  *  * 2.0 ns (which causes the data to be sampled at exactly halfway between
  *    clock transitions at 1000 Mbps) if delays should be enabled
  */
-static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
-                                  u16 rgmii_rx_delay_mask,
-                                  u16 rgmii_tx_delay_mask)
+static int vsc85xx_update_rgmii_cntl(struct phy_device *phydev, u32 rgmii_cntl,
+                                    u16 rgmii_rx_delay_mask,
+                                    u16 rgmii_tx_delay_mask)
 {
        u16 rgmii_rx_delay_pos = ffs(rgmii_rx_delay_mask) - 1;
        u16 rgmii_tx_delay_pos = ffs(rgmii_tx_delay_mask) - 1;
        u16 reg_val = 0;
-       int rc;
+       u16 mask = 0;
+       int rc = 0;
 
-       mutex_lock(&phydev->lock);
+       /* For traffic to pass, the VSC8502 family needs the RX_CLK disable bit
+        * to be unset for all PHY modes, so do that as part of the paged
+        * register modification.
+        * For some family members (like VSC8530/31/40/41) this bit is reserved
+        * and read-only, and the RX clock is enabled by default.
+        */
+       if (rgmii_cntl == VSC8502_RGMII_CNTL)
+               mask |= VSC8502_RGMII_RX_CLK_DISABLE;
+
+       if (phy_interface_is_rgmii(phydev))
+               mask |= rgmii_rx_delay_mask | rgmii_tx_delay_mask;
 
        if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID ||
            phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
@@ -537,31 +548,20 @@ static int vsc85xx_rgmii_set_skews(struct phy_device *phydev, u32 rgmii_cntl,
            phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
                reg_val |= RGMII_CLK_DELAY_2_0_NS << rgmii_tx_delay_pos;
 
-       rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
-                             rgmii_cntl,
-                             rgmii_rx_delay_mask | rgmii_tx_delay_mask,
-                             reg_val);
-
-       mutex_unlock(&phydev->lock);
+       if (mask)
+               rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2,
+                                     rgmii_cntl, mask, reg_val);
 
        return rc;
 }
 
 static int vsc85xx_default_config(struct phy_device *phydev)
 {
-       int rc;
-
        phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
 
-       if (phy_interface_mode_is_rgmii(phydev->interface)) {
-               rc = vsc85xx_rgmii_set_skews(phydev, VSC8502_RGMII_CNTL,
-                                            VSC8502_RGMII_RX_DELAY_MASK,
-                                            VSC8502_RGMII_TX_DELAY_MASK);
-               if (rc)
-                       return rc;
-       }
-
-       return 0;
+       return vsc85xx_update_rgmii_cntl(phydev, VSC8502_RGMII_CNTL,
+                                        VSC8502_RGMII_RX_DELAY_MASK,
+                                        VSC8502_RGMII_TX_DELAY_MASK);
 }
 
 static int vsc85xx_get_tunable(struct phy_device *phydev,
@@ -1758,13 +1758,11 @@ static int vsc8584_config_init(struct phy_device *phydev)
        if (ret)
                return ret;
 
-       if (phy_interface_is_rgmii(phydev)) {
-               ret = vsc85xx_rgmii_set_skews(phydev, VSC8572_RGMII_CNTL,
-                                             VSC8572_RGMII_RX_DELAY_MASK,
-                                             VSC8572_RGMII_TX_DELAY_MASK);
-               if (ret)
-                       return ret;
-       }
+       ret = vsc85xx_update_rgmii_cntl(phydev, VSC8572_RGMII_CNTL,
+                                       VSC8572_RGMII_RX_DELAY_MASK,
+                                       VSC8572_RGMII_TX_DELAY_MASK);
+       if (ret)
+               return ret;
 
        ret = genphy_soft_reset(phydev);
        if (ret)
@@ -2317,6 +2315,30 @@ static int vsc85xx_probe(struct phy_device *phydev)
 /* Microsemi VSC85xx PHYs */
 static struct phy_driver vsc85xx_driver[] = {
 {
+       .phy_id         = PHY_ID_VSC8501,
+       .name           = "Microsemi GE VSC8501 SyncE",
+       .phy_id_mask    = 0xfffffff0,
+       /* PHY_BASIC_FEATURES */
+       .soft_reset     = &genphy_soft_reset,
+       .config_init    = &vsc85xx_config_init,
+       .config_aneg    = &vsc85xx_config_aneg,
+       .read_status    = &vsc85xx_read_status,
+       .handle_interrupt = vsc85xx_handle_interrupt,
+       .config_intr    = &vsc85xx_config_intr,
+       .suspend        = &genphy_suspend,
+       .resume         = &genphy_resume,
+       .probe          = &vsc85xx_probe,
+       .set_wol        = &vsc85xx_wol_set,
+       .get_wol        = &vsc85xx_wol_get,
+       .get_tunable    = &vsc85xx_get_tunable,
+       .set_tunable    = &vsc85xx_set_tunable,
+       .read_page      = &vsc85xx_phy_read_page,
+       .write_page     = &vsc85xx_phy_write_page,
+       .get_sset_count = &vsc85xx_get_sset_count,
+       .get_strings    = &vsc85xx_get_strings,
+       .get_stats      = &vsc85xx_get_stats,
+},
+{
        .phy_id         = PHY_ID_VSC8502,
        .name           = "Microsemi GE VSC8502 SyncE",
        .phy_id_mask    = 0xfffffff0,
@@ -2656,6 +2678,8 @@ static struct phy_driver vsc85xx_driver[] = {
 module_phy_driver(vsc85xx_driver);
 
 static struct mdio_device_id __maybe_unused vsc85xx_tbl[] = {
+       { PHY_ID_VSC8501, 0xfffffff0, },
+       { PHY_ID_VSC8502, 0xfffffff0, },
        { PHY_ID_VSC8504, 0xfffffff0, },
        { PHY_ID_VSC8514, 0xfffffff0, },
        { PHY_ID_VSC8530, 0xfffffff0, },
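
vsc85xx_update_rgmii_cntl() now builds the modify mask conditionally and skips the register access entirely when no bits are selected. A compact userspace model of that logic, with made-up register contents and masks borrowed from the hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_CLK_DISABLE 0x0800
    #define RX_DELAY_MASK  0x0070
    #define TX_DELAY_MASK  0x0007

    static uint16_t reg = 0x0877;           /* pretend hardware register */

    static void modify(uint16_t mask, uint16_t val)
    {
            reg = (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
            int has_rx_clk_bit = 1, is_rgmii = 1;
            uint16_t mask = 0, val = 0x0022; /* example 2.0 ns delay values */

            if (has_rx_clk_bit)
                    mask |= RX_CLK_DISABLE; /* val has the bit unset: clear it */
            if (is_rgmii)
                    mask |= RX_DELAY_MASK | TX_DELAY_MASK;

            if (mask)                       /* no access if nothing to change */
                    modify(mask, val);

            printf("reg=0x%04x\n", reg);
            return 0;
    }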
index 6301a9a..ea1073a 100644 (file)
@@ -274,13 +274,6 @@ static int gpy_config_init(struct phy_device *phydev)
        return ret < 0 ? ret : 0;
 }
 
-static bool gpy_has_broken_mdint(struct phy_device *phydev)
-{
-       /* At least these PHYs are known to have broken interrupt handling */
-       return phydev->drv->phy_id == PHY_ID_GPY215B ||
-              phydev->drv->phy_id == PHY_ID_GPY215C;
-}
-
 static int gpy_probe(struct phy_device *phydev)
 {
        struct device *dev = &phydev->mdio.dev;
@@ -300,8 +293,7 @@ static int gpy_probe(struct phy_device *phydev)
        phydev->priv = priv;
        mutex_init(&priv->mbox_lock);
 
-       if (gpy_has_broken_mdint(phydev) &&
-           !device_property_present(dev, "maxlinear,use-broken-interrupts"))
+       if (!device_property_present(dev, "maxlinear,use-broken-interrupts"))
                phydev->dev_flags |= PHY_F_NO_IRQ;
 
        fw_version = phy_read(phydev, PHY_FWV);
@@ -659,11 +651,9 @@ static irqreturn_t gpy_handle_interrupt(struct phy_device *phydev)
         * frame. Therefore, polling is the best we can do and won't do any more
         * harm.
         * It was observed that this bug happens on link state and link speed
-        * changes on a GPY215B and GYP215C independent of the firmware version
-        * (which doesn't mean that this list is exhaustive).
+        * changes independent of the firmware version.
         */
-       if (gpy_has_broken_mdint(phydev) &&
-           (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC))) {
+       if (reg & (PHY_IMASK_LSTC | PHY_IMASK_LSPC)) {
                reg = gpy_mbox_read(phydev, REG_GPIO0_OUT);
                if (reg < 0) {
                        phy_error(phydev);
index a4111f1..b483111 100644 (file)
@@ -2226,6 +2226,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
        ASSERT_RTNL();
 
        if (pl->phydev) {
+               struct ethtool_link_ksettings phy_kset = *kset;
+
+               linkmode_and(phy_kset.link_modes.advertising,
+                            phy_kset.link_modes.advertising,
+                            pl->supported);
+
                /* We can rely on phylib for this update; we also do not need
                 * to update the pl->link_config settings:
                 * - the configuration returned via ksettings_get() will come
@@ -2244,11 +2250,10 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
                 *   the presence of a PHY, this should not be changed as that
                 *   should be determined from the media side advertisement.
                 */
-               return phy_ethtool_ksettings_set(pl->phydev, kset);
+               return phy_ethtool_ksettings_set(pl->phydev, &phy_kset);
        }
 
        config = pl->link_config;
-
        /* Mask out unsupported advertisements */
        linkmode_and(config.advertising, kset->link_modes.advertising,
                     pl->supported);
index d10606f..555b0b1 100644 (file)
@@ -1629,6 +1629,7 @@ static int team_init(struct net_device *dev)
 
        team->dev = dev;
        team_set_no_mode(team);
+       team->notifier_ctx = false;
 
        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
        if (!team->pcpu_stats)
@@ -3022,7 +3023,11 @@ static int team_device_event(struct notifier_block *unused,
                team_del_slave(port->team->dev, dev);
                break;
        case NETDEV_FEAT_CHANGE:
-               team_compute_features(port->team);
+               if (!port->team->notifier_ctx) {
+                       port->team->notifier_ctx = true;
+                       team_compute_features(port->team);
+                       port->team->notifier_ctx = false;
+               }
                break;
        case NETDEV_PRECHANGEMTU:
                /* Forbid changing the MTU of the underlying device */
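
The notifier_ctx flag above is a reentrancy guard: team_compute_features() can itself raise another NETDEV_FEAT_CHANGE, and the flag keeps the handler from recursing. A self-contained sketch of the same guard, with the notifier machinery simulated (names mirror the hunk, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    struct team_like {
            bool notifier_ctx;
    };

    static void recompute_features(struct team_like *t);

    static void on_feat_change(struct team_like *t)
    {
            if (t->notifier_ctx)
                    return;         /* nested notification: ignore */
            t->notifier_ctx = true;
            recompute_features(t);  /* may fire on_feat_change() again */
            t->notifier_ctx = false;
    }

    static void recompute_features(struct team_like *t)
    {
            printf("recomputing\n");
            on_feat_change(t);      /* simulate the re-notification */
    }

    int main(void)
    {
            struct team_like t = { .notifier_ctx = false };

            on_feat_change(&t);     /* prints "recomputing" exactly once */
            return 0;
    }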
index d4d0a41..d75456a 100644 (file)
@@ -1977,6 +1977,14 @@ napi_busy:
                int queue_len;
 
                spin_lock_bh(&queue->lock);
+
+               if (unlikely(tfile->detached)) {
+                       spin_unlock_bh(&queue->lock);
+                       rcu_read_unlock();
+                       err = -EBUSY;
+                       goto free_skb;
+               }
+
                __skb_queue_tail(queue, skb);
                queue_len = skb_queue_len(queue);
                spin_unlock(&queue->lock);
@@ -2512,6 +2520,13 @@ build:
        if (tfile->napi_enabled) {
                queue = &tfile->sk.sk_write_queue;
                spin_lock(&queue->lock);
+
+               if (unlikely(tfile->detached)) {
+                       spin_unlock(&queue->lock);
+                       kfree_skb(skb);
+                       return -EBUSY;
+               }
+
                __skb_queue_tail(queue, skb);
                spin_unlock(&queue->lock);
                ret = 1;
index 6ce8f4f..db05622 100644 (file)
@@ -181,9 +181,12 @@ static u32 cdc_ncm_check_tx_max(struct usbnet *dev, u32 new_tx)
        else
                min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth32);
 
-       max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize));
-       if (max == 0)
+       if (le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) == 0)
                max = CDC_NCM_NTB_MAX_SIZE_TX; /* dwNtbOutMaxSize not set */
+       else
+               max = clamp_t(u32, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize),
+                             USB_CDC_NCM_NTB_MIN_OUT_SIZE,
+                             CDC_NCM_NTB_MAX_SIZE_TX);
 
        /* some devices set dwNtbOutMaxSize too low for the above default */
        min = min(min, max);
@@ -1244,6 +1247,9 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                         * further.
                         */
                        if (skb_out == NULL) {
+                               /* If even the smallest allocation fails, abort. */
+                               if (ctx->tx_curr_size == USB_CDC_NCM_NTB_MIN_OUT_SIZE)
+                                       goto alloc_failed;
                                ctx->tx_low_mem_max_cnt = min(ctx->tx_low_mem_max_cnt + 1,
                                                              (unsigned)CDC_NCM_LOW_MEM_MAX_CNT);
                                ctx->tx_low_mem_val = ctx->tx_low_mem_max_cnt;
@@ -1262,13 +1268,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
                        skb_out = alloc_skb(ctx->tx_curr_size, GFP_ATOMIC);
 
                        /* No allocation possible so we will abort */
-                       if (skb_out == NULL) {
-                               if (skb != NULL) {
-                                       dev_kfree_skb_any(skb);
-                                       dev->net->stats.tx_dropped++;
-                               }
-                               goto exit_no_skb;
-                       }
+                       if (!skb_out)
+                               goto alloc_failed;
                        ctx->tx_low_mem_val--;
                }
                if (ctx->is_ndp16) {
@@ -1461,6 +1462,11 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 
        return skb_out;
 
+alloc_failed:
+       if (skb) {
+               dev_kfree_skb_any(skb);
+               dev->net->stats.tx_dropped++;
+       }
 exit_no_skb:
        /* Start timer, if there is a remaining non-empty skb */
        if (ctx->tx_curr_skb != NULL && n > 0)
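
cdc_ncm_check_tx_max() now clamps a nonzero dwNtbOutMaxSize into bounds and falls back to the default only when the device reports zero. A userspace model of the clamp; the two limits are stand-ins for USB_CDC_NCM_NTB_MIN_OUT_SIZE and CDC_NCM_NTB_MAX_SIZE_TX:

    #include <stdint.h>
    #include <stdio.h>

    #define NTB_MIN_OUT_SIZE 2048u
    #define NTB_MAX_SIZE_TX  32768u

    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    int main(void)
    {
            uint32_t reported[] = { 0, 512, 16384, 1u << 20 };

            for (unsigned i = 0; i < sizeof(reported) / sizeof(reported[0]); i++) {
                    /* zero means "not set": fall back to the default cap */
                    uint32_t max = reported[i] ? clamp_u32(reported[i],
                                                           NTB_MIN_OUT_SIZE,
                                                           NTB_MAX_SIZE_TX)
                                               : NTB_MAX_SIZE_TX;

                    printf("reported %u -> max %u\n",
                           (unsigned)reported[i], (unsigned)max);
            }
            return 0;
    }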
index 571e37e..f1865d0 100644 (file)
@@ -1325,7 +1325,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2001, 0x7e3d, 4)},    /* D-Link DWM-222 A2 */
        {QMI_FIXED_INTF(0x2020, 0x2031, 4)},    /* Olicard 600 */
        {QMI_FIXED_INTF(0x2020, 0x2033, 4)},    /* BroadMobi BM806U */
-       {QMI_FIXED_INTF(0x2020, 0x2060, 4)},    /* BroadMobi BM818 */
+       {QMI_QUIRK_SET_DTR(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
        {QMI_FIXED_INTF(0x1199, 0x68a2, 8)},    /* Sierra Wireless MC7710 in QMI mode */
index a12ae26..486b584 100644 (file)
@@ -205,6 +205,8 @@ struct control_buf {
        __virtio16 vid;
        __virtio64 offloads;
        struct virtio_net_ctrl_rss rss;
+       struct virtio_net_ctrl_coal_tx coal_tx;
+       struct virtio_net_ctrl_coal_rx coal_rx;
 };
 
 struct virtnet_info {
@@ -1868,6 +1870,38 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        return received;
 }
 
+static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+       virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
+       napi_disable(&vi->rq[qp_index].napi);
+       xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+}
+
+static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
+{
+       struct net_device *dev = vi->dev;
+       int err;
+
+       err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
+                              vi->rq[qp_index].napi.napi_id);
+       if (err < 0)
+               return err;
+
+       err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
+                                        MEM_TYPE_PAGE_SHARED, NULL);
+       if (err < 0)
+               goto err_xdp_reg_mem_model;
+
+       virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
+       virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
+
+       return 0;
+
+err_xdp_reg_mem_model:
+       xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
+       return err;
+}
+
 static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
@@ -1881,22 +1915,20 @@ static int virtnet_open(struct net_device *dev)
                        if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
                                schedule_delayed_work(&vi->refill, 0);
 
-               err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i, vi->rq[i].napi.napi_id);
+               err = virtnet_enable_queue_pair(vi, i);
                if (err < 0)
-                       return err;
-
-               err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
-                                                MEM_TYPE_PAGE_SHARED, NULL);
-               if (err < 0) {
-                       xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-                       return err;
-               }
-
-               virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
-               virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
+                       goto err_enable_qp;
        }
 
        return 0;
+
+err_enable_qp:
+       disable_delayed_refill(vi);
+       cancel_delayed_work_sync(&vi->refill);
+
+       for (i--; i >= 0; i--)
+               virtnet_disable_queue_pair(vi, i);
+       return err;
 }
 
 static int virtnet_poll_tx(struct napi_struct *napi, int budget)
@@ -2305,11 +2337,8 @@ static int virtnet_close(struct net_device *dev)
        /* Make sure refill_work doesn't re-enable napi! */
        cancel_delayed_work_sync(&vi->refill);
 
-       for (i = 0; i < vi->max_queue_pairs; i++) {
-               virtnet_napi_tx_disable(&vi->sq[i].napi);
-               napi_disable(&vi->rq[i].napi);
-               xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
-       }
+       for (i = 0; i < vi->max_queue_pairs; i++)
+               virtnet_disable_queue_pair(vi, i);
 
        return 0;
 }
@@ -2907,12 +2936,10 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
                                       struct ethtool_coalesce *ec)
 {
        struct scatterlist sgs_tx, sgs_rx;
-       struct virtio_net_ctrl_coal_tx coal_tx;
-       struct virtio_net_ctrl_coal_rx coal_rx;
 
-       coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
-       coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
-       sg_init_one(&sgs_tx, &coal_tx, sizeof(coal_tx));
+       vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+       vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+       sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
                                  VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -2923,9 +2950,9 @@ static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
        vi->tx_usecs = ec->tx_coalesce_usecs;
        vi->tx_max_packets = ec->tx_max_coalesced_frames;
 
-       coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
-       coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
-       sg_init_one(&sgs_rx, &coal_rx, sizeof(coal_rx));
+       vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+       vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+       sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
                                  VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
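
virtnet_open() now unwinds partially enabled queue pairs in reverse order when one fails, so teardown mirrors setup. The shape of that error path, modeled standalone (the failing index is arbitrary):

    #include <stdio.h>

    #define NPAIRS 4

    static int enable_pair(int i)
    {
            if (i == 2)
                    return -1;      /* simulate a registration failure */
            printf("enabled %d\n", i);
            return 0;
    }

    static void disable_pair(int i)
    {
            printf("disabled %d\n", i);
    }

    int main(void)
    {
            int i, err = 0;

            for (i = 0; i < NPAIRS; i++) {
                    err = enable_pair(i);
                    if (err < 0)
                            goto err_enable;
            }
            return 0;

    err_enable:
            for (i--; i >= 0; i--)  /* undo only what succeeded, in reverse */
                    disable_pair(i);
            return err;
    }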
index 9fc7c08..67b4bac 100644 (file)
@@ -651,7 +651,7 @@ struct b43_iv {
        union {
                __be16 d16;
                __be32 d32;
-       } data __packed;
+       } __packed data;
 } __packed;
 
 
index 6b0cec4..f49365d 100644 (file)
@@ -379,7 +379,7 @@ struct b43legacy_iv {
        union {
                __be16 d16;
                __be32 d32;
-       } data __packed;
+       } __packed data;
 } __packed;
 
 #define B43legacy_PHYMODE(phytype)     (1 << (phytype))
index ff710b0..00679a9 100644 (file)
@@ -1039,6 +1039,11 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
        struct brcmf_sdio_dev *sdiodev;
        struct brcmf_bus *bus_if;
 
+       if (!id) {
+               dev_err(&func->dev, "Error no sdio_device_id passed for %x:%x\n", func->vendor, func->device);
+               return -ENODEV;
+       }
+
        brcmf_dbg(SDIO, "Enter\n");
        brcmf_dbg(SDIO, "Class=%x\n", func->class);
        brcmf_dbg(SDIO, "sdio vendor ID: 0x%04x\n", func->vendor);
index 59f3e9c..8022068 100644 (file)
@@ -2394,6 +2394,9 @@ static void brcmf_pcie_debugfs_create(struct device *dev)
 }
 #endif
 
+/* Forward declaration for pci_match_id() call */
+static const struct pci_device_id brcmf_pcie_devid_table[];
+
 static int
 brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -2404,6 +2407,14 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct brcmf_core *core;
        struct brcmf_bus *bus;
 
+       if (!id) {
+               id = pci_match_id(brcmf_pcie_devid_table, pdev);
+               if (!id) {
+                       pci_err(pdev, "Error could not find pci_device_id for %x:%x\n", pdev->vendor, pdev->device);
+                       return -ENODEV;
+               }
+       }
+
        brcmf_dbg(PCIE, "Enter %x:%x\n", pdev->vendor, pdev->device);
 
        ret = -ENOMEM;
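
Both brcmfmac probe paths now tolerate a NULL id by re-matching against the driver's own device table and failing with -ENODEV when nothing matches. A simplified standalone model of that fallback; the table entries are placeholders, not the driver's real device list:

    #include <stdio.h>

    struct dev_id { unsigned short vendor, device; };

    static const struct dev_id id_table[] = {
            { 0x14e4, 0x0001 },     /* placeholder entries */
            { 0x14e4, 0x0002 },
            { 0, 0 }                /* terminator */
    };

    static const struct dev_id *match_id(unsigned short v, unsigned short d)
    {
            for (const struct dev_id *p = id_table; p->vendor; p++)
                    if (p->vendor == v && p->device == d)
                            return p;
            return NULL;
    }

    static int probe(const struct dev_id *id, unsigned short v, unsigned short d)
    {
            if (!id) {
                    id = match_id(v, d);
                    if (!id)
                            return -19;     /* -ENODEV: nothing we can drive */
            }
            printf("probing %04x:%04x\n", id->vendor, id->device);
            return 0;
    }

    int main(void)
    {
            probe(NULL, 0x14e4, 0x0002);    /* recovered from the table */
            return probe(NULL, 0x1234, 0x5678) ? 1 : 0;
    }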
index 246843a..2178675 100644 (file)
@@ -1331,6 +1331,9 @@ brcmf_usb_disconnect_cb(struct brcmf_usbdev_info *devinfo)
        brcmf_usb_detach(devinfo);
 }
 
+/* Forward declaration for usb_match_id() call */
+static const struct usb_device_id brcmf_usb_devid_table[];
+
 static int
 brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
 {
@@ -1342,6 +1345,14 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
        u32 num_of_eps;
        u8 endpoint_num, ep;
 
+       if (!id) {
+               id = usb_match_id(intf, brcmf_usb_devid_table);
+               if (!id) {
+                       dev_err(&intf->dev, "Error could not find matching usb_device_id\n");
+                       return -ENODEV;
+               }
+       }
+
        brcmf_dbg(USB, "Enter 0x%04x:0x%04x\n", id->idVendor, id->idProduct);
 
        devinfo = kzalloc(sizeof(*devinfo), GFP_ATOMIC);
index 5f4a513..cb9181f 100644 (file)
@@ -38,7 +38,7 @@ static const struct dmi_system_id dmi_ppag_approved_list[] = {
        },
        { .ident = "ASUS",
          .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek COMPUTER INC."),
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
                },
        },
        {}
index d9faaae..5521997 100644 (file)
@@ -1664,14 +1664,10 @@ static __le32 iwl_get_mon_reg(struct iwl_fw_runtime *fwrt, u32 alloc_id,
 }
 
 static void *
-iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt,
-                            struct iwl_dump_ini_region_data *reg_data,
+iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime *fwrt, u32 alloc_id,
                             struct iwl_fw_ini_monitor_dump *data,
                             const struct iwl_fw_mon_regs *addrs)
 {
-       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
-       u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
-
        if (!iwl_trans_grab_nic_access(fwrt->trans)) {
                IWL_ERR(fwrt, "Failed to get monitor header\n");
                return NULL;
@@ -1702,8 +1698,10 @@ iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime *fwrt,
                                  void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+       u32 alloc_id = le32_to_cpu(reg->dram_alloc_id);
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
                                            &fwrt->trans->cfg->mon_dram_regs);
 }
 
@@ -1713,8 +1711,10 @@ iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime *fwrt,
                                  void *data, u32 data_len)
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
+       struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+       u32 alloc_id = le32_to_cpu(reg->internal_buffer.alloc_id);
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt, alloc_id, mon_dump,
                                            &fwrt->trans->cfg->mon_smem_regs);
 }
 
@@ -1725,7 +1725,10 @@ iwl_dump_ini_mon_dbgi_fill_header(struct iwl_fw_runtime *fwrt,
 {
        struct iwl_fw_ini_monitor_dump *mon_dump = (void *)data;
 
-       return iwl_dump_ini_mon_fill_header(fwrt, reg_data, mon_dump,
+       return iwl_dump_ini_mon_fill_header(fwrt,
+                                           /* no offset calculation later */
+                                           IWL_FW_INI_ALLOCATION_ID_DBGC1,
+                                           mon_dump,
                                            &fwrt->trans->cfg->mon_dbgi_regs);
 }
 
index 37aa467..6d1007f 100644 (file)
@@ -2732,17 +2732,13 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
                if (wowlan_info_ver < 2) {
                        struct iwl_wowlan_info_notif_v1 *notif_v1 = (void *)pkt->data;
 
-                       notif = kmemdup(notif_v1,
-                                       offsetofend(struct iwl_wowlan_info_notif,
-                                                   received_beacons),
-                                       GFP_ATOMIC);
-
+                       notif = kmemdup(notif_v1, sizeof(*notif), GFP_ATOMIC);
                        if (!notif)
                                return false;
 
                        notif->tid_tear_down = notif_v1->tid_tear_down;
                        notif->station_id = notif_v1->station_id;
-
+                       memset_after(notif, 0, station_id);
                } else {
                        notif = (void *)pkt->data;
                }
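
The v1 path now allocates the full-size notification, copies the two fields v1 carries, and zeroes everything after station_id via memset_after(), so newer fields read as zero instead of stale heap data. A userspace model with illustrative struct layouts (the macro mirrors the kernel's memset_after and relies on GNU typeof):

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct notif_v1 { int tid_tear_down; int station_id; };
    struct notif    { int tid_tear_down; int station_id; int received_beacons; };

    #define memset_after(obj, v, member) \
            memset((char *)(obj) + offsetof(typeof(*(obj)), member) + \
                   sizeof((obj)->member), (v), \
                   sizeof(*(obj)) - offsetof(typeof(*(obj)), member) - \
                   sizeof((obj)->member))

    int main(void)
    {
            struct notif_v1 v1 = { .tid_tear_down = 1, .station_id = 7 };
            struct notif *n = malloc(sizeof(*n));

            if (!n)
                    return 1;
            n->tid_tear_down = v1.tid_tear_down;
            n->station_id = v1.station_id;
            memset_after(n, 0, station_id); /* newer fields read as zero */
            printf("beacons=%d\n", n->received_beacons);
            free(n);
            return 0;
    }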
index 3963a0d..652a603 100644 (file)
@@ -526,6 +526,11 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                rcu_read_lock();
 
                sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->deflink.ap_sta_id]);
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+                       rcu_read_unlock();
+                       return PTR_ERR_OR_ZERO(sta);
+               }
+
                if (sta->mfp && (peer->ftm.trigger_based || peer->ftm.non_trigger_based))
                        FTM_PUT_FLAG(PMF);
 
index b35c96c..205c09b 100644 (file)
@@ -1091,7 +1091,7 @@ static const struct dmi_system_id dmi_tas_approved_list[] = {
        },
                { .ident = "LENOVO",
          .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Lenovo"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                },
        },
        { .ident = "DELL",
@@ -1727,8 +1727,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        iwl_mvm_tas_init(mvm);
        iwl_mvm_leds_sync(mvm);
 
-       if (fw_has_capa(&mvm->fw->ucode_capa,
-                       IWL_UCODE_TLV_CAPA_RFIM_SUPPORT)) {
+       if (iwl_rfi_supported(mvm)) {
                if (iwl_mvm_eval_dsm_rfi(mvm) == DSM_VALUE_RFI_ENABLE)
                        iwl_rfi_send_config_cmd(mvm, NULL);
        }
index eb828de..3814915 100644 (file)
@@ -123,11 +123,13 @@ int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                if (mvmvif->link[i]->phy_ctxt)
                                        count++;
 
-                       /* FIXME: IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM should be
-                        * defined per HW
-                        */
-                       if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM)
-                               return -EINVAL;
+                       if (vif->type == NL80211_IFTYPE_AP) {
+                               if (count > mvm->fw->ucode_capa.num_beacons)
+                                       return -EOPNOTSUPP;
+                       /* ideally this limit should be defined per HW */
+                       } else if (count >= IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM) {
+                               return -EOPNOTSUPP;
+                       }
                }
 
                /* Catch early if driver tries to activate or deactivate a link
index 0f01b62..17f788a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
  * Copyright (C) 2016-2017 Intel Deutschland GmbH
  */
@@ -3607,7 +3607,8 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
                                      struct ieee80211_vif *vif,
                                      struct ieee80211_sta *sta)
 {
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
        /* Beacon interval check - firmware will crash if the beacon
         * interval is less than 16. We can't avoid connecting at all,
@@ -3616,14 +3617,11 @@ static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
         * wpa_s will blocklist the AP...
         */
 
-       for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                        IEEE80211_MLD_MAX_NUM_LINKS) {
-               struct ieee80211_link_sta *link_sta =
-                       link_sta_dereference_protected(sta, i);
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct ieee80211_bss_conf *link_conf =
-                       link_conf_dereference_protected(vif, i);
+                       link_conf_dereference_protected(vif, link_id);
 
-               if (!link_conf || !link_sta)
+               if (!link_conf)
                        continue;
 
                if (link_conf->beacon_int < IWL_MVM_MIN_BEACON_INTERVAL_TU) {
@@ -3645,24 +3643,23 @@ static void iwl_mvm_vif_set_he_support(struct ieee80211_hw *hw,
                                       bool is_sta)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
-       for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                        IEEE80211_MLD_MAX_NUM_LINKS) {
-               struct ieee80211_link_sta *link_sta =
-                       link_sta_dereference_protected(sta, i);
+       for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct ieee80211_bss_conf *link_conf =
-                       link_conf_dereference_protected(vif, i);
+                       link_conf_dereference_protected(vif, link_id);
 
-               if (!link_conf || !link_sta || !mvmvif->link[i])
+               if (!link_conf || !mvmvif->link[link_id])
                        continue;
 
                link_conf->he_support = link_sta->he_cap.has_he;
 
                if (is_sta) {
-                       mvmvif->link[i]->he_ru_2mhz_block = false;
+                       mvmvif->link[link_id]->he_ru_2mhz_block = false;
                        if (link_sta->he_cap.has_he)
-                               iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif, i,
+                               iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif,
+                                                                  link_id,
                                                                   link_conf);
                }
        }
@@ -3675,6 +3672,7 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
                                   struct iwl_mvm_sta_state_ops *callbacks)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct ieee80211_link_sta *link_sta;
        unsigned int i;
        int ret;
 
@@ -3699,15 +3697,9 @@ iwl_mvm_sta_state_notexist_to_none(struct iwl_mvm *mvm,
                                           NL80211_TDLS_SETUP);
        }
 
-       for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-               struct ieee80211_link_sta *link_sta;
-
-               link_sta = link_sta_dereference_protected(sta, i);
-               if (!link_sta)
-                       continue;
-
+       for_each_sta_active_link(vif, sta, link_sta, i)
                link_sta->agg.max_rc_amsdu_len = 1;
-       }
+
        ieee80211_sta_recalc_aggregates(sta);
 
        if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
@@ -3725,7 +3717,8 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
-       unsigned int i;
+       struct ieee80211_link_sta *link_sta;
+       unsigned int link_id;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -3751,14 +3744,13 @@ iwl_mvm_sta_state_auth_to_assoc(struct ieee80211_hw *hw,
                if (!mvm->mld_api_is_used)
                        goto out;
 
-               for_each_set_bit(i, (unsigned long *)&sta->valid_links,
-                                IEEE80211_MLD_MAX_NUM_LINKS) {
+               for_each_sta_active_link(vif, sta, link_sta, link_id) {
                        struct ieee80211_bss_conf *link_conf =
-                               link_conf_dereference_protected(vif, i);
+                               link_conf_dereference_protected(vif, link_id);
 
                        if (WARN_ON(!link_conf))
                                return -EINVAL;
-                       if (!mvmvif->link[i])
+                       if (!mvmvif->link[link_id])
                                continue;
 
                        iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -3889,6 +3881,9 @@ int iwl_mvm_mac_sta_state_common(struct ieee80211_hw *hw,
                 * from the AP now.
                 */
                iwl_mvm_reset_cca_40mhz_workaround(mvm, vif);
+
+               /* Also free dup data just in case any assertions below fail */
+               kfree(mvm_sta->dup_data);
        }
 
        mutex_lock(&mvm->mutex);
index fbc2d5e..7fb66c5 100644 (file)
@@ -906,11 +906,12 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
                                n_active++;
                }
 
-               if (vif->type == NL80211_IFTYPE_AP &&
-                   n_active > mvm->fw->ucode_capa.num_beacons)
-                       return -EOPNOTSUPP;
-               else if (n_active > 1)
+               if (vif->type == NL80211_IFTYPE_AP) {
+                       if (n_active > mvm->fw->ucode_capa.num_beacons)
+                               return -EOPNOTSUPP;
+               } else if (n_active > 1) {
                        return -EOPNOTSUPP;
+               }
        }
 
        for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
index 0bfdf44..85a4ce8 100644 (file)
@@ -667,15 +667,15 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                ret = iwl_mvm_mld_alloc_sta_links(mvm, vif, sta);
                if (ret)
                        return ret;
-       }
 
-       spin_lock_init(&mvm_sta->lock);
+               spin_lock_init(&mvm_sta->lock);
 
-       if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
-               ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
-       else
                ret = iwl_mvm_sta_init(mvm, vif, sta, IWL_MVM_INVALID_STA,
                                       STATION_TYPE_PEER);
+       } else {
+               ret = iwl_mvm_alloc_sta_after_restart(mvm, vif, sta);
+       }
+
        if (ret)
                goto err;
 
@@ -728,7 +728,7 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct ieee80211_link_sta *link_sta;
        unsigned int link_id;
-       int ret = 0;
+       int ret = -EINVAL;
 
        lockdep_assert_held(&mvm->mutex);
 
@@ -791,8 +791,6 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
        lockdep_assert_held(&mvm->mutex);
 
-       kfree(mvm_sta->dup_data);
-
        /* flush its queues here since we are freeing mvm_sta */
        for_each_sta_active_link(vif, sta, link_sta, link_id) {
                struct iwl_mvm_link_sta *mvm_link_sta =
index 6e7470d..9e5008e 100644 (file)
@@ -2347,6 +2347,7 @@ int iwl_mvm_mld_update_sta_keys(struct iwl_mvm *mvm,
                                u32 old_sta_mask,
                                u32 new_sta_mask);
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm);
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm,
                            struct iwl_rfi_lut_entry *rfi_table);
 struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm);
index 6d18a1f..fdf60af 100644 (file)
@@ -445,6 +445,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
 
                n_channels =  __le32_to_cpu(mcc_resp->n_channels);
+               if (iwl_rx_packet_payload_len(pkt) !=
+                   struct_size(mcc_resp, channels, n_channels)) {
+                       resp_cp = ERR_PTR(-EINVAL);
+                       goto exit;
+               }
                resp_len = sizeof(struct iwl_mcc_update_resp) +
                           n_channels * sizeof(__le32);
                resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
@@ -456,6 +461,11 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;
 
                n_channels =  __le32_to_cpu(mcc_resp_v3->n_channels);
+               if (iwl_rx_packet_payload_len(pkt) !=
+                   struct_size(mcc_resp_v3, channels, n_channels)) {
+                       resp_cp = ERR_PTR(-EINVAL);
+                       goto exit;
+               }
                resp_len = sizeof(struct iwl_mcc_update_resp) +
                           n_channels * sizeof(__le32);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
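
Both response versions are now validated by comparing the payload length against the size implied by n_channels before copying, rejecting packets that lie about their own channel count. A standalone model of that check, with the struct layout simplified:

    #include <stddef.h>
    #include <stdio.h>

    struct mcc_resp { unsigned int n_channels; unsigned int channels[]; };

    /* The kernel's struct_size(resp, channels, n) for this layout */
    #define mcc_resp_size(n) \
            (sizeof(struct mcc_resp) + (size_t)(n) * sizeof(unsigned int))

    int main(void)
    {
            size_t payload_len = sizeof(struct mcc_resp) +
                                 3 * sizeof(unsigned int);
            unsigned int n_channels = 8;    /* claims more than it carries */

            if (payload_len != mcc_resp_size(n_channels)) {
                    printf("reject: claimed %zu, got %zu\n",
                           mcc_resp_size(n_channels), payload_len);
                    return 1;
            }
            return 0;
    }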
index bb77bc9..2ecd32b 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2020 - 2021 Intel Corporation
+ * Copyright (C) 2020 - 2022 Intel Corporation
  */
 
 #include "mvm.h"
@@ -70,6 +70,16 @@ static const struct iwl_rfi_lut_entry iwl_rfi_table[IWL_RFI_LUT_SIZE] = {
                PHY_BAND_6, PHY_BAND_6,}},
 };
 
+bool iwl_rfi_supported(struct iwl_mvm *mvm)
+{
+       /* The feature depends on a platform bugfix, so for now
+        * it's always disabled.
+        * When the platform support detection is implemented we should
+        * check FW TLV and platform support instead.
+        */
+       return false;
+}
+
 int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_table)
 {
        int ret;
@@ -81,7 +91,7 @@ int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, struct iwl_rfi_lut_entry *rfi_t
                .len[0] = sizeof(cmd),
        };
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+       if (!iwl_rfi_supported(mvm))
                return -EOPNOTSUPP;
 
        lockdep_assert_held(&mvm->mutex);
@@ -113,7 +123,7 @@ struct iwl_rfi_freq_table_resp_cmd *iwl_rfi_get_freq_table(struct iwl_mvm *mvm)
                .flags = CMD_WANT_SKB,
        };
 
-       if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_RFIM_SUPPORT))
+       if (!iwl_rfi_supported(mvm))
                return ERR_PTR(-EOPNOTSUPP);
 
        mutex_lock(&mvm->mutex);
index a4c1e3b..23266d0 100644 (file)
@@ -2691,6 +2691,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
                return;
 
        lq_sta = mvm_sta;
+
+       spin_lock(&lq_sta->pers.lock);
        iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
                                     info->band, &info->control.rates[0]);
        info->control.rates[0].count = 1;
@@ -2705,6 +2707,7 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
                iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
                                             &txrc->reported_rate);
        }
+       spin_unlock(&lq_sta->pers.lock);
 }
 
 static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
index e1d02c2..6226e4e 100644 (file)
@@ -691,6 +691,11 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
 
                rcu_read_lock();
                sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+                       rcu_read_unlock();
+                       goto out;
+               }
+
                mvmsta = iwl_mvm_sta_from_mac80211(sta);
 
                /* SN is set to the last expired frame + 1 */
@@ -712,6 +717,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
                          entries[index].e.reorder_time +
                          1 + RX_REORDER_BUF_TIMEOUT_MQ);
        }
+
+out:
        spin_unlock(&buf->lock);
 }
 
@@ -2512,7 +2519,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
                                /* Unblock BCAST / MCAST station */
                                iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
-                               cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+                               cancel_delayed_work(&mvm->cs_tx_unblock_dwork);
                        }
                }
 
index 5469d63..05a54a6 100644 (file)
@@ -281,7 +281,7 @@ static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
         * A-MPDU and hence the timer continues to run. Then, the
         * timer expires and sta is NULL.
         */
-       if (!sta)
+       if (IS_ERR_OR_NULL(sta))
                goto unlock;
 
        mvm_sta = iwl_mvm_sta_from_mac80211(sta);
@@ -2089,9 +2089,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (iwl_mvm_has_new_rx_api(mvm))
-               kfree(mvm_sta->dup_data);
-
        ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
        if (ret)
                return ret;
@@ -3785,6 +3782,9 @@ static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
                u8 sta_id = mvmvif->deflink.ap_sta_id;
                sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                                lockdep_is_held(&mvm->mutex));
+               if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+                       return NULL;
+
                return sta->addr;
        }
 
@@ -3822,6 +3822,11 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 
        if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
                addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+               if (!addr) {
+                       IWL_ERR(mvm, "Failed to find mac address\n");
+                       return -EINVAL;
+               }
+
                /* get phase 1 key from mac80211 */
                ieee80211_get_key_rx_seq(keyconf, 0, &seq);
                ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
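
Several hunks in this file and the rx path switch from '!sta' to IS_ERR_OR_NULL(sta): the fw_id_to_mac_id[] slots can hold ERR_PTR()-encoded errors as well as NULL, so the plain NULL test is insufficient. A userspace model using the kernel's encoding convention:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }

    static int IS_ERR_OR_NULL(const void *p)
    {
            /* error pointers live in the top MAX_ERRNO bytes of the space */
            return !p || (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            int real = 0;
            void *slots[] = { NULL, ERR_PTR(-2), &real };

            for (int i = 0; i < 3; i++)
                    printf("slot %d: %s\n", i,
                           IS_ERR_OR_NULL(slots[i]) ? "skip" : "use");
            return 0;
    }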
index 10d7178..00719e1 100644 (file)
@@ -1875,7 +1875,7 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
        mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
 
        sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
-       if (WARN_ON_ONCE(!sta || !sta->wme)) {
+       if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
                rcu_read_unlock();
                return;
        }
index da1d17b..6400248 100644 (file)
@@ -914,7 +914,10 @@ void mt7615_mac_sta_poll(struct mt7615_dev *dev)
 
                msta = list_first_entry(&sta_poll_list, struct mt7615_sta,
                                        poll_list);
+
+               spin_lock_bh(&dev->sta_poll_lock);
                list_del_init(&msta->poll_list);
+               spin_unlock_bh(&dev->sta_poll_lock);
 
                addr = mt7615_mac_wtbl_addr(dev, msta->wcid.idx) + 19 * 4;
 
index a5ec0f6..fabf637 100644 (file)
@@ -173,7 +173,7 @@ enum {
 #define MT_TXS5_MPDU_TX_CNT            GENMASK(31, 23)
 
 #define MT_TXS6_MPDU_FAIL_CNT          GENMASK(31, 23)
-
+#define MT_TXS7_MPDU_RETRY_BYTE                GENMASK(22, 0)
 #define MT_TXS7_MPDU_RETRY_CNT         GENMASK(31, 23)
 
 /* RXD DW0 */
index ee0fbfc..d39a3cc 100644 (file)
@@ -608,7 +608,8 @@ bool mt76_connac2_mac_fill_txs(struct mt76_dev *dev, struct mt76_wcid *wcid,
        /* PPDU based reporting */
        if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1) {
                stats->tx_bytes +=
-                       le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE);
+                       le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_BYTE) -
+                       le32_get_bits(txs_data[7], MT_TXS7_MPDU_RETRY_BYTE);
                stats->tx_packets +=
                        le32_get_bits(txs_data[5], MT_TXS5_MPDU_TX_CNT);
                stats->tx_failed +=
index 130eb7b..9b0f605 100644 (file)
@@ -1004,10 +1004,10 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
 {
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_vif *vif = info->control.vif;
-       struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
        u8 band_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
        u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
        bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+       struct mt7996_vif *mvif;
        u16 tx_count = 15;
        u32 val;
        bool beacon = !!(changed & (BSS_CHANGED_BEACON |
@@ -1015,7 +1015,8 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
        bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
                                         BSS_CHANGED_FILS_DISCOVERY));
 
-       if (vif) {
+       mvif = vif ? (struct mt7996_vif *)vif->drv_priv : NULL;
+       if (mvif) {
                omac_idx = mvif->mt76.omac_idx;
                wmm_idx = mvif->mt76.wmm_idx;
                band_idx = mvif->mt76.band_idx;
@@ -1081,14 +1082,18 @@ void mt7996_mac_write_txwi(struct mt7996_dev *dev, __le32 *txwi,
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
                bool mcast = ieee80211_is_data(hdr->frame_control) &&
                             is_multicast_ether_addr(hdr->addr1);
-               u8 idx = mvif->basic_rates_idx;
+               u8 idx = MT7996_BASIC_RATES_TBL;
 
-               if (mcast && mvif->mcast_rates_idx)
-                       idx = mvif->mcast_rates_idx;
-               else if (beacon && mvif->beacon_rates_idx)
-                       idx = mvif->beacon_rates_idx;
+               if (mvif) {
+                       if (mcast && mvif->mcast_rates_idx)
+                               idx = mvif->mcast_rates_idx;
+                       else if (beacon && mvif->beacon_rates_idx)
+                               idx = mvif->beacon_rates_idx;
+                       else
+                               idx = mvif->basic_rates_idx;
+               }
 
-               txwi[6] |= FIELD_PREP(MT_TXD6_TX_RATE, idx);
+               txwi[6] |= cpu_to_le32(FIELD_PREP(MT_TXD6_TX_RATE, idx));
                txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
        }
 }
index 8eafbf1..808c1c8 100644 (file)
@@ -1803,6 +1803,7 @@ struct rtl8xxxu_priv {
        u32 rege9c;
        u32 regeb4;
        u32 regebc;
+       u32 regrcr;
        int next_mbox;
        int nr_out_eps;
 
index fd8c8c6..831639d 100644 (file)
@@ -4171,6 +4171,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
                RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL |
                RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC;
        rtl8xxxu_write32(priv, REG_RCR, val32);
+       priv->regrcr = val32;
 
        if (fops->init_reg_rxfltmap) {
                /* Accept all data frames */
@@ -6501,7 +6502,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
                                      unsigned int *total_flags, u64 multicast)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
-       u32 rcr = rtl8xxxu_read32(priv, REG_RCR);
+       u32 rcr = priv->regrcr;
 
        dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n",
                __func__, changed_flags, *total_flags);
@@ -6547,6 +6548,7 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw,
         */
 
        rtl8xxxu_write32(priv, REG_RCR, rcr);
+       priv->regrcr = rcr;
 
        *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC |
                         FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL |
index 7aa6eda..144618b 100644 (file)
@@ -88,15 +88,6 @@ static int rtw_ops_config(struct ieee80211_hw *hw, u32 changed)
                }
        }
 
-       if (changed & IEEE80211_CONF_CHANGE_PS) {
-               if (hw->conf.flags & IEEE80211_CONF_PS) {
-                       rtwdev->ps_enabled = true;
-               } else {
-                       rtwdev->ps_enabled = false;
-                       rtw_leave_lps(rtwdev);
-               }
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
                rtw_set_channel(rtwdev);
 
@@ -213,6 +204,7 @@ static int rtw_ops_add_interface(struct ieee80211_hw *hw,
        config |= PORT_SET_BCN_CTRL;
        rtw_vif_port_config(rtwdev, rtwvif, config);
        rtw_core_port_switch(rtwdev, vif);
+       rtw_recalc_lps(rtwdev, vif);
 
        mutex_unlock(&rtwdev->mutex);
 
@@ -244,6 +236,7 @@ static void rtw_ops_remove_interface(struct ieee80211_hw *hw,
        config |= PORT_SET_BCN_CTRL;
        rtw_vif_port_config(rtwdev, rtwvif, config);
        clear_bit(rtwvif->port, rtwdev->hw_port);
+       rtw_recalc_lps(rtwdev, NULL);
 
        mutex_unlock(&rtwdev->mutex);
 }
@@ -438,6 +431,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_ERP_SLOT)
                rtw_conf_tx(rtwdev, rtwvif);
 
+       if (changed & BSS_CHANGED_PS)
+               rtw_recalc_lps(rtwdev, NULL);
+
        rtw_vif_port_config(rtwdev, rtwvif, config);
 
        mutex_unlock(&rtwdev->mutex);
@@ -918,7 +914,7 @@ static void rtw_ops_sta_rc_update(struct ieee80211_hw *hw,
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
 
        if (changed & IEEE80211_RC_BW_CHANGED)
-               rtw_update_sta_info(rtwdev, si, true);
+               ieee80211_queue_work(rtwdev->hw, &si->rc_work);
 }
 
 const struct ieee80211_ops rtw_ops = {
index 5bf6b45..9447a3a 100644 (file)
@@ -271,8 +271,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
         * more than two stations associated to the AP, then we can not enter
         * lps, because fw does not handle the overlapped beacon interval
         *
-        * mac80211 should iterate vifs and determine if driver can enter
-        * ps by passing IEEE80211_CONF_PS to us, all we need to do is to
+        * rtw_recalc_lps() iterates vifs and determines if the driver can
+        * enter ps by vif->type and vif->cfg.ps; all we need to do here is to
         * get that vif and check if device is having traffic more than the
         * threshold.
         */
@@ -319,6 +319,17 @@ static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
        return mac_id;
 }
 
+static void rtw_sta_rc_work(struct work_struct *work)
+{
+       struct rtw_sta_info *si = container_of(work, struct rtw_sta_info,
+                                              rc_work);
+       struct rtw_dev *rtwdev = si->rtwdev;
+
+       mutex_lock(&rtwdev->mutex);
+       rtw_update_sta_info(rtwdev, si, true);
+       mutex_unlock(&rtwdev->mutex);
+}
+
 int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
                struct ieee80211_vif *vif)
 {
@@ -329,12 +340,14 @@ int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
                return -ENOSPC;
 
+       si->rtwdev = rtwdev;
        si->sta = sta;
        si->vif = vif;
        si->init_ra_lv = 1;
        ewma_rssi_init(&si->avg_rssi);
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
                rtw_txq_init(rtwdev, sta->txq[i]);
+       INIT_WORK(&si->rc_work, rtw_sta_rc_work);
 
        rtw_update_sta_info(rtwdev, si, true);
        rtw_fw_media_status_report(rtwdev, si->mac_id, true);
@@ -353,6 +366,8 @@ void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
        struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
        int i;
 
+       cancel_work_sync(&si->rc_work);
+
        rtw_release_macid(rtwdev, si->mac_id);
        if (fw_exist)
                rtw_fw_media_status_report(rtwdev, si->mac_id, false);
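
The rate-control update is deferred to a work item embedded in rtw_sta_info; the handler recovers the station with container_of() and takes the device mutex before touching hardware state. A minimal standalone sketch of that shape, with the workqueue and lock simulated or elided:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { void (*func)(struct work_struct *); };

    struct rtw_sta_info {
            int mac_id;
            struct work_struct rc_work;
    };

    static void rtw_sta_rc_work(struct work_struct *work)
    {
            struct rtw_sta_info *si =
                    container_of(work, struct rtw_sta_info, rc_work);

            /* mutex_lock(&rtwdev->mutex) would bracket this in the driver */
            printf("update rate info for mac_id %d\n", si->mac_id);
    }

    int main(void)
    {
            struct rtw_sta_info si = { .mac_id = 3,
                                       .rc_work = { .func = rtw_sta_rc_work } };

            si.rc_work.func(&si.rc_work);   /* the workqueue would do this */
            return 0;
    }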
index a563285..9e841f6 100644 (file)
@@ -743,6 +743,7 @@ struct rtw_txq {
 DECLARE_EWMA(rssi, 10, 16);
 
 struct rtw_sta_info {
+       struct rtw_dev *rtwdev;
        struct ieee80211_sta *sta;
        struct ieee80211_vif *vif;
 
@@ -767,6 +768,8 @@ struct rtw_sta_info {
 
        bool use_cfg_mask;
        struct cfg80211_bitrate_mask *mask;
+
+       struct work_struct rc_work;
 };
 
 enum rtw_bfee_role {
index 9963655..53933fb 100644 (file)
@@ -299,3 +299,46 @@ void rtw_leave_lps_deep(struct rtw_dev *rtwdev)
 
        __rtw_leave_lps_deep(rtwdev);
 }
+
+struct rtw_vif_recalc_lps_iter_data {
+       struct rtw_dev *rtwdev;
+       struct ieee80211_vif *found_vif;
+       int count;
+};
+
+static void __rtw_vif_recalc_lps(struct rtw_vif_recalc_lps_iter_data *data,
+                                struct ieee80211_vif *vif)
+{
+       if (data->count < 0)
+               return;
+
+       if (vif->type != NL80211_IFTYPE_STATION) {
+               data->count = -1;
+               return;
+       }
+
+       data->count++;
+       data->found_vif = vif;
+}
+
+static void rtw_vif_recalc_lps_iter(void *data, u8 *mac,
+                                   struct ieee80211_vif *vif)
+{
+       __rtw_vif_recalc_lps(data, vif);
+}
+
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif)
+{
+       struct rtw_vif_recalc_lps_iter_data data = { .rtwdev = rtwdev };
+
+       if (new_vif)
+               __rtw_vif_recalc_lps(&data, new_vif);
+       rtw_iterate_vifs(rtwdev, rtw_vif_recalc_lps_iter, &data);
+
+       if (data.count == 1 && data.found_vif->cfg.ps) {
+               rtwdev->ps_enabled = true;
+       } else {
+               rtwdev->ps_enabled = false;
+               rtw_leave_lps(rtwdev);
+       }
+}
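
rtw_recalc_lps() enables power save only when exactly one vif exists, that vif is a station, and it has PS requested; any non-station vif poisons the count. A stand-alone model of that decision rule, with invented types:

    /* Model of the LPS decision above: PS is enabled only for a single
     * station-type vif that requested it.  Types are illustrative. */
    #include <stdbool.h>
    #include <stdio.h>

    enum iftype { IFTYPE_STATION, IFTYPE_AP };

    struct vif { enum iftype type; bool ps; };

    static bool recalc_ps(const struct vif *vifs, int n)
    {
            const struct vif *found = NULL;
            int count = 0;

            for (int i = 0; i < n; i++) {
                    if (vifs[i].type != IFTYPE_STATION)
                            return false;   /* any non-station vif disables PS */
                    count++;
                    found = &vifs[i];
            }
            return count == 1 && found->ps;
    }

    int main(void)
    {
            struct vif one_sta[] = { { IFTYPE_STATION, true } };
            struct vif sta_and_ap[] = { { IFTYPE_STATION, true },
                                        { IFTYPE_AP, false } };

            printf("one station: %d\n", recalc_ps(one_sta, 1));     /* 1 */
            printf("station + AP: %d\n", recalc_ps(sta_and_ap, 2)); /* 0 */
            return 0;
    }
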
index c194386..5ae83d2 100644 (file)
@@ -23,4 +23,6 @@ void rtw_enter_lps(struct rtw_dev *rtwdev, u8 port_id);
 void rtw_leave_lps(struct rtw_dev *rtwdev);
 void rtw_leave_lps_deep(struct rtw_dev *rtwdev);
 enum rtw_lps_deep_mode rtw_get_lps_deep_mode(struct rtw_dev *rtwdev);
+void rtw_recalc_lps(struct rtw_dev *rtwdev, struct ieee80211_vif *new_vif);
+
 #endif
index af0459a..06fce7c 100644 (file)
@@ -87,11 +87,6 @@ static void rtw_sdio_writew(struct rtw_dev *rtwdev, u16 val, u32 addr,
        u8 buf[2];
        int i;
 
-       if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2)) {
-               sdio_writew(rtwsdio->sdio_func, val, addr, err_ret);
-               return;
-       }
-
        *(__le16 *)buf = cpu_to_le16(val);
 
        for (i = 0; i < 2; i++) {
@@ -125,9 +120,6 @@ static u16 rtw_sdio_readw(struct rtw_dev *rtwdev, u32 addr, int *err_ret)
        u8 buf[2];
        int i;
 
-       if (rtw_sdio_use_memcpy_io(rtwdev, addr, 2))
-               return sdio_readw(rtwsdio->sdio_func, addr, err_ret);
-
        for (i = 0; i < 2; i++) {
                buf[i] = sdio_readb(rtwsdio->sdio_func, addr + i, err_ret);
                if (*err_ret)
index 30647f0..ad1d795 100644 (file)
@@ -78,7 +78,7 @@ struct rtw_usb {
        u8 pipe_interrupt;
        u8 pipe_in;
        u8 out_ep[RTW_USB_EP_MAX];
-       u8 qsel_to_ep[TX_DESC_QSEL_MAX];
+       int qsel_to_ep[TX_DESC_QSEL_MAX];
        u8 usb_txagg_num;
 
        struct workqueue_struct *txwq, *rxwq;
index 7fc0a26..bad864d 100644 (file)
@@ -2531,9 +2531,6 @@ static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwv
            rtwvif->tdls_peer)
                return;
 
-       if (rtwdev->total_sta_assoc > 1)
-               return;
-
        if (rtwvif->offchan)
                return;
 
index b8019cf..512de49 100644 (file)
@@ -1425,6 +1425,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
        .wde_size4 = {RTW89_WDE_PG_64, 0, 4096,},
        /* PCIE 64 */
        .wde_size6 = {RTW89_WDE_PG_64, 512, 0,},
+       /* 8852B PCIE SCC */
+       .wde_size7 = {RTW89_WDE_PG_64, 510, 2,},
        /* DLFW */
        .wde_size9 = {RTW89_WDE_PG_64, 0, 1024,},
        /* 8852C DLFW */
@@ -1449,6 +1451,8 @@ const struct rtw89_mac_size_set rtw89_mac_size = {
        .wde_qt4 = {0, 0, 0, 0,},
        /* PCIE 64 */
        .wde_qt6 = {448, 48, 0, 16,},
+       /* 8852B PCIE SCC */
+       .wde_qt7 = {446, 48, 0, 16,},
        /* 8852C DLFW */
        .wde_qt17 = {0, 0, 0,  0,},
        /* 8852C PCIE SCC */
index a8d9847..6ba633c 100644 (file)
@@ -792,6 +792,7 @@ struct rtw89_mac_size_set {
        const struct rtw89_dle_size wde_size0;
        const struct rtw89_dle_size wde_size4;
        const struct rtw89_dle_size wde_size6;
+       const struct rtw89_dle_size wde_size7;
        const struct rtw89_dle_size wde_size9;
        const struct rtw89_dle_size wde_size18;
        const struct rtw89_dle_size wde_size19;
@@ -804,6 +805,7 @@ struct rtw89_mac_size_set {
        const struct rtw89_wde_quota wde_qt0;
        const struct rtw89_wde_quota wde_qt4;
        const struct rtw89_wde_quota wde_qt6;
+       const struct rtw89_wde_quota wde_qt7;
        const struct rtw89_wde_quota wde_qt17;
        const struct rtw89_wde_quota wde_qt18;
        const struct rtw89_ple_quota ple_qt4;
index ee4588b..c42e310 100644 (file)
@@ -89,15 +89,6 @@ static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
            !(hw->conf.flags & IEEE80211_CONF_IDLE))
                rtw89_leave_ips(rtwdev);
 
-       if (changed & IEEE80211_CONF_CHANGE_PS) {
-               if (hw->conf.flags & IEEE80211_CONF_PS) {
-                       rtwdev->lps_enabled = true;
-               } else {
-                       rtw89_leave_lps(rtwdev);
-                       rtwdev->lps_enabled = false;
-               }
-       }
-
        if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
                rtw89_config_entity_chandef(rtwdev, RTW89_SUB_ENTITY_0,
                                            &hw->conf.chandef);
@@ -168,6 +159,8 @@ static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
        rtw89_core_txq_init(rtwdev, vif->txq);
 
        rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
+
+       rtw89_recalc_lps(rtwdev);
 out:
        mutex_unlock(&rtwdev->mutex);
 
@@ -192,6 +185,7 @@ static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
        rtw89_mac_remove_vif(rtwdev, rtwvif);
        rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
        list_del_init(&rtwvif->list);
+       rtw89_recalc_lps(rtwdev);
        rtw89_enter_ips_by_hwflags(rtwdev);
 
        mutex_unlock(&rtwdev->mutex);
@@ -451,6 +445,9 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_CQM)
                rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
 
+       if (changed & BSS_CHANGED_PS)
+               rtw89_recalc_lps(rtwdev);
+
        mutex_unlock(&rtwdev->mutex);
 }
 
index fa94335..84201ef 100644 (file)
@@ -252,3 +252,29 @@ void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
        rtw89_p2p_disable_all_noa(rtwdev, vif);
        rtw89_p2p_update_noa(rtwdev, vif);
 }
+
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev)
+{
+       struct ieee80211_vif *vif, *found_vif = NULL;
+       struct rtw89_vif *rtwvif;
+       int count = 0;
+
+       rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+               vif = rtwvif_to_vif(rtwvif);
+
+               if (vif->type != NL80211_IFTYPE_STATION) {
+                       count = 0;
+                       break;
+               }
+
+               count++;
+               found_vif = vif;
+       }
+
+       if (count == 1 && found_vif->cfg.ps) {
+               rtwdev->lps_enabled = true;
+       } else {
+               rtw89_leave_lps(rtwdev);
+               rtwdev->lps_enabled = false;
+       }
+}
index 73c008d..4c18f49 100644 (file)
@@ -15,6 +15,7 @@ void rtw89_enter_ips(struct rtw89_dev *rtwdev);
 void rtw89_leave_ips(struct rtw89_dev *rtwdev);
 void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
 void rtw89_process_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+void rtw89_recalc_lps(struct rtw89_dev *rtwdev);
 
 static inline void rtw89_leave_ips_by_hwflags(struct rtw89_dev *rtwdev)
 {
index eaa2ea0..6da1b60 100644 (file)
        RTW8852B_FW_BASENAME "-" __stringify(RTW8852B_FW_FORMAT_MAX) ".bin"
 
 static const struct rtw89_hfc_ch_cfg rtw8852b_hfc_chcfg_pcie[] = {
-       {5, 343, grp_0}, /* ACH 0 */
-       {5, 343, grp_0}, /* ACH 1 */
-       {5, 343, grp_0}, /* ACH 2 */
-       {5, 343, grp_0}, /* ACH 3 */
+       {5, 341, grp_0}, /* ACH 0 */
+       {5, 341, grp_0}, /* ACH 1 */
+       {4, 342, grp_0}, /* ACH 2 */
+       {4, 342, grp_0}, /* ACH 3 */
        {0, 0, grp_0}, /* ACH 4 */
        {0, 0, grp_0}, /* ACH 5 */
        {0, 0, grp_0}, /* ACH 6 */
        {0, 0, grp_0}, /* ACH 7 */
-       {4, 344, grp_0}, /* B0MGQ */
-       {4, 344, grp_0}, /* B0HIQ */
+       {4, 342, grp_0}, /* B0MGQ */
+       {4, 342, grp_0}, /* B0HIQ */
        {0, 0, grp_0}, /* B1MGQ */
        {0, 0, grp_0}, /* B1HIQ */
        {40, 0, 0} /* FWCMDQ */
 };
 
 static const struct rtw89_hfc_pub_cfg rtw8852b_hfc_pubcfg_pcie = {
-       448, /* Group 0 */
+       446, /* Group 0 */
        0, /* Group 1 */
-       448, /* Public Max */
+       446, /* Public Max */
        0 /* WP threshold */
 };
 
@@ -49,13 +49,13 @@ static const struct rtw89_hfc_param_ini rtw8852b_hfc_param_ini_pcie[] = {
 };
 
 static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
-       [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size6,
-                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-                          &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+       [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &rtw89_mac_size.wde_size7,
+                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+                          &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
                           &rtw89_mac_size.ple_qt58},
-       [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size6,
-                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt6,
-                          &rtw89_mac_size.wde_qt6, &rtw89_mac_size.ple_qt18,
+       [RTW89_QTA_WOW] = {RTW89_QTA_WOW, &rtw89_mac_size.wde_size7,
+                          &rtw89_mac_size.ple_size6, &rtw89_mac_size.wde_qt7,
+                          &rtw89_mac_size.wde_qt7, &rtw89_mac_size.ple_qt18,
                           &rtw89_mac_size.ple_qt_52b_wow},
        [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &rtw89_mac_size.wde_size9,
                            &rtw89_mac_size.ple_size8, &rtw89_mac_size.wde_qt4,
index 9a8faaf..89c7a14 100644 (file)
@@ -5964,10 +5964,11 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        ret = -ENOMEM;
                        goto out_free;
                }
+               param.pmsr_capa = pmsr_capa;
+
                ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info);
                if (ret)
                        goto out_free;
-               param.pmsr_capa = pmsr_capa;
        }
 
        ret = mac80211_hwsim_new_radio(info, &param);
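
The reordering above assigns param.pmsr_capa before the fallible parse, apparently so the shared out_free error path can release the capability buffer through the same pointer when parsing fails. A hedged user-space sketch of that "record ownership before the fallible step" pattern; all names here are invented:

    #include <stdlib.h>

    struct params { void *capa; };

    static int parse(void *capa) { (void)capa; return -1; /* pretend failure */ }

    static int new_radio(struct params *p)
    {
            int ret;

            p->capa = malloc(64);
            if (!p->capa)
                    return -1;

            /* Record the pointer in the shared struct *before* the
             * fallible call ... */
            ret = parse(p->capa);
            if (ret)
                    goto out_free;

            return 0;       /* success: caller owns p->capa */

    out_free:
            /* ... so the common error path can free through it instead
             * of leaking the allocation on a parse failure. */
            free(p->capa);
            p->capa = NULL;
            return ret;
    }

    int main(void)
    {
            struct params p = { 0 };

            (void)new_radio(&p);
            return 0;
    }
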
index c066b00..829515a 100644 (file)
@@ -565,24 +565,32 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
        struct ipc_mux_config mux_cfg;
        struct iosm_imem *ipc_imem;
        u8 ctrl_chl_idx = 0;
+       int ret;
 
        ipc_imem = container_of(instance, struct iosm_imem, run_state_worker);
 
        if (ipc_imem->phase != IPC_P_RUN) {
                dev_err(ipc_imem->dev,
                        "Modem link down. Exit run state worker.");
-               return;
+               goto err_out;
        }
 
        if (test_and_clear_bit(IOSM_DEVLINK_INIT, &ipc_imem->flag))
                ipc_devlink_deinit(ipc_imem->ipc_devlink);
 
-       if (!ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg))
-               ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+       ret = ipc_imem_setup_cp_mux_cap_init(ipc_imem, &mux_cfg);
+       if (ret < 0)
+               goto err_out;
+
+       ipc_imem->mux = ipc_mux_init(&mux_cfg, ipc_imem);
+       if (!ipc_imem->mux)
+               goto err_out;
+
+       ret = ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
+       if (ret < 0)
+               goto err_ipc_mux_deinit;
 
-       ipc_imem_wwan_channel_init(ipc_imem, mux_cfg.protocol);
-       if (ipc_imem->mux)
-               ipc_imem->mux->wwan = ipc_imem->wwan;
+       ipc_imem->mux->wwan = ipc_imem->wwan;
 
        while (ctrl_chl_idx < IPC_MEM_MAX_CHANNELS) {
                if (!ipc_chnl_cfg_get(&chnl_cfg_port, ctrl_chl_idx)) {
@@ -622,6 +630,13 @@ static void ipc_imem_run_state_worker(struct work_struct *instance)
 
        /* Complete all memory stores after setting bit */
        smp_mb__after_atomic();
+
+       return;
+
+err_ipc_mux_deinit:
+       ipc_mux_deinit(ipc_imem->mux);
+err_out:
+       ipc_uevent_send(ipc_imem->dev, UEVENT_CD_READY_LINK_DOWN);
 }
 
 static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
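
The rework above replaces nested success checks with the kernel's usual goto-unwind style: each fallible step jumps to a label that tears down only what was already set up, and the outermost label reports the failure. A self-contained sketch of the shape, using stub functions and invented names:

    #include <stdio.h>

    static int setup_mux(void)  { return 0; }
    static int setup_wwan(void) { return -1; /* pretend this step fails */ }
    static void teardown_mux(void)    { puts("mux deinit"); }
    static void report_link_down(void) { puts("uevent: link down"); }

    static int run_state_worker(void)
    {
            int ret;

            ret = setup_mux();
            if (ret < 0)
                    goto err_out;

            ret = setup_wwan();
            if (ret < 0)
                    goto err_mux_deinit;    /* undo only the mux setup */

            return 0;

    err_mux_deinit:
            teardown_mux();
    err_out:
            report_link_down();
            return ret;
    }

    int main(void) { return run_state_worker() ? 1 : 0; }
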
index 66b90cc..109cf89 100644 (file)
@@ -77,8 +77,8 @@ out:
 }
 
 /* Initialize wwan channel */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-                               enum ipc_mux_protocol mux_type)
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+                              enum ipc_mux_protocol mux_type)
 {
        struct ipc_chnl_cfg chnl_cfg = { 0 };
 
@@ -87,7 +87,7 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
        /* If modem version is invalid (0xffffffff), do not initialize WWAN. */
        if (ipc_imem->cp_version == -1) {
                dev_err(ipc_imem->dev, "invalid CP version");
-               return;
+               return -EIO;
        }
 
        ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels);
@@ -104,9 +104,13 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
 
        /* WWAN registration. */
        ipc_imem->wwan = ipc_wwan_init(ipc_imem, ipc_imem->dev);
-       if (!ipc_imem->wwan)
+       if (!ipc_imem->wwan) {
                dev_err(ipc_imem->dev,
                        "failed to register the ipc_wwan interfaces");
+               return -ENOMEM;
+       }
+
+       return 0;
 }
 
 /* Map SKB to DMA for transfer */
index f8afb21..026c5bd 100644 (file)
@@ -91,9 +91,11 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
  *                             MUX.
  * @ipc_imem:          Pointer to iosm_imem struct.
  * @mux_type:          Type of mux protocol.
+ *
+ * Return: 0 on success, negative error value on failure
  */
-void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
-                               enum ipc_mux_protocol mux_type);
+int ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
+                              enum ipc_mux_protocol mux_type);
 
 /**
  * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
index 226fc17..91256e0 100644 (file)
@@ -45,6 +45,7 @@
 #define T7XX_PCI_IREG_BASE             0
 #define T7XX_PCI_EREG_BASE             2
 
+#define T7XX_INIT_TIMEOUT              20
 #define PM_SLEEP_DIS_TIMEOUT_MS                20
 #define PM_ACK_TIMEOUT_MS              1500
 #define PM_AUTOSUSPEND_MS              20000
@@ -96,6 +97,7 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
        spin_lock_init(&t7xx_dev->md_pm_lock);
        init_completion(&t7xx_dev->sleep_lock_acquire);
        init_completion(&t7xx_dev->pm_sr_ack);
+       init_completion(&t7xx_dev->init_done);
        atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
 
        device_init_wakeup(&pdev->dev, true);
@@ -124,6 +126,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
        pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev);
        pm_runtime_allow(&t7xx_dev->pdev->dev);
        pm_runtime_put_noidle(&t7xx_dev->pdev->dev);
+       complete_all(&t7xx_dev->init_done);
 }
 
 static int t7xx_pci_pm_reinit(struct t7xx_pci_dev *t7xx_dev)
@@ -529,6 +532,20 @@ static void t7xx_pci_shutdown(struct pci_dev *pdev)
        __t7xx_pci_pm_suspend(pdev);
 }
 
+static int t7xx_pci_pm_prepare(struct device *dev)
+{
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct t7xx_pci_dev *t7xx_dev;
+
+       t7xx_dev = pci_get_drvdata(pdev);
+       if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) {
+               dev_warn(dev, "Not ready for system sleep.\n");
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
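
The new .prepare callback vetoes system sleep until late init has signalled a completion, bounded by a timeout. A crude user-space model of that gate follows; a real completion blocks rather than polling, and all names and values are illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static bool init_done;

    static int pm_prepare(int timeout_s)
    {
            time_t deadline = time(NULL) + timeout_s;

            while (!init_done)      /* busy-wait stand-in for the blocking wait */
                    if (time(NULL) >= deadline)
                            return -1;      /* like -ETIMEDOUT: veto the sleep */
            return 0;               /* device ready: let the suspend proceed */
    }

    int main(void)
    {
            init_done = true;       /* normally signalled by the late-init path */
            printf("prepare -> %d\n", pm_prepare(20));
            return 0;
    }
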
 static int t7xx_pci_pm_suspend(struct device *dev)
 {
        return __t7xx_pci_pm_suspend(to_pci_dev(dev));
@@ -555,6 +572,7 @@ static int t7xx_pci_pm_runtime_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops t7xx_pci_pm_ops = {
+       .prepare = t7xx_pci_pm_prepare,
        .suspend = t7xx_pci_pm_suspend,
        .resume = t7xx_pci_pm_resume,
        .resume_noirq = t7xx_pci_pm_resume_noirq,
index 112efa5..f08f1ab 100644 (file)
@@ -69,6 +69,7 @@ struct t7xx_pci_dev {
        struct t7xx_modem       *md;
        struct t7xx_ccmni_ctrl  *ccmni_ctlb;
        bool                    rgu_pci_irq_en;
+       struct completion       init_done;
 
        /* Low Power Items */
        struct list_head        md_pm_entities;
index 44eeb17..a55381f 100644 (file)
@@ -336,10 +336,6 @@ static struct dentry *nfcsim_debugfs_root;
 static void nfcsim_debugfs_init(void)
 {
        nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL);
-
-       if (!nfcsim_debugfs_root)
-               pr_err("Could not create debugfs entry\n");
-
 }
 
 static void nfcsim_debugfs_remove(void)
index bc523ca..5e4f884 100644 (file)
@@ -21,7 +21,7 @@ static const char * const nvme_ops[] = {
        [nvme_cmd_resv_release] = "Reservation Release",
        [nvme_cmd_zone_mgmt_send] = "Zone Management Send",
        [nvme_cmd_zone_mgmt_recv] = "Zone Management Receive",
-       [nvme_cmd_zone_append] = "Zone Management Append",
+       [nvme_cmd_zone_append] = "Zone Append",
 };
 
 static const char * const nvme_admin_ops[] = {
index ccb6eb1..3ec38e2 100644 (file)
@@ -397,7 +397,16 @@ void nvme_complete_rq(struct request *req)
        trace_nvme_complete_rq(req);
        nvme_cleanup_cmd(req);
 
-       if (ctrl->kas)
+       /*
+        * Completions of long-running commands should not be able to
+        * defer sending of periodic keep alives, since the controller
+        * may have completed processing such commands a long time ago
+        * (arbitrarily close to command submission time).
+        * req->deadline - req->timeout is the command submission time
+        * in jiffies.
+        */
+       if (ctrl->kas &&
+           req->deadline - req->timeout >= ctrl->ka_last_check_time)
                ctrl->comp_seen = true;
 
        switch (nvme_decide_disposition(req)) {
@@ -1115,7 +1124,7 @@ u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 }
 EXPORT_SYMBOL_NS_GPL(nvme_passthru_start, NVME_TARGET_PASSTHRU);
 
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                       struct nvme_command *cmd, int status)
 {
        if (effects & NVME_CMD_EFFECTS_CSE_MASK) {
@@ -1132,6 +1141,8 @@ void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
                nvme_queue_scan(ctrl);
                flush_work(&ctrl->scan_work);
        }
+       if (ns)
+               return;
 
        switch (cmd->common.opcode) {
        case nvme_admin_set_features:
@@ -1161,9 +1172,25 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
  *   accounting for transport roundtrip times [..].
  */
+static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+{
+       unsigned long delay = ctrl->kato * HZ / 2;
+
+       /*
+        * When using Traffic Based Keep Alive, we need to run
+        * nvme_keep_alive_work at twice the normal frequency, as one
+        * command completion can postpone sending a keep alive command
+        * by up to twice the delay between runs.
+        */
+       if (ctrl->ctratt & NVME_CTRL_ATTR_TBKAS)
+               delay /= 2;
+       return delay;
+}
+
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
-       queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ / 2);
+       queue_delayed_work(nvme_wq, &ctrl->ka_work,
+                          nvme_keep_alive_work_period(ctrl));
 }
 
 static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
@@ -1172,6 +1199,20 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
        struct nvme_ctrl *ctrl = rq->end_io_data;
        unsigned long flags;
        bool startka = false;
+       unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+       unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
+       /*
+        * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+        * at the desired frequency.
+        */
+       if (rtt <= delay) {
+               delay -= rtt;
+       } else {
+               dev_warn(ctrl->device, "long keepalive RTT (%u ms)\n",
+                        jiffies_to_msecs(rtt));
+               delay = 0;
+       }
 
        blk_mq_free_request(rq);
 
@@ -1182,6 +1223,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                return RQ_END_IO_NONE;
        }
 
+       ctrl->ka_last_check_time = jiffies;
        ctrl->comp_seen = false;
        spin_lock_irqsave(&ctrl->lock, flags);
        if (ctrl->state == NVME_CTRL_LIVE ||
@@ -1189,7 +1231,7 @@ static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
                startka = true;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (startka)
-               nvme_queue_keep_alive_work(ctrl);
+               queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
        return RQ_END_IO_NONE;
 }
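
Putting the two pieces together: with TBKAS the work period is kato/4 instead of kato/2, and the measured keep-alive round trip is subtracted from the next delay (clamped at zero) so the effective send rate stays on target. A worked example in plain C, using seconds instead of jiffies and made-up values:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned long ka_period(unsigned long kato, bool tbkas)
    {
            unsigned long delay = kato / 2;

            if (tbkas)              /* traffic-based keep alive: halve again */
                    delay /= 2;
            return delay;
    }

    int main(void)
    {
            unsigned long kato = 8, rtt = 1;                /* seconds */
            unsigned long delay = ka_period(kato, true);    /* 8/2/2 = 2 */

            /* Subtract the measured round trip, clamping at zero. */
            delay = rtt <= delay ? delay - rtt : 0;

            printf("next keep-alive in %lus\n", delay);     /* 1s */
            return 0;
    }
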
 
@@ -1200,6 +1242,8 @@ static void nvme_keep_alive_work(struct work_struct *work)
        bool comp_seen = ctrl->comp_seen;
        struct request *rq;
 
+       ctrl->ka_last_check_time = jiffies;
+
        if ((ctrl->ctratt & NVME_CTRL_ATTR_TBKAS) && comp_seen) {
                dev_dbg(ctrl->device,
                        "reschedule traffic based keep-alive timer\n");
@@ -3585,6 +3629,9 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
 {
        struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
 
+       if (!test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags))
+               return -EBUSY;
+
        if (device_remove_file_self(dev, attr))
                nvme_delete_ctrl_sync(ctrl);
        return count;
@@ -5045,7 +5092,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
         * that were missed. We identify persistent discovery controllers by
         * checking that they started once before, hence are reconnecting back.
         */
-       if (test_and_set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
+       if (test_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags) &&
            nvme_discovery_ctrl(ctrl))
                nvme_change_uevent(ctrl, "NVME_EVENT=rediscover");
 
@@ -5056,6 +5103,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
        }
 
        nvme_change_uevent(ctrl, "NVME_EVENT=connected");
+       set_bit(NVME_CTRL_STARTED_ONCE, &ctrl->flags);
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);
 
index 9e6e56c..316f3e4 100644 (file)
@@ -163,7 +163,9 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
        case hwmon_temp_max:
        case hwmon_temp_min:
                if ((!channel && data->ctrl->wctemp) ||
-                   (channel && data->log->temp_sensor[channel - 1])) {
+                   (channel && data->log->temp_sensor[channel - 1] &&
+                    !(data->ctrl->quirks &
+                      NVME_QUIRK_NO_SECONDARY_TEMP_THRESH))) {
                        if (data->ctrl->quirks &
                            NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
                                return 0444;
index 81c5c9e..f15e733 100644 (file)
@@ -254,7 +254,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        blk_mq_free_request(req);
 
        if (effects)
-               nvme_passthru_end(ctrl, effects, cmd, ret);
+               nvme_passthru_end(ctrl, ns, effects, cmd, ret);
 
        return ret;
 }
index 9171452..2bc159a 100644 (file)
@@ -884,7 +884,6 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
        if (!head->disk)
                return;
-       blk_mark_disk_dead(head->disk);
        /* make sure all pending bios are cleaned up */
        kblockd_schedule_work(&head->requeue_work);
        flush_work(&head->requeue_work);
index bf46f12..8657811 100644 (file)
@@ -149,6 +149,11 @@ enum nvme_quirks {
         * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
         */
        NVME_QUIRK_BOGUS_NID                    = (1 << 18),
+
+       /*
+        * No temperature thresholds for channels other than 0 (Composite).
+        */
+       NVME_QUIRK_NO_SECONDARY_TEMP_THRESH     = (1 << 19),
 };
 
 /*
@@ -323,6 +328,7 @@ struct nvme_ctrl {
        struct delayed_work ka_work;
        struct delayed_work failfast_work;
        struct nvme_command ka_cmd;
+       unsigned long ka_last_check_time;
        struct work_struct fw_act_work;
        unsigned long events;
 
@@ -1072,7 +1078,7 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
                         u8 opcode);
 u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
 int nvme_execute_rq(struct request *rq, bool at_head);
-void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects,
+void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
                       struct nvme_command *cmd, int status);
 struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
index 7f25c0f..492f319 100644 (file)
@@ -2956,7 +2956,7 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
         * over a single page.
         */
        dev->ctrl.max_hw_sectors = min_t(u32,
-               NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9);
+               NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
        dev->ctrl.max_segments = NVME_MAX_SEGS;
 
        /*
@@ -3402,6 +3402,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
        { PCI_DEVICE(0x2646, 0x2263),   /* KINGSTON A2000 NVMe SSD  */
                .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
+       { PCI_DEVICE(0x2646, 0x5013),   /* Kingston KC3000, Kingston FURY Renegade */
+               .driver_data = NVME_QUIRK_NO_SECONDARY_TEMP_THRESH, },
        { PCI_DEVICE(0x2646, 0x5018),   /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
                .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
        { PCI_DEVICE(0x2646, 0x5016),   /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
@@ -3422,6 +3424,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1e4B, 0x1202),   /* MAXIO MAP1202 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4B, 0x1602),   /* MAXIO MAP1602 */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1cc1, 0x5350),   /* ADATA XPG GAMMIX S50 */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(0x1dbe, 0x5236),   /* ADATA XPG GAMMIX S70 */
@@ -3441,6 +3445,10 @@ static const struct pci_device_id nvme_id_table[] = {
                                NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
                .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x1e4b, 0x1602), /* HS-SSD-FUTURE 2048G  */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
+       { PCI_DEVICE(0x10ec, 0x5765), /* TEAMGROUP MP33 2TB SSD */
+               .driver_data = NVME_QUIRK_BOGUS_NID, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
                .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
        { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
index 511c980..71a9c1c 100644 (file)
@@ -243,7 +243,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
        blk_mq_free_request(rq);
 
        if (effects)
-               nvme_passthru_end(ctrl, effects, req->cmd, status);
+               nvme_passthru_end(ctrl, ns, effects, req->cmd, status);
 }
 
 static enum rq_end_io_ret nvmet_passthru_req_done(struct request *rq,
index f4e2a88..c525867 100644 (file)
@@ -6003,8 +6003,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency
 
 #ifdef CONFIG_PCIE_DPC
 /*
- * Intel Tiger Lake and Alder Lake BIOS has a bug that clears the DPC
- * RP PIO Log Size of the integrated Thunderbolt PCIe Root Ports.
+ * Intel Ice Lake, Tiger Lake and Alder Lake BIOS has a bug that clears
+ * the DPC RP PIO Log Size of the integrated Thunderbolt PCIe Root
+ * Ports.
  */
 static void dpc_log_size(struct pci_dev *dev)
 {
@@ -6027,6 +6028,10 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size);
index c14089f..cabdddb 100644 (file)
@@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy)
                     HHI_MIPI_CNTL1_BANDGAP);
 
        regmap_write(priv->regmap, HHI_MIPI_CNTL2,
-                    FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) |
+                    FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) |
                     FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680));
 
        reg = DSI_LANE_CLK;
index caa9537..8aa7251 100644 (file)
@@ -237,11 +237,11 @@ static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw,
         */
        if (tmds_clk < 54 * MEGA)
                txposdiv = 8;
-       else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA)
+       else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA)
                txposdiv = 4;
-       else if (tmds_clk >= 148.35 * MEGA && tmds_clk < 296.7 * MEGA)
+       else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA)
                txposdiv = 2;
-       else if (tmds_clk >= 296.7 * MEGA && tmds_clk <= 594 * MEGA)
+       else if ((tmds_clk * 10) >= 2967 * MEGA && tmds_clk <= 594 * MEGA)
                txposdiv = 1;
        else
                return -EINVAL;
@@ -324,12 +324,12 @@ static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw)
                clk_channel_bias = 0x34; /* 20mA */
                impedance_en = 0xf;
                impedance = 0x36; /* 100ohm */
-       } else if (pixel_clk >= 74.175 * MEGA && pixel_clk <= 300 * MEGA) {
+       } else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) {
                data_channel_bias = 0x34; /* 20mA */
                clk_channel_bias = 0x2c; /* 16mA */
                impedance_en = 0xf;
                impedance = 0x36; /* 100ohm */
-       } else if (pixel_clk >= 27 * MEGA && pixel_clk < 74.175 * MEGA) {
+       } else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) {
                data_channel_bias = 0x14; /* 10mA */
                clk_channel_bias = 0x14; /* 10mA */
                impedance_en = 0x0;
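
These hunks remove floating-point constants, which kernel code cannot use, by scaling both sides of each comparison: tmds_clk < 148.35 * MEGA becomes tmds_clk * 100 < 14835 * MEGA. A quick user-space check that the two forms agree:

    #include <stdio.h>

    #define MEGA 1000000ULL

    int main(void)
    {
            unsigned long long tmds_clk = 148350000;        /* 148.35 MHz */

            /* Floating point form (fine in user space, not in the kernel). */
            int f = tmds_clk < 148.35 * MEGA;

            /* Integer form: multiply both sides by 100. */
            int i = tmds_clk * 100 < 14835 * MEGA;

            printf("float: %d, scaled integer: %d\n", f, i);        /* 0, 0 */
            return 0;
    }
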
index 6850e04..87b17e5 100644 (file)
@@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
        ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
        if (ret) {
                dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
-               goto err_unlock;
+               goto err_decrement_count;
        }
 
        ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -2522,7 +2522,8 @@ err_assert_reset:
        reset_control_bulk_assert(cfg->num_resets, qmp->resets);
 err_disable_regulators:
        regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+       qmp->init_count--;
        mutex_unlock(&qmp->phy_mutex);
 
        return ret;
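
Both qmp com_init fixes share one idea: init_count is incremented on entry, so every error path, including the earliest one, must decrement it, or the count stays elevated and later init calls short-circuit. A minimal model of that unwind rule, with invented names:

    #include <stdio.h>

    static int init_count;

    static int com_init(int fail_early)
    {
            if (init_count++)       /* already initialized: nothing to do */
                    return 0;

            if (fail_early)
                    goto err_decrement_count;

            return 0;

    err_decrement_count:
            init_count--;           /* the old code skipped this on one path */
            return -1;
    }

    int main(void)
    {
            com_init(1);
            printf("init_count after failed init: %d\n", init_count); /* 0 */
            return 0;
    }
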
index 09824be..0c603bc 100644 (file)
@@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
        ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
        if (ret) {
                dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
-               goto err_unlock;
+               goto err_decrement_count;
        }
 
        ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -409,7 +409,8 @@ err_assert_reset:
        reset_control_bulk_assert(cfg->num_resets, qmp->resets);
 err_disable_regulators:
        regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
-err_unlock:
+err_decrement_count:
+       qmp->init_count--;
        mutex_unlock(&qmp->phy_mutex);
 
        return ret;
index a590635..6c237f3 100644 (file)
@@ -115,11 +115,11 @@ struct phy_override_seq {
  *
  * @cfg_ahb_clk: AHB2PHY interface clock
  * @ref_clk: phy reference clock
- * @iface_clk: phy interface clock
  * @phy_reset: phy reset control
  * @vregs: regulator supplies bulk data
  * @phy_initialized: if PHY has been initialized correctly
  * @mode: contains the current mode the PHY is in
+ * @update_seq_cfg: tuning parameters for phy init
  */
 struct qcom_snps_hsphy {
        struct phy *phy;
index 7bfecdf..d249a03 100644 (file)
@@ -400,6 +400,7 @@ static struct meson_pmx_group meson_axg_periphs_groups[] = {
        GPIO_GROUP(GPIOA_15),
        GPIO_GROUP(GPIOA_16),
        GPIO_GROUP(GPIOA_17),
+       GPIO_GROUP(GPIOA_18),
        GPIO_GROUP(GPIOA_19),
        GPIO_GROUP(GPIOA_20),
 
index c2c9b0d..be967d7 100644 (file)
@@ -1348,9 +1348,8 @@ static int mlxbf_pmc_map_counters(struct device *dev)
 
        for (i = 0; i < pmc->total_blocks; ++i) {
                if (strstr(pmc->block_name[i], "tile")) {
-                       ret = sscanf(pmc->block_name[i], "tile%d", &tile_num);
-                       if (ret < 0)
-                               return ret;
+                       if (sscanf(pmc->block_name[i], "tile%d", &tile_num) != 1)
+                               return -EINVAL;
 
                        if (tile_num >= pmc->tile_count)
                                continue;
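
sscanf() returns the number of successful conversions (possibly zero), never a negative errno, so the old ret < 0 test could not catch a failed match; the fix demands exactly one conversion. A small demonstration:

    #include <stdio.h>

    int main(void)
    {
            int tile_num = -1;

            /* No digits follow "tile", so zero conversions happen. */
            int ret = sscanf("tileX", "tile%d", &tile_num);

            printf("ret = %d (not negative), tile_num = %d\n", ret, tile_num);

            /* The fixed check: demand exactly one conversion. */
            if (sscanf("tile3", "tile%d", &tile_num) != 1)
                    return 1;
            printf("parsed tile %d\n", tile_num);
            return 0;
    }
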
index 535581c..7fc602e 100644 (file)
@@ -825,7 +825,7 @@ static int ssam_cplt_init(struct ssam_cplt *cplt, struct device *dev)
 
        cplt->dev = dev;
 
-       cplt->wq = create_workqueue(SSAM_CPLT_WQ_NAME);
+       cplt->wq = alloc_workqueue(SSAM_CPLT_WQ_NAME, WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
        if (!cplt->wq)
                return -ENOMEM;
 
index 8f52b62..c0a1a58 100644 (file)
@@ -210,6 +210,7 @@ enum ssam_kip_cover_state {
        SSAM_KIP_COVER_STATE_LAPTOP        = 0x03,
        SSAM_KIP_COVER_STATE_FOLDED_CANVAS = 0x04,
        SSAM_KIP_COVER_STATE_FOLDED_BACK   = 0x05,
+       SSAM_KIP_COVER_STATE_BOOK          = 0x06,
 };
 
 static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw,
@@ -231,6 +232,9 @@ static const char *ssam_kip_cover_state_name(struct ssam_tablet_sw *sw,
        case SSAM_KIP_COVER_STATE_FOLDED_BACK:
                return "folded-back";
 
+       case SSAM_KIP_COVER_STATE_BOOK:
+               return "book";
+
        default:
                dev_warn(&sw->sdev->dev, "unknown KIP cover state: %u\n", state->state);
                return "<unknown>";
@@ -244,6 +248,7 @@ static bool ssam_kip_cover_state_is_tablet_mode(struct ssam_tablet_sw *sw,
        case SSAM_KIP_COVER_STATE_DISCONNECTED:
        case SSAM_KIP_COVER_STATE_FOLDED_CANVAS:
        case SSAM_KIP_COVER_STATE_FOLDED_BACK:
+       case SSAM_KIP_COVER_STATE_BOOK:
                return true;
 
        case SSAM_KIP_COVER_STATE_CLOSED:
@@ -335,6 +340,7 @@ enum ssam_pos_state_cover {
        SSAM_POS_COVER_LAPTOP        = 0x03,
        SSAM_POS_COVER_FOLDED_CANVAS = 0x04,
        SSAM_POS_COVER_FOLDED_BACK   = 0x05,
+       SSAM_POS_COVER_BOOK          = 0x06,
 };
 
 enum ssam_pos_state_sls {
@@ -367,6 +373,9 @@ static const char *ssam_pos_state_name_cover(struct ssam_tablet_sw *sw, u32 stat
        case SSAM_POS_COVER_FOLDED_BACK:
                return "folded-back";
 
+       case SSAM_POS_COVER_BOOK:
+               return "book";
+
        default:
                dev_warn(&sw->sdev->dev, "unknown device posture for type-cover: %u\n", state);
                return "<unknown>";
@@ -416,6 +425,7 @@ static bool ssam_pos_state_is_tablet_mode_cover(struct ssam_tablet_sw *sw, u32 s
        case SSAM_POS_COVER_DISCONNECTED:
        case SSAM_POS_COVER_FOLDED_CANVAS:
        case SSAM_POS_COVER_FOLDED_BACK:
+       case SSAM_POS_COVER_BOOK:
                return true;
 
        case SSAM_POS_COVER_CLOSED:
index d5bb775..ee5f124 100644 (file)
@@ -245,24 +245,29 @@ static const struct pci_device_id pmf_pci_ids[] = {
        { }
 };
 
-int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+static void amd_pmf_set_dram_addr(struct amd_pmf_dev *dev)
 {
        u64 phys_addr;
        u32 hi, low;
 
-       INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
+       phys_addr = virt_to_phys(dev->buf);
+       hi = phys_addr >> 32;
+       low = phys_addr & GENMASK(31, 0);
+
+       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
+       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+}
 
+int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
+{
        /* Get Metrics Table Address */
        dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
        if (!dev->buf)
                return -ENOMEM;
 
-       phys_addr = virt_to_phys(dev->buf);
-       hi = phys_addr >> 32;
-       low = phys_addr & GENMASK(31, 0);
+       INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);
 
-       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
-       amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+       amd_pmf_set_dram_addr(dev);
 
        /*
         * Start collecting the metrics data after a small delay
@@ -273,6 +278,18 @@ int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
        return 0;
 }
 
+static int amd_pmf_resume_handler(struct device *dev)
+{
+       struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
+
+       if (pdev->buf)
+               amd_pmf_set_dram_addr(pdev);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, NULL, amd_pmf_resume_handler);
+
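
The refactor pulls the "program the DRAM address into the firmware" step into a helper so a new resume handler can repeat it after suspend, when the firmware has presumably lost the address. A stand-in sketch of the factoring; all names and values are invented:

    #include <stdint.h>
    #include <stdio.h>

    static void send_cmd(const char *what, uint32_t val)
    {
            printf("cmd %s = 0x%08x\n", what, (unsigned)val);
    }

    static void set_dram_addr(uint64_t phys_addr)
    {
            /* Split the 64-bit physical address into two 32-bit commands. */
            send_cmd("DRAM_ADDR_HIGH", (uint32_t)(phys_addr >> 32));
            send_cmd("DRAM_ADDR_LOW", (uint32_t)(phys_addr & 0xffffffff));
    }

    int main(void)
    {
            uint64_t buf = 0x123456789abcull;

            set_dram_addr(buf);     /* at probe time */
            /* ... suspend/resume: the firmware forgot the address ... */
            set_dram_addr(buf);     /* resume handler re-programs it */
            return 0;
    }
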
 static void amd_pmf_init_features(struct amd_pmf_dev *dev)
 {
        int ret;
@@ -413,6 +430,7 @@ static struct platform_driver amd_pmf_driver = {
                .name = "amd-pmf",
                .acpi_match_table = amd_pmf_acpi_ids,
                .dev_groups = amd_pmf_driver_groups,
+               .pm = pm_sleep_ptr(&amd_pmf_pm),
        },
        .probe = amd_pmf_probe,
        .remove_new = amd_pmf_remove,
index e2c9a68..fdf7da0 100644 (file)
@@ -555,6 +555,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */
        { KE_IGNORE, 0x79, },  /* Charger type detection notification */
        { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */
+       { KE_IGNORE, 0x7B, }, /* Charger connect/disconnect notification */
        { KE_KEY, 0x7c, { KEY_MICMUTE } },
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
        { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
@@ -584,6 +585,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0xAE, { KEY_FN_F5 } }, /* Fn+F5 fan mode on 2020+ */
        { KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */
        { KE_KEY, 0xB5, { KEY_CALC } },
+       { KE_IGNORE, 0xC0, }, /* External display connect/disconnect notification */
        { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
        { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
        { KE_IGNORE, 0xC6, },  /* Ambient Light Sensor notification */
index 61dffb4..e6ae826 100644 (file)
@@ -208,7 +208,7 @@ static int scan_chunks_sanity_check(struct device *dev)
                        continue;
                reinit_completion(&ifs_done);
                local_work.dev = dev;
-               INIT_WORK(&local_work.w, copy_hashes_authenticate_chunks);
+               INIT_WORK_ONSTACK(&local_work.w, copy_hashes_authenticate_chunks);
                schedule_work_on(cpu, &local_work.w);
                wait_for_completion(&ifs_done);
                if (ifsd->loading_error) {
index 1086c3d..399f062 100644 (file)
@@ -101,9 +101,11 @@ int skl_int3472_register_clock(struct int3472_discrete_device *int3472,
 
        int3472->clock.ena_gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
                                                             "int3472,clk-enable");
-       if (IS_ERR(int3472->clock.ena_gpio))
-               return dev_err_probe(int3472->dev, PTR_ERR(int3472->clock.ena_gpio),
-                                    "getting clk-enable GPIO\n");
+       if (IS_ERR(int3472->clock.ena_gpio)) {
+               ret = PTR_ERR(int3472->clock.ena_gpio);
+               int3472->clock.ena_gpio = NULL;
+               return dev_err_probe(int3472->dev, ret, "getting clk-enable GPIO\n");
+       }
 
        if (polarity == GPIO_ACTIVE_LOW)
                gpiod_toggle_active_low(int3472->clock.ena_gpio);
@@ -199,8 +201,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
        int3472->regulator.gpio = acpi_get_and_request_gpiod(path, agpio->pin_table[0],
                                                             "int3472,regulator");
        if (IS_ERR(int3472->regulator.gpio)) {
-               dev_err(int3472->dev, "Failed to get regulator GPIO line\n");
-               return PTR_ERR(int3472->regulator.gpio);
+               ret = PTR_ERR(int3472->regulator.gpio);
+               int3472->regulator.gpio = NULL;
+               return dev_err_probe(int3472->dev, ret, "getting regulator GPIO\n");
        }
 
        /* Ensure the pin is in output mode and non-active state */
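
Both int3472 fixes reset the looked-up GPIO pointer to NULL before returning the decoded error, so common cleanup that puts every non-NULL GPIO never touches an ERR_PTR-encoded value. A user-space model of that rule, with simplified ERR_PTR macros and invented names:

    #include <stdio.h>

    #define ERR_PTR(err)    ((void *)(long)(err))
    #define IS_ERR(p)       ((unsigned long)(p) >= (unsigned long)-4095)

    struct dev { void *gpio; };

    static void cleanup(struct dev *d)
    {
            /* Common cleanup frees every non-NULL member; an ERR_PTR
             * left behind would be handed to the put routine. */
            if (d->gpio)
                    printf("putting gpio %p\n", d->gpio);
            else
                    printf("gpio is NULL, nothing to put\n");
    }

    static int probe(struct dev *d)
    {
            d->gpio = ERR_PTR(-2);  /* pretend the lookup failed (-ENOENT) */
            if (IS_ERR(d->gpio)) {
                    long err = (long)d->gpio;

                    d->gpio = NULL; /* the fix: don't leave the ERR_PTR behind */
                    return (int)err;
            }
            return 0;
    }

    int main(void)
    {
            struct dev d = { 0 };

            if (probe(&d))
                    cleanup(&d);    /* safe: gpio is NULL, not an ERR_PTR */
            return 0;
    }
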
index e0572a2..02fe360 100644 (file)
@@ -304,14 +304,13 @@ struct isst_if_pkg_info {
 static struct isst_if_cpu_info *isst_cpu_info;
 static struct isst_if_pkg_info *isst_pkg_info;
 
-#define ISST_MAX_PCI_DOMAINS   8
-
 static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
 {
        struct pci_dev *matched_pci_dev = NULL;
        struct pci_dev *pci_dev = NULL;
+       struct pci_dev *_pci_dev = NULL;
        int no_matches = 0, pkg_id;
-       int i, bus_number;
+       int bus_number;
 
        if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
            cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
@@ -323,12 +322,11 @@ static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn
        if (bus_number < 0)
                return NULL;
 
-       for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
-               struct pci_dev *_pci_dev;
+       for_each_pci_dev(_pci_dev) {
                int node;
 
-               _pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
-               if (!_pci_dev)
+               if (_pci_dev->bus->number != bus_number ||
+                   _pci_dev->devfn != PCI_DEVFN(dev, fn))
                        continue;
 
                ++no_matches;
index 307ee6f..6f83e99 100644 (file)
@@ -624,10 +624,8 @@ static int ab8500_btemp_get_ext_psy_data(struct device *dev, void *data)
  */
 static void ab8500_btemp_external_power_changed(struct power_supply *psy)
 {
-       struct ab8500_btemp *di = power_supply_get_drvdata(psy);
-
-       class_for_each_device(power_supply_class, NULL,
-               di->btemp_psy, ab8500_btemp_get_ext_psy_data);
+       class_for_each_device(power_supply_class, NULL, psy,
+                             ab8500_btemp_get_ext_psy_data);
 }
 
 /* ab8500 btemp driver interrupts and their respective isr */
index 41a7bff..53560fb 100644 (file)
@@ -2407,10 +2407,8 @@ out:
  */
 static void ab8500_fg_external_power_changed(struct power_supply *psy)
 {
-       struct ab8500_fg *di = power_supply_get_drvdata(psy);
-
-       class_for_each_device(power_supply_class, NULL,
-               di->fg_psy, ab8500_fg_get_ext_psy_data);
+       class_for_each_device(power_supply_class, NULL, psy,
+                             ab8500_fg_get_ext_psy_data);
 }
 
 /**
index 05f4131..3be6f3b 100644 (file)
@@ -507,7 +507,7 @@ static void fuel_gauge_external_power_changed(struct power_supply *psy)
        mutex_lock(&info->lock);
        info->valid = 0; /* Force updating of the cached registers */
        mutex_unlock(&info->lock);
-       power_supply_changed(info->bat);
+       power_supply_changed(psy);
 }
 
 static struct power_supply_desc fuel_gauge_desc = {
index de67b98..dc33f00 100644 (file)
@@ -1262,6 +1262,7 @@ static void bq24190_input_current_limit_work(struct work_struct *work)
        bq24190_charger_set_property(bdi->charger,
                                     POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
                                     &val);
+       power_supply_changed(bdi->charger);
 }
 
 /* Sync the input-current-limit with our parent supply (if we have one) */
index 22cde35..f8636cf 100644 (file)
@@ -750,7 +750,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
        if (bq->chip_version != BQ25892)
                return;
 
-       ret = power_supply_get_property_from_supplier(bq->charger,
+       ret = power_supply_get_property_from_supplier(psy,
                                                      POWER_SUPPLY_PROP_USB_TYPE,
                                                      &val);
        if (ret)
@@ -775,6 +775,7 @@ static void bq25890_charger_external_power_changed(struct power_supply *psy)
        }
 
        bq25890_field_write(bq, F_IINLIM, input_current_limit);
+       power_supply_changed(psy);
 }
 
 static int bq25890_get_chip_state(struct bq25890_device *bq,
@@ -1106,6 +1107,8 @@ static void bq25890_pump_express_work(struct work_struct *data)
        dev_info(bq->dev, "Hi-voltage charging requested, input voltage is %d mV\n",
                 voltage);
 
+       power_supply_changed(bq->charger);
+
        return;
 error_print:
        bq25890_field_write(bq, F_PUMPX_EN, 0);
index 5ff6f44..4296600 100644 (file)
@@ -1083,10 +1083,8 @@ static int poll_interval_param_set(const char *val, const struct kernel_param *k
                return ret;
 
        mutex_lock(&bq27xxx_list_lock);
-       list_for_each_entry(di, &bq27xxx_battery_devices, list) {
-               cancel_delayed_work_sync(&di->work);
-               schedule_delayed_work(&di->work, 0);
-       }
+       list_for_each_entry(di, &bq27xxx_battery_devices, list)
+               mod_delayed_work(system_wq, &di->work, 0);
        mutex_unlock(&bq27xxx_list_lock);
 
        return ret;
@@ -1761,60 +1759,6 @@ static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di)
        return POWER_SUPPLY_HEALTH_GOOD;
 }
 
-void bq27xxx_battery_update(struct bq27xxx_device_info *di)
-{
-       struct bq27xxx_reg_cache cache = {0, };
-       bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
-
-       cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
-       if ((cache.flags & 0xff) == 0xff)
-               cache.flags = -1; /* read error */
-       if (cache.flags >= 0) {
-               cache.temperature = bq27xxx_battery_read_temperature(di);
-               if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
-                       cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
-               if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
-                       cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
-               if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
-                       cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
-
-               cache.charge_full = bq27xxx_battery_read_fcc(di);
-               cache.capacity = bq27xxx_battery_read_soc(di);
-               if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
-                       cache.energy = bq27xxx_battery_read_energy(di);
-               di->cache.flags = cache.flags;
-               cache.health = bq27xxx_battery_read_health(di);
-               if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
-                       cache.cycle_count = bq27xxx_battery_read_cyct(di);
-
-               /* We only have to read charge design full once */
-               if (di->charge_design_full <= 0)
-                       di->charge_design_full = bq27xxx_battery_read_dcap(di);
-       }
-
-       if ((di->cache.capacity != cache.capacity) ||
-           (di->cache.flags != cache.flags))
-               power_supply_changed(di->bat);
-
-       if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
-               di->cache = cache;
-
-       di->last_update = jiffies;
-}
-EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
-
-static void bq27xxx_battery_poll(struct work_struct *work)
-{
-       struct bq27xxx_device_info *di =
-                       container_of(work, struct bq27xxx_device_info,
-                                    work.work);
-
-       bq27xxx_battery_update(di);
-
-       if (poll_interval > 0)
-               schedule_delayed_work(&di->work, poll_interval * HZ);
-}
-
 static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
 {
        if (di->opts & BQ27XXX_O_ZERO)
@@ -1833,7 +1777,8 @@ static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags)
 static int bq27xxx_battery_current_and_status(
        struct bq27xxx_device_info *di,
        union power_supply_propval *val_curr,
-       union power_supply_propval *val_status)
+       union power_supply_propval *val_status,
+       struct bq27xxx_reg_cache *cache)
 {
        bool single_flags = (di->opts & BQ27XXX_O_ZERO);
        int curr;
@@ -1845,10 +1790,14 @@ static int bq27xxx_battery_current_and_status(
                return curr;
        }
 
-       flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
-       if (flags < 0) {
-               dev_err(di->dev, "error reading flags\n");
-               return flags;
+       if (cache) {
+               flags = cache->flags;
+       } else {
+               flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, single_flags);
+               if (flags < 0) {
+                       dev_err(di->dev, "error reading flags\n");
+                       return flags;
+               }
        }
 
        if (di->opts & BQ27XXX_O_ZERO) {
@@ -1883,6 +1832,78 @@ static int bq27xxx_battery_current_and_status(
        return 0;
 }
 
+static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di)
+{
+       union power_supply_propval status = di->last_status;
+       struct bq27xxx_reg_cache cache = {0, };
+       bool has_singe_flag = di->opts & BQ27XXX_O_ZERO;
+
+       cache.flags = bq27xxx_read(di, BQ27XXX_REG_FLAGS, has_singe_flag);
+       if ((cache.flags & 0xff) == 0xff)
+               cache.flags = -1; /* read error */
+       if (cache.flags >= 0) {
+               cache.temperature = bq27xxx_battery_read_temperature(di);
+               if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR)
+                       cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE);
+               if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR)
+                       cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP);
+               if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR)
+                       cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF);
+
+               cache.charge_full = bq27xxx_battery_read_fcc(di);
+               cache.capacity = bq27xxx_battery_read_soc(di);
+               if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR)
+                       cache.energy = bq27xxx_battery_read_energy(di);
+               di->cache.flags = cache.flags;
+               cache.health = bq27xxx_battery_read_health(di);
+               if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR)
+                       cache.cycle_count = bq27xxx_battery_read_cyct(di);
+
+               /*
+                * On gauges with signed current reporting the current must be
+                * checked to detect charging <-> discharging status changes.
+                */
+               if (!(di->opts & BQ27XXX_O_ZERO))
+                       bq27xxx_battery_current_and_status(di, NULL, &status, &cache);
+
+               /* We only have to read charge design full once */
+               if (di->charge_design_full <= 0)
+                       di->charge_design_full = bq27xxx_battery_read_dcap(di);
+       }
+
+       if ((di->cache.capacity != cache.capacity) ||
+           (di->cache.flags != cache.flags) ||
+           (di->last_status.intval != status.intval)) {
+               di->last_status.intval = status.intval;
+               power_supply_changed(di->bat);
+       }
+
+       if (memcmp(&di->cache, &cache, sizeof(cache)) != 0)
+               di->cache = cache;
+
+       di->last_update = jiffies;
+
+       if (!di->removed && poll_interval > 0)
+               mod_delayed_work(system_wq, &di->work, poll_interval * HZ);
+}
+
+void bq27xxx_battery_update(struct bq27xxx_device_info *di)
+{
+       mutex_lock(&di->lock);
+       bq27xxx_battery_update_unlocked(di);
+       mutex_unlock(&di->lock);
+}
+EXPORT_SYMBOL_GPL(bq27xxx_battery_update);
+
+static void bq27xxx_battery_poll(struct work_struct *work)
+{
+       struct bq27xxx_device_info *di =
+                       container_of(work, struct bq27xxx_device_info,
+                                    work.work);
+
+       bq27xxx_battery_update(di);
+}
+
 /*
  * Get the average power in µW
  * Return < 0 if something fails.
@@ -1985,10 +2006,8 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
        struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
 
        mutex_lock(&di->lock);
-       if (time_is_before_jiffies(di->last_update + 5 * HZ)) {
-               cancel_delayed_work_sync(&di->work);
-               bq27xxx_battery_poll(&di->work.work);
-       }
+       if (time_is_before_jiffies(di->last_update + 5 * HZ))
+               bq27xxx_battery_update_unlocked(di);
        mutex_unlock(&di->lock);
 
        if (psp != POWER_SUPPLY_PROP_PRESENT && di->cache.flags < 0)
@@ -1996,7 +2015,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
 
        switch (psp) {
        case POWER_SUPPLY_PROP_STATUS:
-               ret = bq27xxx_battery_current_and_status(di, NULL, val);
+               ret = bq27xxx_battery_current_and_status(di, NULL, val, NULL);
                break;
        case POWER_SUPPLY_PROP_VOLTAGE_NOW:
                ret = bq27xxx_battery_voltage(di, val);
@@ -2005,7 +2024,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy,
                val->intval = di->cache.flags < 0 ? 0 : 1;
                break;
        case POWER_SUPPLY_PROP_CURRENT_NOW:
-               ret = bq27xxx_battery_current_and_status(di, val, NULL);
+               ret = bq27xxx_battery_current_and_status(di, val, NULL, NULL);
                break;
        case POWER_SUPPLY_PROP_CAPACITY:
                ret = bq27xxx_simple_value(di->cache.capacity, val);
@@ -2078,8 +2097,8 @@ static void bq27xxx_external_power_changed(struct power_supply *psy)
 {
        struct bq27xxx_device_info *di = power_supply_get_drvdata(psy);
 
-       cancel_delayed_work_sync(&di->work);
-       schedule_delayed_work(&di->work, 0);
+       /* After charger plug in/out wait 0.5s for things to stabilize */
+       mod_delayed_work(system_wq, &di->work, HZ / 2);
 }
 
 int bq27xxx_battery_setup(struct bq27xxx_device_info *di)
@@ -2127,22 +2146,18 @@ EXPORT_SYMBOL_GPL(bq27xxx_battery_setup);
 
 void bq27xxx_battery_teardown(struct bq27xxx_device_info *di)
 {
-       /*
-        * power_supply_unregister call bq27xxx_battery_get_property which
-        * call bq27xxx_battery_poll.
-        * Make sure that bq27xxx_battery_poll will not call
-        * schedule_delayed_work again after unregister (which cause OOPS).
-        */
-       poll_interval = 0;
-
-       cancel_delayed_work_sync(&di->work);
-
-       power_supply_unregister(di->bat);
-
        mutex_lock(&bq27xxx_list_lock);
        list_del(&di->list);
        mutex_unlock(&bq27xxx_list_lock);
 
+       /* Set removed to avoid bq27xxx_battery_update() re-queuing the work */
+       mutex_lock(&di->lock);
+       di->removed = true;
+       mutex_unlock(&di->lock);
+
+       cancel_delayed_work_sync(&di->work);
+
+       power_supply_unregister(di->bat);
        mutex_destroy(&di->lock);
 }
 EXPORT_SYMBOL_GPL(bq27xxx_battery_teardown);
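
The new teardown order is: drop the device from the global list, set di->removed under the same lock the update path takes, cancel the work, then unregister the supply. That way an update running concurrently with teardown can never re-arm the delayed work after the cancel. A small pthread illustration of the flag-then-cancel rule (build with cc -pthread; names invented, single-threaded for clarity):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool removed;

    static void update_worker(void)
    {
            pthread_mutex_lock(&lock);
            if (!removed)
                    printf("re-queue poll work\n"); /* mod_delayed_work() */
            else
                    printf("removed: do not re-arm\n");
            pthread_mutex_unlock(&lock);
    }

    static void teardown(void)
    {
            pthread_mutex_lock(&lock);
            removed = true;         /* step 1: forbid re-arming */
            pthread_mutex_unlock(&lock);
            /* step 2 would be cancel_delayed_work_sync(); any in-flight
             * worker sees removed == true and does not re-queue */
            update_worker();        /* simulate the last in-flight run */
    }

    int main(void)
    {
            update_worker();
            teardown();
            return 0;
    }
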
index f876899..6d3c748 100644 (file)
@@ -179,7 +179,7 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client)
        i2c_set_clientdata(client, di);
 
        if (client->irq) {
-               ret = devm_request_threaded_irq(&client->dev, client->irq,
+               ret = request_threaded_irq(client->irq,
                                NULL, bq27xxx_battery_irq_handler_thread,
                                IRQF_ONESHOT,
                                di->name, di);
@@ -209,6 +209,7 @@ static void bq27xxx_battery_i2c_remove(struct i2c_client *client)
 {
        struct bq27xxx_device_info *di = i2c_get_clientdata(client);
 
+       free_irq(client->irq, di);
        bq27xxx_battery_teardown(di);
 
        mutex_lock(&battery_mutex);
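Switching from devm_request_threaded_irq() to the manual variants is an ordering fix: a devm-managed IRQ is released only after the remove callback returns, so the threaded handler could still run against state that bq27xxx_battery_teardown() had already destroyed. Freeing the IRQ at the top of remove closes that window. A hedged sketch of the resulting shape, reusing the hypothetical example_batt context from the sketch above:

#include <linux/i2c.h>
#include <linux/interrupt.h>

static void example_batt_i2c_remove(struct i2c_client *client)
{
	struct example_batt *eb = i2c_get_clientdata(client);

	free_irq(client->irq, eb);	/* quiesce the handler first ... */
	example_batt_teardown(eb);	/* ... then tear down driver state */
}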
index 92e48e3..1305cba 100644 (file)
@@ -796,7 +796,9 @@ static int mt6360_charger_probe(struct platform_device *pdev)
        mci->vinovp = 6500000;
        mutex_init(&mci->chgdet_lock);
        platform_set_drvdata(pdev, mci);
-       devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+       ret = devm_work_autocancel(&pdev->dev, &mci->chrdet_work, mt6360_chrdet_work);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "Failed to set delayed work\n");
 
        ret = device_property_read_u32(&pdev->dev, "richtek,vinovp-microvolt", &mci->vinovp);
        if (ret)
index ab986db..3791aec 100644 (file)
@@ -348,6 +348,10 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
        struct power_supply *psy = dev_get_drvdata(dev);
        unsigned int *count = data;
 
+       if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_SCOPE, &ret))
+               if (ret.intval == POWER_SUPPLY_SCOPE_DEVICE)
+                       return 0;
+
        (*count)++;
        if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY)
                if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE,
@@ -366,8 +370,8 @@ int power_supply_is_system_supplied(void)
                                      __power_supply_is_system_supplied);
 
        /*
-        * If no power class device was found at all, most probably we are
-        * running on a desktop system, so assume we are on mains power.
+        * If no system scope power class device was found at all, most probably we
+        * are running on a desktop system, so assume we are on mains power.
         */
        if (count == 0)
                return 1;
@@ -573,7 +577,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
        struct power_supply_battery_info *info;
        struct device_node *battery_np = NULL;
        struct fwnode_reference_args args;
-       struct fwnode_handle *fwnode;
+       struct fwnode_handle *fwnode = NULL;
        const char *value;
        int err, len, index;
        const __be32 *list;
@@ -585,7 +589,7 @@ int power_supply_get_battery_info(struct power_supply *psy,
                        return -ENODEV;
 
                fwnode = fwnode_handle_get(of_fwnode_handle(battery_np));
-       } else {
+       } else if (psy->dev.parent) {
                err = fwnode_property_get_reference_args(
                                        dev_fwnode(psy->dev.parent),
                                        "monitored-battery", NULL, 0, 0, &args);
@@ -595,6 +599,9 @@ int power_supply_get_battery_info(struct power_supply *psy,
                fwnode = args.fwnode;
        }
 
+       if (!fwnode)
+               return -ENOENT;
+
        err = fwnode_property_read_string(fwnode, "compatible", &value);
        if (err)
                goto out_put_node;
index 702bf83..0674483 100644 (file)
@@ -35,8 +35,9 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
                led_trigger_event(psy->charging_full_trig, LED_FULL);
                led_trigger_event(psy->charging_trig, LED_OFF);
                led_trigger_event(psy->full_trig, LED_FULL);
-               led_trigger_event(psy->charging_blink_full_solid_trig,
-                       LED_FULL);
+               /* Going from blink to LED on requires a LED_OFF event to stop blink */
+               led_trigger_event(psy->charging_blink_full_solid_trig, LED_OFF);
+               led_trigger_event(psy->charging_blink_full_solid_trig, LED_FULL);
                break;
        case POWER_SUPPLY_STATUS_CHARGING:
                led_trigger_event(psy->charging_full_trig, LED_FULL);
index ba3b125..06e5b6b 100644 (file)
@@ -286,7 +286,8 @@ static ssize_t power_supply_show_property(struct device *dev,
 
                if (ret < 0) {
                        if (ret == -ENODATA)
-                               dev_dbg(dev, "driver has no data for `%s' property\n",
+                               dev_dbg_ratelimited(dev,
+                                       "driver has no data for `%s' property\n",
                                        attr->attr.name);
                        else if (ret != -ENODEV && ret != -EAGAIN)
                                dev_err_ratelimited(dev,
index 73f744a..ea33693 100644 (file)
@@ -1023,7 +1023,7 @@ static int rt9467_request_interrupt(struct rt9467_chg_data *data)
        for (i = 0; i < num_chg_irqs; i++) {
                virq = regmap_irq_get_virq(data->irq_chip_data, chg_irqs[i].hwirq);
                if (virq <= 0)
-                       return dev_err_probe(dev, virq, "Failed to get (%s) irq\n",
+                       return dev_err_probe(dev, -EINVAL, "Failed to get (%s) irq\n",
                                             chg_irqs[i].name);
 
                ret = devm_request_threaded_irq(dev, virq, NULL, chg_irqs[i].handler,
index 75ebcbf..a14e89a 100644 (file)
@@ -24,7 +24,7 @@
 #define SBS_CHARGER_REG_STATUS                 0x13
 #define SBS_CHARGER_REG_ALARM_WARNING          0x16
 
-#define SBS_CHARGER_STATUS_CHARGE_INHIBITED    BIT(1)
+#define SBS_CHARGER_STATUS_CHARGE_INHIBITED    BIT(0)
 #define SBS_CHARGER_STATUS_RES_COLD            BIT(9)
 #define SBS_CHARGER_STATUS_RES_HOT             BIT(10)
 #define SBS_CHARGER_STATUS_BATTERY_PRESENT     BIT(14)
index 632977f..bd23c4d 100644 (file)
@@ -733,13 +733,6 @@ static int sc27xx_fgu_set_property(struct power_supply *psy,
        return ret;
 }
 
-static void sc27xx_fgu_external_power_changed(struct power_supply *psy)
-{
-       struct sc27xx_fgu_data *data = power_supply_get_drvdata(psy);
-
-       power_supply_changed(data->battery);
-}
-
 static int sc27xx_fgu_property_is_writeable(struct power_supply *psy,
                                            enum power_supply_property psp)
 {
@@ -774,7 +767,7 @@ static const struct power_supply_desc sc27xx_fgu_desc = {
        .num_properties         = ARRAY_SIZE(sc27xx_fgu_props),
        .get_property           = sc27xx_fgu_get_property,
        .set_property           = sc27xx_fgu_set_property,
-       .external_power_changed = sc27xx_fgu_external_power_changed,
+       .external_power_changed = power_supply_changed,
        .property_is_writeable  = sc27xx_fgu_property_is_writeable,
        .no_thermal             = true,
 };
index dc741ac..698ab7f 100644 (file)
@@ -5256,7 +5256,7 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
        }
 
        rdev->debugfs = debugfs_create_dir(rname, debugfs_root);
-       if (!rdev->debugfs) {
+       if (IS_ERR(rdev->debugfs)) {
                rdev_warn(rdev, "Failed to create debugfs directory\n");
                return;
        }
@@ -6178,7 +6178,7 @@ static int __init regulator_init(void)
        ret = class_register(&regulator_class);
 
        debugfs_root = debugfs_create_dir("regulator", NULL);
-       if (!debugfs_root)
+       if (IS_ERR(debugfs_root))
                pr_warn("regulator: Failed to create debugfs directory\n");
 
 #ifdef CONFIG_DEBUG_FS
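Both hunks correct the same class of bug: debugfs creation helpers return an ERR_PTR()-encoded error (or a valid dentry) on failure, never NULL, so a NULL check can never observe the error. A short illustration of the correct check, with a hypothetical directory name:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct dentry *example_debugfs_dir(void)
{
	struct dentry *dir = debugfs_create_dir("example", NULL);

	if (IS_ERR(dir)) {		/* NULL is never returned on failure */
		pr_warn("example: failed to create debugfs directory\n");
		return NULL;		/* non-fatal, as in the hunks above */
	}
	return dir;
}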
index 1849566..3eb86ec 100644 (file)
@@ -951,9 +951,12 @@ static int mt6359_regulator_probe(struct platform_device *pdev)
        struct regulator_config config = {};
        struct regulator_dev *rdev;
        struct mt6359_regulator_info *mt6359_info;
-       int i, hw_ver;
+       int i, hw_ver, ret;
+
+       ret = regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
+       if (ret)
+               return ret;
 
-       regmap_read(mt6397->regmap, MT6359P_HWCID, &hw_ver);
        if (hw_ver >= MT6359P_CHIP_VER)
                mt6359_info = mt6359p_regulators;
        else
index 87a746d..e75dd92 100644 (file)
@@ -264,7 +264,7 @@ static const struct pca9450_regulator_desc pca9450a_regulators[] = {
                        .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
                        .vsel_mask = BUCK2OUT_DVS0_MASK,
                        .enable_reg = PCA9450_REG_BUCK2CTRL,
-                       .enable_mask = BUCK1_ENMODE_MASK,
+                       .enable_mask = BUCK2_ENMODE_MASK,
                        .ramp_reg = PCA9450_REG_BUCK2CTRL,
                        .ramp_mask = BUCK2_RAMP_MASK,
                        .ramp_delay_table = pca9450_dvs_buck_ramp_table,
@@ -502,7 +502,7 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = {
                        .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0,
                        .vsel_mask = BUCK2OUT_DVS0_MASK,
                        .enable_reg = PCA9450_REG_BUCK2CTRL,
-                       .enable_mask = BUCK1_ENMODE_MASK,
+                       .enable_mask = BUCK2_ENMODE_MASK,
                        .ramp_reg = PCA9450_REG_BUCK2CTRL,
                        .ramp_mask = BUCK2_RAMP_MASK,
                        .ramp_delay_table = pca9450_dvs_buck_ramp_table,
index ade1369..113c509 100644 (file)
@@ -127,6 +127,8 @@ static int prepare_itcw(struct itcw *, unsigned int, unsigned int, int,
                        struct dasd_device *, struct dasd_device *,
                        unsigned int, int, unsigned int, unsigned int,
                        unsigned int, unsigned int);
+static int dasd_eckd_query_pprc_status(struct dasd_device *,
+                                      struct dasd_pprc_data_sc4 *);
 
 /* initial attempt at a probe function. this can be simplified once
  * the other detection code is gone */
@@ -3733,6 +3735,26 @@ static int count_exts(unsigned int from, unsigned int to, int trks_per_ext)
        return count;
 }
 
+static int dasd_in_copy_relation(struct dasd_device *device)
+{
+       struct dasd_pprc_data_sc4 *temp;
+       int rc;
+
+       if (!dasd_eckd_pprc_enabled(device))
+               return 0;
+
+       temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+       if (!temp)
+               return -ENOMEM;
+
+       rc = dasd_eckd_query_pprc_status(device, temp);
+       if (!rc)
+               rc = temp->dev_info[0].state;
+
+       kfree(temp);
+       return rc;
+}
+
 /*
  * Release allocated space for a given range or an entire volume.
  */
@@ -3749,6 +3771,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        int cur_to_trk, cur_from_trk;
        struct dasd_ccw_req *cqr;
        u32 beg_cyl, end_cyl;
+       int copy_relation;
        struct ccw1 *ccw;
        int trks_per_ext;
        size_t ras_size;
@@ -3760,6 +3783,10 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        if (dasd_eckd_ras_sanity_checks(device, first_trk, last_trk))
                return ERR_PTR(-EINVAL);
 
+       copy_relation = dasd_in_copy_relation(device);
+       if (copy_relation < 0)
+               return ERR_PTR(copy_relation);
+
        rq = req ? blk_mq_rq_to_pdu(req) : NULL;
 
        features = &private->features;
@@ -3788,9 +3815,11 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
        /*
         * This bit guarantees initialisation of tracks within an extent that is
         * not fully specified, but is only supported with a certain feature
-        * subset.
+        * subset and for devices not in a copy relation.
         */
-       ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
+       if (features->feature[56] & 0x01 && !copy_relation)
+               ras_data->op_flags.guarantee_init = 1;
+
        ras_data->lss = private->conf.ned->ID;
        ras_data->dev_addr = private->conf.ned->unit_addr;
        ras_data->nr_exts = nr_exts;
index df5e5b7..84aa357 100644 (file)
@@ -3796,6 +3796,7 @@ struct qla_qpair {
        uint64_t retry_term_jiff;
        struct qla_tgt_counters tgt_counters;
        uint16_t cpuid;
+       bool cpu_mapped;
        struct qla_fw_resources fwres ____cacheline_aligned;
        struct  qla_buf_pool buf_pool;
        u32     cmd_cnt;
index ec0423e..1a955c3 100644 (file)
@@ -9426,6 +9426,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
                qpair->rsp->req = qpair->req;
                qpair->rsp->qpair = qpair;
 
+               if (!qpair->cpu_mapped)
+                       qla_cpu_update(qpair, raw_smp_processor_id());
+
                if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
                        if (ha->fw_attributes & BIT_4)
                                qpair->difdix_supported = 1;
index cce6e42..7b42558 100644 (file)
@@ -539,11 +539,14 @@ qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha,
        if (!ha->qp_cpu_map)
                return;
        mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0);
+       if (!mask)
+               return;
        qpair->cpuid = cpumask_first(mask);
        for_each_cpu(cpu, mask) {
                ha->qp_cpu_map[cpu] = qpair;
        }
        msix->cpuid = qpair->cpuid;
+       qpair->cpu_mapped = true;
 }
 
 static inline void
index 71feda2..245e3a5 100644 (file)
@@ -3770,6 +3770,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 
        if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
                rsp->qpair->rcv_intr = 1;
+
+               if (!rsp->qpair->cpu_mapped)
+                       qla_cpu_update(rsp->qpair, raw_smp_processor_id());
        }
 
 #define __update_rsp_in(_is_shadow_hba, _rsp, _rsp_in)                 \
index b7c569a..0226c92 100644 (file)
@@ -1463,6 +1463,8 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        struct Scsi_Host *host = cmd->device->host;
        int rtn = 0;
 
+       atomic_inc(&cmd->device->iorequest_cnt);
+
        /* check if the device is still usable */
        if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
                /* in SDEV_DEL we error all commands. DID_NO_CONNECT
@@ -1483,6 +1485,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
                 */
                SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
                        "queuecommand : device blocked\n"));
+               atomic_dec(&cmd->device->iorequest_cnt);
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }
 
@@ -1515,6 +1518,7 @@ static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
        trace_scsi_dispatch_cmd_start(cmd);
        rtn = host->hostt->queuecommand(host, cmd);
        if (rtn) {
+               atomic_dec(&cmd->device->iorequest_cnt);
                trace_scsi_dispatch_cmd_error(cmd, rtn);
                if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
                    rtn != SCSI_MLQUEUE_TARGET_BUSY)
@@ -1761,7 +1765,6 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                goto out_dec_host_busy;
        }
 
-       atomic_inc(&cmd->device->iorequest_cnt);
        return BLK_STS_OK;
 
 out_dec_host_busy:
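The net effect of these hunks is that iorequest_cnt is incremented once at the top of scsi_dispatch_cmd() and decremented on every path that fails to hand the command to the low-level driver, instead of being incremented later in scsi_queue_rq(). The invariant is the usual one for statistics counters: every early return must undo the optimistic increment. A hedged sketch with hypothetical ex_* names standing in for the midlayer structures and checks:

#include <linux/atomic.h>
#include <linux/errno.h>

struct ex_sdev {				/* hypothetical device */
	atomic_t iorequest_cnt;
};

static bool ex_device_blocked(struct ex_sdev *sd);	/* placeholder checks */
static int ex_queuecommand(struct ex_sdev *sd);

static int ex_dispatch(struct ex_sdev *sd)
{
	atomic_inc(&sd->iorequest_cnt);		/* optimistic: count the attempt */

	if (ex_device_blocked(sd)) {
		atomic_dec(&sd->iorequest_cnt);	/* never reached the LLD */
		return -EAGAIN;
	}

	if (ex_queuecommand(sd)) {
		atomic_dec(&sd->iorequest_cnt);	/* the LLD refused the command */
		return -EBUSY;
	}

	return 0;
}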
index 5b230e1..8ffb75b 100644 (file)
@@ -109,7 +109,9 @@ enum {
        TASK_ATTRIBUTE_HEADOFQUEUE              = 0x1,
        TASK_ATTRIBUTE_ORDERED                  = 0x2,
        TASK_ATTRIBUTE_ACA                      = 0x4,
+};
 
+enum {
        SS_STS_NORMAL                           = 0x80000000,
        SS_STS_DONE                             = 0x40000000,
        SS_STS_HANDSHAKE                        = 0x20000000,
@@ -121,7 +123,9 @@ enum {
        SS_I2H_REQUEST_RESET                    = 0x2000,
 
        SS_MU_OPERATIONAL                       = 0x80000000,
+};
 
+enum {
        STEX_CDB_LENGTH                         = 16,
        STATUS_VAR_LEN                          = 128,
 
index d9ce379..e6bc622 100644 (file)
@@ -1780,7 +1780,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
 
        length = scsi_bufflen(scmnd);
        payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
-       payload_sz = sizeof(cmd_request->mpb);
+       payload_sz = 0;
 
        if (scsi_sg_count(scmnd)) {
                unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
@@ -1789,10 +1789,10 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                unsigned long hvpfn, hvpfns_to_add;
                int j, i = 0, sg_count;
 
-               if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
+               payload_sz = (hvpg_count * sizeof(u64) +
+                             sizeof(struct vmbus_packet_mpb_array));
 
-                       payload_sz = (hvpg_count * sizeof(u64) +
-                                     sizeof(struct vmbus_packet_mpb_array));
+               if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
                        payload = kzalloc(payload_sz, GFP_ATOMIC);
                        if (!payload)
                                return SCSI_MLQUEUE_DEVICE_BUSY;
index 7268c2f..e0d0966 100644 (file)
@@ -36,7 +36,7 @@ config UCC
 config CPM_TSA
        tristate "CPM TSA support"
        depends on OF && HAS_IOMEM
-       depends on CPM1 || COMPILE_TEST
+       depends on CPM1 || (CPM && COMPILE_TEST)
        help
          Freescale CPM Time Slot Assigner (TSA)
          controller.
@@ -47,7 +47,7 @@ config CPM_TSA
 config CPM_QMC
        tristate "CPM QMC support"
        depends on OF && HAS_IOMEM
-       depends on CPM1 || (FSL_SOC && COMPILE_TEST)
+       depends on CPM1 || (FSL_SOC && CPM && COMPILE_TEST)
        depends on CPM_TSA
        help
          Freescale CPM QUICC Multichannel Controller
index ac85d55..26e6633 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
@@ -301,49 +302,43 @@ static int cdns_spi_setup_transfer(struct spi_device *spi,
 }
 
 /**
- * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * cdns_spi_process_fifo - Fills the TX FIFO and drains the RX FIFO
  * @xspi:      Pointer to the cdns_spi structure
+ * @ntx:       Number of bytes to pack into the TX FIFO
+ * @nrx:       Number of bytes to drain from the RX FIFO
  */
-static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
 {
-       unsigned long trans_cnt = 0;
+       ntx = clamp(ntx, 0, xspi->tx_bytes);
+       nrx = clamp(nrx, 0, xspi->rx_bytes);
 
-       while ((trans_cnt < xspi->tx_fifo_depth) &&
-              (xspi->tx_bytes > 0)) {
+       xspi->tx_bytes -= ntx;
+       xspi->rx_bytes -= nrx;
 
+       while (ntx || nrx) {
                /* If the xspi is busy, bytes may fail to send and the SPI
                 * controller doesn't work reliably, so add a one byte delay
                 */
-               if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
-                   CDNS_SPI_IXR_TXFULL)
+               if (cdns_spi_read(xspi, CDNS_SPI_ISR) & CDNS_SPI_IXR_TXFULL)
                        udelay(10);
 
-               if (xspi->txbuf)
-                       cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
-               else
-                       cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
+               if (ntx) {
+                       if (xspi->txbuf)
+                               cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+                       else
+                               cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
 
-               xspi->tx_bytes--;
-               trans_cnt++;
-       }
-}
+                       ntx--;
+               }
 
-/**
- * cdns_spi_read_rx_fifo - Reads the RX FIFO with as many bytes as possible
- * @xspi:       Pointer to the cdns_spi structure
- * @count:     Read byte count
- */
-static void cdns_spi_read_rx_fifo(struct cdns_spi *xspi, unsigned long count)
-{
-       u8 data;
-
-       /* Read out the data from the RX FIFO */
-       while (count > 0) {
-               data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-               if (xspi->rxbuf)
-                       *xspi->rxbuf++ = data;
-               xspi->rx_bytes--;
-               count--;
+               if (nrx) {
+                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+                       if (xspi->rxbuf)
+                               *xspi->rxbuf++ = data;
+
+                       nrx--;
+               }
        }
 }
 
@@ -381,33 +376,22 @@ static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
                spi_finalize_current_transfer(ctlr);
                status = IRQ_HANDLED;
        } else if (intr_status & CDNS_SPI_IXR_TXOW) {
-               int trans_cnt = cdns_spi_read(xspi, CDNS_SPI_THLD);
+               int threshold = cdns_spi_read(xspi, CDNS_SPI_THLD);
+               int trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
+
+               if (threshold > 1)
+                       trans_cnt -= threshold;
+
                /* Set the threshold to one if the number of pending
                 * bytes is less than half the FIFO depth
                 */
                if (xspi->tx_bytes < xspi->tx_fifo_depth >> 1)
                        cdns_spi_write(xspi, CDNS_SPI_THLD, 1);
 
-               while (trans_cnt) {
-                       cdns_spi_read_rx_fifo(xspi, 1);
-
-                       if (xspi->tx_bytes) {
-                               if (xspi->txbuf)
-                                       cdns_spi_write(xspi, CDNS_SPI_TXD,
-                                                      *xspi->txbuf++);
-                               else
-                                       cdns_spi_write(xspi, CDNS_SPI_TXD, 0);
-                               xspi->tx_bytes--;
-                       }
-                       trans_cnt--;
-               }
-               if (!xspi->tx_bytes) {
-                       /* Fixed delay due to controller limitation with
-                        * RX_NEMPTY incorrect status
-                        * Xilinx AR:65885 contains more details
-                        */
-                       udelay(10);
-                       cdns_spi_read_rx_fifo(xspi, xspi->rx_bytes);
+               if (xspi->tx_bytes) {
+                       cdns_spi_process_fifo(xspi, trans_cnt, trans_cnt);
+               } else {
+                       cdns_spi_process_fifo(xspi, 0, trans_cnt);
                        cdns_spi_write(xspi, CDNS_SPI_IDR,
                                       CDNS_SPI_IXR_DEFAULT);
                        spi_finalize_current_transfer(ctlr);
@@ -450,16 +434,17 @@ static int cdns_transfer_one(struct spi_controller *ctlr,
        xspi->tx_bytes = transfer->len;
        xspi->rx_bytes = transfer->len;
 
-       if (!spi_controller_is_slave(ctlr))
+       if (!spi_controller_is_slave(ctlr)) {
                cdns_spi_setup_transfer(spi, transfer);
+       } else {
+               /* Set TX empty threshold to half of FIFO depth
+                * only if there are more TX bytes than the FIFO depth.
+                */
+               if (xspi->tx_bytes > xspi->tx_fifo_depth)
+                       cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
+       }
 
-       /* Set TX empty threshold to half of FIFO depth
-        * only if TX bytes are more than half FIFO depth.
-        */
-       if (xspi->tx_bytes > (xspi->tx_fifo_depth >> 1))
-               cdns_spi_write(xspi, CDNS_SPI_THLD, xspi->tx_fifo_depth >> 1);
-
-       cdns_spi_fill_tx_fifo(xspi);
+       cdns_spi_process_fifo(xspi, xspi->tx_fifo_depth, 0);
        spi_transfer_delay_exec(transfer);
 
        cdns_spi_write(xspi, CDNS_SPI_IER, CDNS_SPI_IXR_DEFAULT);
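Worth annotating: with the single cdns_spi_process_fifo(), tx_bytes and rx_bytes are the only bookkeeping, so the in-flight count is always rx_bytes - tx_bytes. A worked example of the resulting flow; the 128-byte transfer, 16-entry FIFO depth and a THLD staying at 1 are illustrative assumptions, not values taken from the driver:

/*
 *   cdns_transfer_one():   process_fifo(xspi, 16, 0)
 *       tx_bytes 128 -> 112, rx_bytes stays 128 (16 bytes in flight)
 *   TXOW interrupt:        trans_cnt = rx_bytes - tx_bytes = 16
 *       process_fifo(xspi, 16, 16): drain 16 RX, push the next 16 TX
 *   ... repeats while tx_bytes > 0 ...
 *   last TXOW interrupt:   tx_bytes == 0
 *       process_fifo(xspi, 0, trans_cnt) drains the tail, then the
 *       transfer is finalized.
 */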
index 5e6faa9..5f2aee6 100644 (file)
@@ -264,17 +264,17 @@ static void dw_spi_elba_set_cs(struct spi_device *spi, bool enable)
        struct regmap *syscon = dwsmmio->priv;
        u8 cs;
 
-       cs = spi->chip_select;
+       cs = spi_get_chipselect(spi, 0);
        if (cs < 2)
-               dw_spi_elba_override_cs(syscon, spi->chip_select, enable);
+               dw_spi_elba_override_cs(syscon, spi_get_chipselect(spi, 0), enable);
 
        /*
         * The DW SPI controller needs a native CS bit selected to start
         * the serial engine.
         */
-       spi->chip_select = 0;
+       spi_set_chipselect(spi, 0, 0);
        dw_spi_set_cs(spi, enable);
-       spi->chip_select = cs;
+       spi_set_chipselect(spi, 0, cs);
 }
 
 static int dw_spi_elba_init(struct platform_device *pdev,
index f2341ab..4b70038 100644 (file)
@@ -910,9 +910,14 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
        ret = fsl_lpspi_dma_init(&pdev->dev, fsl_lpspi, controller);
        if (ret == -EPROBE_DEFER)
                goto out_pm_get;
-
        if (ret < 0)
                dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret);
+       else
+               /*
+                * Disable the LPSPI module IRQ when DMA mode has been enabled
+                * successfully, to prevent unexpected LPSPI module IRQ events.
+                */
+               disable_irq(irq);
 
        ret = devm_spi_register_controller(&pdev->dev, controller);
        if (ret < 0) {
index ba7be50..a98b781 100644 (file)
@@ -294,6 +294,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        mas->cs_flag = set_flag;
        /* set xfer_mode to FIFO to complete cs_done in isr */
        mas->cur_xfer_mode = GENI_SE_FIFO;
+       geni_se_select_mode(se, mas->cur_xfer_mode);
+
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
index 21c321f..d7432e2 100644 (file)
@@ -1275,6 +1275,9 @@ static int mtk_spi_remove(struct platform_device *pdev)
        struct mtk_spi *mdata = spi_master_get_devdata(master);
        int ret;
 
+       if (mdata->use_spimem && !completion_done(&mdata->spimem_done))
+               complete(&mdata->spimem_done);
+
        ret = pm_runtime_resume_and_get(&pdev->dev);
        if (ret < 0)
                return ret;
index 944ef6b..00e5e88 100644 (file)
@@ -1028,23 +1028,8 @@ static int spi_qup_probe(struct platform_device *pdev)
                return -ENXIO;
        }
 
-       ret = clk_prepare_enable(cclk);
-       if (ret) {
-               dev_err(dev, "cannot enable core clock\n");
-               return ret;
-       }
-
-       ret = clk_prepare_enable(iclk);
-       if (ret) {
-               clk_disable_unprepare(cclk);
-               dev_err(dev, "cannot enable iface clock\n");
-               return ret;
-       }
-
        master = spi_alloc_master(dev, sizeof(struct spi_qup));
        if (!master) {
-               clk_disable_unprepare(cclk);
-               clk_disable_unprepare(iclk);
                dev_err(dev, "cannot allocate master\n");
                return -ENOMEM;
        }
@@ -1092,6 +1077,19 @@ static int spi_qup_probe(struct platform_device *pdev)
        spin_lock_init(&controller->lock);
        init_completion(&controller->done);
 
+       ret = clk_prepare_enable(cclk);
+       if (ret) {
+               dev_err(dev, "cannot enable core clock\n");
+               goto error_dma;
+       }
+
+       ret = clk_prepare_enable(iclk);
+       if (ret) {
+               clk_disable_unprepare(cclk);
+               dev_err(dev, "cannot enable iface clock\n");
+               goto error_dma;
+       }
+
        iomode = readl_relaxed(base + QUP_IO_M_MODES);
 
        size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
@@ -1121,7 +1119,7 @@ static int spi_qup_probe(struct platform_device *pdev)
        ret = spi_qup_set_state(controller, QUP_STATE_RESET);
        if (ret) {
                dev_err(dev, "cannot set RESET state\n");
-               goto error_dma;
+               goto error_clk;
        }
 
        writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -1145,7 +1143,7 @@ static int spi_qup_probe(struct platform_device *pdev)
        ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
                               IRQF_TRIGGER_HIGH, pdev->name, controller);
        if (ret)
-               goto error_dma;
+               goto error_clk;
 
        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
@@ -1160,11 +1158,12 @@ static int spi_qup_probe(struct platform_device *pdev)
 
 disable_pm:
        pm_runtime_disable(&pdev->dev);
+error_clk:
+       clk_disable_unprepare(cclk);
+       clk_disable_unprepare(iclk);
 error_dma:
        spi_qup_release_dma(master);
 error:
-       clk_disable_unprepare(cclk);
-       clk_disable_unprepare(iclk);
        spi_master_put(master);
        return ret;
 }
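The probe reordering follows the usual rule: acquire resources in the order in which they can be unwound, and let each error label undo exactly what was set up before the jump to it. Enabling the clocks only after spi_alloc_master() lets the new error_clk label slot in above error_dma. A condensed sketch of the shape; ex_alloc(), ex_put(), ex_setup_hw() and cclk are placeholders, not driver symbols:

#include <linux/clk.h>
#include <linux/errno.h>

static int ex_probe(void)
{
	struct ex_master *master;
	int ret;

	master = ex_alloc();			/* 1: allocation, nothing to undo */
	if (!master)
		return -ENOMEM;

	ret = clk_prepare_enable(cclk);		/* 2: clocks */
	if (ret)
		goto err_put;

	ret = ex_setup_hw(master);		/* 3: hardware state */
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(cclk);		/* undo 2 */
err_put:
	ex_put(master);				/* undo 1 */
	return ret;
}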
index 63de214..c079368 100644 (file)
@@ -373,7 +373,7 @@ static int ov2680_get_fmt(struct v4l2_subdev *sd,
 static int ov2680_detect(struct i2c_client *client)
 {
        struct i2c_adapter *adapter = client->adapter;
-       u32 high, low;
+       u32 high = 0, low = 0;
        int ret;
        u16 id;
        u8 revision;
@@ -383,7 +383,7 @@ static int ov2680_detect(struct i2c_client *client)
 
        ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_H, &high);
        if (ret) {
-               dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
+               dev_err(&client->dev, "sensor_id_high read failed (%d)\n", ret);
                return -ENODEV;
        }
        ret = ov_read_reg8(client, OV2680_SC_CMMN_CHIP_ID_L, &low);
index 32700cb..ca2efcc 100644 (file)
@@ -354,7 +354,7 @@ static int imx8mq_mipi_csi_start_stream(struct csi_state *state,
                                        struct v4l2_subdev_state *sd_state)
 {
        int ret;
-       u32 hs_settle;
+       u32 hs_settle = 0;
 
        ret = imx8mq_mipi_csi_sw_reset(state);
        if (ret)
index 834cce5..b516c28 100644 (file)
@@ -364,8 +364,6 @@ struct iscsi_np *iscsit_add_np(
        init_completion(&np->np_restart_comp);
        INIT_LIST_HEAD(&np->np_list);
 
-       timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);
-
        ret = iscsi_target_setup_login_socket(np, sockaddr);
        if (ret != 0) {
                kfree(np);
index 274bdd7..90b870f 100644 (file)
@@ -811,59 +811,6 @@ void iscsi_post_login_handler(
        iscsit_dec_conn_usage_count(conn);
 }
 
-void iscsi_handle_login_thread_timeout(struct timer_list *t)
-{
-       struct iscsi_np *np = from_timer(np, t, np_login_timer);
-
-       spin_lock_bh(&np->np_thread_lock);
-       pr_err("iSCSI Login timeout on Network Portal %pISpc\n",
-                       &np->np_sockaddr);
-
-       if (np->np_login_timer_flags & ISCSI_TF_STOP) {
-               spin_unlock_bh(&np->np_thread_lock);
-               return;
-       }
-
-       if (np->np_thread)
-               send_sig(SIGINT, np->np_thread, 1);
-
-       np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
-       spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_start_login_thread_timer(struct iscsi_np *np)
-{
-       /*
-        * This used the TA_LOGIN_TIMEOUT constant because at this
-        * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout
-        */
-       spin_lock_bh(&np->np_thread_lock);
-       np->np_login_timer_flags &= ~ISCSI_TF_STOP;
-       np->np_login_timer_flags |= ISCSI_TF_RUNNING;
-       mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
-
-       pr_debug("Added timeout timer to iSCSI login request for"
-                       " %u seconds.\n", TA_LOGIN_TIMEOUT);
-       spin_unlock_bh(&np->np_thread_lock);
-}
-
-static void iscsi_stop_login_thread_timer(struct iscsi_np *np)
-{
-       spin_lock_bh(&np->np_thread_lock);
-       if (!(np->np_login_timer_flags & ISCSI_TF_RUNNING)) {
-               spin_unlock_bh(&np->np_thread_lock);
-               return;
-       }
-       np->np_login_timer_flags |= ISCSI_TF_STOP;
-       spin_unlock_bh(&np->np_thread_lock);
-
-       del_timer_sync(&np->np_login_timer);
-
-       spin_lock_bh(&np->np_thread_lock);
-       np->np_login_timer_flags &= ~ISCSI_TF_RUNNING;
-       spin_unlock_bh(&np->np_thread_lock);
-}
-
 int iscsit_setup_np(
        struct iscsi_np *np,
        struct sockaddr_storage *sockaddr)
@@ -1123,10 +1070,13 @@ static struct iscsit_conn *iscsit_alloc_conn(struct iscsi_np *np)
        spin_lock_init(&conn->nopin_timer_lock);
        spin_lock_init(&conn->response_queue_lock);
        spin_lock_init(&conn->state_lock);
+       spin_lock_init(&conn->login_worker_lock);
+       spin_lock_init(&conn->login_timer_lock);
 
        timer_setup(&conn->nopin_response_timer,
                    iscsit_handle_nopin_response_timeout, 0);
        timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0);
+       timer_setup(&conn->login_timer, iscsit_login_timeout, 0);
 
        if (iscsit_conn_set_transport(conn, np->np_transport) < 0)
                goto free_conn;
@@ -1304,7 +1254,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                goto new_sess_out;
        }
 
-       iscsi_start_login_thread_timer(np);
+       iscsit_start_login_timer(conn, current);
 
        pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
        conn->conn_state = TARG_CONN_STATE_XPT_UP;
@@ -1417,8 +1367,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (ret < 0)
                goto new_sess_out;
 
-       iscsi_stop_login_thread_timer(np);
-
        if (ret == 1) {
                tpg_np = conn->tpg_np;
 
@@ -1434,7 +1382,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 new_sess_out:
        new_sess = true;
 old_sess_out:
-       iscsi_stop_login_thread_timer(np);
+       iscsit_stop_login_timer(conn);
        tpg_np = conn->tpg_np;
        iscsi_target_login_sess_out(conn, zero_tsih, new_sess);
        new_sess = false;
@@ -1448,7 +1396,6 @@ old_sess_out:
        return 1;
 
 exit:
-       iscsi_stop_login_thread_timer(np);
        spin_lock_bh(&np->np_thread_lock);
        np->np_thread_state = ISCSI_NP_THREAD_EXIT;
        spin_unlock_bh(&np->np_thread_lock);
index 24040c1..fa3fb5f 100644 (file)
@@ -535,25 +535,6 @@ static void iscsi_target_login_drop(struct iscsit_conn *conn, struct iscsi_login
        iscsi_target_login_sess_out(conn, zero_tsih, true);
 }
 
-struct conn_timeout {
-       struct timer_list timer;
-       struct iscsit_conn *conn;
-};
-
-static void iscsi_target_login_timeout(struct timer_list *t)
-{
-       struct conn_timeout *timeout = from_timer(timeout, t, timer);
-       struct iscsit_conn *conn = timeout->conn;
-
-       pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
-
-       if (conn->login_kworker) {
-               pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
-                        conn->login_kworker->comm, conn->login_kworker->pid);
-               send_sig(SIGINT, conn->login_kworker, 1);
-       }
-}
-
 static void iscsi_target_do_login_rx(struct work_struct *work)
 {
        struct iscsit_conn *conn = container_of(work,
@@ -562,12 +543,15 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
        struct iscsi_np *np = login->np;
        struct iscsi_portal_group *tpg = conn->tpg;
        struct iscsi_tpg_np *tpg_np = conn->tpg_np;
-       struct conn_timeout timeout;
        int rc, zero_tsih = login->zero_tsih;
        bool state;
 
        pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
                        conn, current->comm, current->pid);
+
+       spin_lock(&conn->login_worker_lock);
+       set_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags);
+       spin_unlock(&conn->login_worker_lock);
        /*
         * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
         * before initial PDU processing in iscsi_target_start_negotiation()
@@ -597,19 +581,16 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
                goto err;
        }
 
-       conn->login_kworker = current;
        allow_signal(SIGINT);
-
-       timeout.conn = conn;
-       timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0);
-       mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
-       pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid);
+       rc = iscsit_set_login_timer_kworker(conn, current);
+       if (rc < 0) {
+               /* The login timer has already expired */
+               pr_debug("iscsi_target_do_login_rx, login failed\n");
+               goto err;
+       }
 
        rc = conn->conn_transport->iscsit_get_login_rx(conn, login);
-       del_timer_sync(&timeout.timer);
-       destroy_timer_on_stack(&timeout.timer);
        flush_signals(current);
-       conn->login_kworker = NULL;
 
        if (rc < 0)
                goto err;
@@ -646,7 +627,17 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
                if (iscsi_target_sk_check_and_clear(conn,
                                                    LOGIN_FLAGS_WRITE_ACTIVE))
                        goto err;
+
+               /*
+                * Set the login timer thread pointer to NULL to prevent the
+                * login process from getting stuck if the initiator
+                * stops sending data.
+                */
+               rc = iscsit_set_login_timer_kworker(conn, NULL);
+               if (rc < 0)
+                       goto err;
        } else if (rc == 1) {
+               iscsit_stop_login_timer(conn);
                cancel_delayed_work(&conn->login_work);
                iscsi_target_nego_release(conn);
                iscsi_post_login_handler(np, conn, zero_tsih);
@@ -656,6 +647,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work)
 
 err:
        iscsi_target_restore_sock_callbacks(conn);
+       iscsit_stop_login_timer(conn);
        cancel_delayed_work(&conn->login_work);
        iscsi_target_login_drop(conn, login);
        iscsit_deaccess_np(np, tpg, tpg_np);
@@ -1130,6 +1122,7 @@ int iscsi_target_locate_portal(
        iscsi_target_set_sock_callbacks(conn);
 
        login->np = np;
+       conn->tpg = NULL;
 
        login_req = (struct iscsi_login_req *) login->req;
        payload_length = ntoh24(login_req->dlength);
@@ -1197,7 +1190,6 @@ int iscsi_target_locate_portal(
         */
        sessiontype = strncmp(s_buf, DISCOVERY, 9);
        if (!sessiontype) {
-               conn->tpg = iscsit_global->discovery_tpg;
                if (!login->leading_connection)
                        goto get_target;
 
@@ -1214,9 +1206,11 @@ int iscsi_target_locate_portal(
                 * Serialize access across the discovery struct iscsi_portal_group to
                 * process login attempt.
                 */
+               conn->tpg = iscsit_global->discovery_tpg;
                if (iscsit_access_np(np, conn->tpg) < 0) {
                        iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_SVC_UNAVAILABLE);
+                       conn->tpg = NULL;
                        ret = -1;
                        goto out;
                }
@@ -1368,14 +1362,30 @@ int iscsi_target_start_negotiation(
         * and perform connection cleanup now.
         */
        ret = iscsi_target_do_login(conn, login);
-       if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
-               ret = -1;
+       if (!ret) {
+               spin_lock(&conn->login_worker_lock);
+
+               if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+                       ret = -1;
+               else if (!test_bit(LOGIN_FLAGS_WORKER_RUNNING, &conn->login_flags)) {
+                       if (iscsit_set_login_timer_kworker(conn, NULL) < 0) {
+                               /*
+                                * The timeout has expired already.
+                                * Schedule login_work to perform the cleanup.
+                                */
+                               schedule_delayed_work(&conn->login_work, 0);
+                       }
+               }
+
+               spin_unlock(&conn->login_worker_lock);
+       }
 
        if (ret < 0) {
                iscsi_target_restore_sock_callbacks(conn);
                iscsi_remove_failed_auth_entry(conn);
        }
        if (ret != 0) {
+               iscsit_stop_login_timer(conn);
                cancel_delayed_work_sync(&conn->login_work);
                iscsi_target_nego_release(conn);
        }
index 26dc8ed..b14835f 100644 (file)
@@ -1040,6 +1040,57 @@ void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
        spin_unlock_bh(&conn->nopin_timer_lock);
 }
 
+void iscsit_login_timeout(struct timer_list *t)
+{
+       struct iscsit_conn *conn = from_timer(conn, t, login_timer);
+       struct iscsi_login *login = conn->login;
+
+       pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
+
+       spin_lock_bh(&conn->login_timer_lock);
+       login->login_failed = 1;
+
+       if (conn->login_kworker) {
+               pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
+                        conn->login_kworker->comm, conn->login_kworker->pid);
+               send_sig(SIGINT, conn->login_kworker, 1);
+       } else {
+               schedule_delayed_work(&conn->login_work, 0);
+       }
+       spin_unlock_bh(&conn->login_timer_lock);
+}
+
+void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+       pr_debug("Login timer started\n");
+
+       conn->login_kworker = kthr;
+       mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
+}
+
+int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
+{
+       struct iscsi_login *login = conn->login;
+       int ret = 0;
+
+       spin_lock_bh(&conn->login_timer_lock);
+       if (login->login_failed) {
+               /* The timer has already expired */
+               ret = -1;
+       } else {
+               conn->login_kworker = kthr;
+       }
+       spin_unlock_bh(&conn->login_timer_lock);
+
+       return ret;
+}
+
+void iscsit_stop_login_timer(struct iscsit_conn *conn)
+{
+       pr_debug("Login timer stopped\n");
+       timer_delete_sync(&conn->login_timer);
+}
+
 int iscsit_send_tx_data(
        struct iscsit_cmd *cmd,
        struct iscsit_conn *conn,
index 33ea799..24b8e57 100644 (file)
@@ -56,6 +56,10 @@ extern void iscsit_handle_nopin_timeout(struct timer_list *t);
 extern void __iscsit_start_nopin_timer(struct iscsit_conn *);
 extern void iscsit_start_nopin_timer(struct iscsit_conn *);
 extern void iscsit_stop_nopin_timer(struct iscsit_conn *);
+extern void iscsit_login_timeout(struct timer_list *t);
+extern void iscsit_start_login_timer(struct iscsit_conn *, struct task_struct *kthr);
+extern void iscsit_stop_login_timer(struct iscsit_conn *);
+extern int iscsit_set_login_timer_kworker(struct iscsit_conn *, struct task_struct *kthr);
 extern int iscsit_send_tx_data(struct iscsit_cmd *, struct iscsit_conn *, int);
 extern int iscsit_fe_sendpage_sg(struct iscsit_cmd *, struct iscsit_conn *);
 extern int iscsit_tx_login_rsp(struct iscsit_conn *, u8, u8);
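Taken together, the new helpers move the login timeout from a per-np timer (and later a stack timer) to a per-connection timer with an explicit ownership handshake: the expiry path sets a failure flag under login_timer_lock, and iscsit_set_login_timer_kworker() refuses to hand the timer to a new owner once that flag is set, so expiry and owner changes cannot race. The pattern in isolation, as a hedged sketch with hypothetical ex_* names:

#include <linux/sched/signal.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct ex_conn {				/* hypothetical connection */
	spinlock_t timer_lock;
	struct timer_list timer;
	struct task_struct *owner;		/* worker blocked in recv, or NULL */
	bool expired;
};

static void ex_login_timeout(struct timer_list *t)
{
	struct ex_conn *c = from_timer(c, t, timer);

	spin_lock_bh(&c->timer_lock);
	c->expired = true;			/* later owner changes must fail */
	if (c->owner)
		send_sig(SIGINT, c->owner, 1);	/* unblock the receiving worker */
	spin_unlock_bh(&c->timer_lock);
}

/* Hand the timer to a new owner; -1 means it already fired, so bail out. */
static int ex_set_owner(struct ex_conn *c, struct task_struct *kthr)
{
	int ret = 0;

	spin_lock_bh(&c->timer_lock);
	if (c->expired)
		ret = -1;
	else
		c->owner = kthr;
	spin_unlock_bh(&c->timer_lock);
	return ret;
}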
index 49702cb..3861ae0 100644 (file)
@@ -1004,8 +1004,10 @@ static u32 get_async_notif_value(optee_invoke_fn *invoke_fn, bool *value_valid,
 
        invoke_fn(OPTEE_SMC_GET_ASYNC_NOTIF_VALUE, 0, 0, 0, 0, 0, 0, 0, &res);
 
-       if (res.a0)
+       if (res.a0) {
+               *value_valid = false;
                return 0;
+       }
        *value_valid = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_VALID);
        *value_pending = (res.a2 & OPTEE_SMC_ASYNC_NOTIF_VALUE_PENDING);
        return res.a1;
index 810231b..5e11642 100644 (file)
@@ -131,7 +131,7 @@ static ssize_t available_uuids_show(struct device *dev,
 
        for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
                if (priv->uuid_bitmap & (1 << i))
-                       length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
+                       length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]);
        }
 
        return length;
@@ -149,7 +149,7 @@ static ssize_t current_uuid_show(struct device *dev,
 
        for (i = 0; i <= INT3400_THERMAL_CRITICAL; i++) {
                if (priv->os_uuid_mask & BIT(i))
-                       length += sysfs_emit_at(buf, length, int3400_thermal_uuids[i]);
+                       length += sysfs_emit_at(buf, length, "%s\n", int3400_thermal_uuids[i]);
        }
 
        if (length)
index d76e923..c0aee5d 100644 (file)
@@ -54,6 +54,21 @@ static int ring_interrupt_index(const struct tb_ring *ring)
        return bit;
 }
 
+static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
+{
+       if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+               return;
+       iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
+}
+
+static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
+{
+       if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+               ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
+       else
+               iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
+}
+
 /*
  * ring_interrupt_active() - activate/deactivate interrupts for a single ring
  *
@@ -61,8 +76,8 @@ static int ring_interrupt_index(const struct tb_ring *ring)
  */
 static void ring_interrupt_active(struct tb_ring *ring, bool active)
 {
-       int reg = REG_RING_INTERRUPT_BASE +
-                 ring_interrupt_index(ring) / 32 * 4;
+       int index = ring_interrupt_index(ring) / 32 * 4;
+       int reg = REG_RING_INTERRUPT_BASE + index;
        int interrupt_bit = ring_interrupt_index(ring) & 31;
        int mask = 1 << interrupt_bit;
        u32 old, new;
@@ -123,7 +138,11 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
                                         "interrupt for %s %d is already %s\n",
                                         RING_TYPE(ring), ring->hop,
                                         active ? "enabled" : "disabled");
-       iowrite32(new, ring->nhi->iobase + reg);
+
+       if (active)
+               iowrite32(new, ring->nhi->iobase + reg);
+       else
+               nhi_mask_interrupt(ring->nhi, mask, index);
 }
 
 /*
@@ -136,11 +155,11 @@ static void nhi_disable_interrupts(struct tb_nhi *nhi)
        int i = 0;
        /* disable interrupts */
        for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
-               iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);
+               nhi_mask_interrupt(nhi, ~0, 4 * i);
 
        /* clear interrupt status bits */
        for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
-               ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
+               nhi_clear_interrupt(nhi, 4 * i);
 }
 
 /* ring helper methods */
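The two helpers concentrate the QUIRK_AUTO_CLEAR_INT decision in one place: quirky hardware clears ring interrupt status by reading the notify register, while newer hardware clears it by writing ~0 to a dedicated clear register and masks rings through a write-one-to-clear mask register instead of read-modify-write on the shared enable register. Callers such as nhi_disable_interrupts() stay quirk-agnostic. A hedged sketch of the shape with hypothetical ex_* names:

#include <linux/io.h>

static void ex_clear_irq_status(struct ex_hw *hw, int off)
{
	if (hw->quirks & EX_QUIRK_AUTO_CLEAR)
		(void)ioread32(hw->iobase + EX_REG_STATUS + off);	/* read-to-clear */
	else
		iowrite32(~0, hw->iobase + EX_REG_INT_CLEAR + off);	/* write-one-to-clear */
}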
index faef165..6ba2958 100644 (file)
@@ -93,6 +93,8 @@ struct ring_desc {
 #define REG_RING_INTERRUPT_BASE        0x38200
 #define RING_INTERRUPT_REG_COUNT(nhi) ((31 + 2 * nhi->hop_count) / 32)
 
+#define REG_RING_INTERRUPT_MASK_CLEAR_BASE     0x38208
+
 #define REG_INT_THROTTLING_RATE        0x38c00
 
 /* Interrupt Vector Allocation */
index f801b1f..af0e1c0 100644 (file)
@@ -1012,7 +1012,7 @@ static int brcmuart_probe(struct platform_device *pdev)
        of_property_read_u32(np, "clock-frequency", &clk_rate);
 
        /* See if a Baud clock has been specified */
-       baud_mux_clk = of_clk_get_by_name(np, "sw_baud");
+       baud_mux_clk = devm_clk_get(dev, "sw_baud");
        if (IS_ERR(baud_mux_clk)) {
                if (PTR_ERR(baud_mux_clk) == -EPROBE_DEFER) {
                        ret = -EPROBE_DEFER;
@@ -1032,7 +1032,7 @@ static int brcmuart_probe(struct platform_device *pdev)
        if (clk_rate == 0) {
                dev_err(dev, "clock-frequency or clk not defined\n");
                ret = -EINVAL;
-               goto release_dma;
+               goto err_clk_disable;
        }
 
        dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
@@ -1119,6 +1119,8 @@ err1:
        serial8250_unregister_port(priv->line);
 err:
        brcmuart_free_bufs(dev, priv);
+err_clk_disable:
+       clk_disable_unprepare(baud_mux_clk);
 release_dma:
        if (priv->dma_enabled)
                brcmuart_arbitration(priv, 0);
@@ -1133,6 +1135,7 @@ static int brcmuart_remove(struct platform_device *pdev)
        hrtimer_cancel(&priv->hrt);
        serial8250_unregister_port(priv->line);
        brcmuart_free_bufs(&pdev->dev, priv);
+       clk_disable_unprepare(priv->baud_mux_clk);
        if (priv->dma_enabled)
                brcmuart_arbitration(priv, 0);
        return 0;
index 64770c6..b406cba 100644 (file)
 #define PCI_DEVICE_ID_COMMTECH_4224PCIE                0x0020
 #define PCI_DEVICE_ID_COMMTECH_4228PCIE                0x0021
 #define PCI_DEVICE_ID_COMMTECH_4222PCIE                0x0022
+
 #define PCI_DEVICE_ID_EXAR_XR17V4358           0x4358
 #define PCI_DEVICE_ID_EXAR_XR17V8358           0x8358
 
+#define PCI_SUBDEVICE_ID_USR_2980              0x0128
+#define PCI_SUBDEVICE_ID_USR_2981              0x0129
+
 #define PCI_DEVICE_ID_SEALEVEL_710xC           0x1001
 #define PCI_DEVICE_ID_SEALEVEL_720xC           0x1002
 #define PCI_DEVICE_ID_SEALEVEL_740xC           0x1004
@@ -829,6 +833,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
                (kernel_ulong_t)&bd                     \
        }
 
+#define USR_DEVICE(devid, sdevid, bd) {                        \
+       PCI_DEVICE_SUB(                                 \
+               PCI_VENDOR_ID_USR,                      \
+               PCI_DEVICE_ID_EXAR_##devid,             \
+               PCI_VENDOR_ID_EXAR,                     \
+               PCI_SUBDEVICE_ID_USR_##sdevid), 0, 0,   \
+               (kernel_ulong_t)&bd                     \
+       }
+
 static const struct pci_device_id exar_pci_tbl[] = {
        EXAR_DEVICE(ACCESSIO, COM_2S, pbn_exar_XR17C15x),
        EXAR_DEVICE(ACCESSIO, COM_4S, pbn_exar_XR17C15x),
@@ -853,6 +866,10 @@ static const struct pci_device_id exar_pci_tbl[] = {
 
        IBM_DEVICE(XR17C152, SATURN_SERIAL_ONE_PORT, pbn_exar_ibm_saturn),
 
+       /* USRobotics USR298x-OEM PCI Modems */
+       USR_DEVICE(XR17C152, 2980, pbn_exar_XR17C15x),
+       USR_DEVICE(XR17C152, 2981, pbn_exar_XR17C15x),
+
        /* Exar Corp. XR17C15[248] Dual/Quad/Octal UART */
        EXAR_DEVICE(EXAR, XR17C152, pbn_exar_XR17C15x),
        EXAR_DEVICE(EXAR, XR17C154, pbn_exar_XR17C15x),
index c55be6f..e80c4f6 100644 (file)
@@ -1920,6 +1920,8 @@ pci_moxa_setup(struct serial_private *priv,
 #define PCI_SUBDEVICE_ID_SIIG_DUAL_30  0x2530
 #define PCI_VENDOR_ID_ADVANTECH                0x13fe
 #define PCI_DEVICE_ID_INTEL_CE4100_UART 0x2e66
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600        0x1600
+#define PCI_DEVICE_ID_ADVANTECH_PCI1600_1611   0x1611
 #define PCI_DEVICE_ID_ADVANTECH_PCI3620        0x3620
 #define PCI_DEVICE_ID_ADVANTECH_PCI3618        0x3618
 #define PCI_DEVICE_ID_ADVANTECH_PCIf618        0xf618
@@ -4085,6 +4087,9 @@ static SIMPLE_DEV_PM_OPS(pciserial_pm_ops, pciserial_suspend_one,
                         pciserial_resume_one);
 
 static const struct pci_device_id serial_pci_tbl[] = {
+       {       PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI1600,
+               PCI_DEVICE_ID_ADVANTECH_PCI1600_1611, PCI_ANY_ID, 0, 0,
+               pbn_b0_4_921600 },
        /* Advantech use PCI_DEVICE_ID_ADVANTECH_PCI3620 (0x3620) as 'PCI_SUBVENDOR_ID' */
        {       PCI_VENDOR_ID_ADVANTECH, PCI_DEVICE_ID_ADVANTECH_PCI3620,
                PCI_DEVICE_ID_ADVANTECH_PCI3620, 0x0001, 0, 0,
index fe8d79c..c153ba3 100644 (file)
@@ -669,6 +669,7 @@ EXPORT_SYMBOL_GPL(serial8250_em485_supported);
 /**
  * serial8250_em485_config() - generic ->rs485_config() callback
  * @port: uart port
+ * @termios: termios structure
  * @rs485: rs485 settings
  *
  * Generic callback usable by 8250 uart drivers to activate rs485 settings
index 2509e7f..89956bb 100644 (file)
@@ -113,13 +113,15 @@ static int tegra_uart_probe(struct platform_device *pdev)
 
        ret = serial8250_register_8250_port(&port8250);
        if (ret < 0)
-               goto err_clkdisable;
+               goto err_ctrl_assert;
 
        platform_set_drvdata(pdev, uart);
        uart->line = ret;
 
        return 0;
 
+err_ctrl_assert:
+       reset_control_assert(uart->rst);
 err_clkdisable:
        clk_disable_unprepare(uart->clk);
 
index 398e5aa..3e3fb37 100644 (file)
@@ -762,7 +762,7 @@ config SERIAL_PMACZILOG_CONSOLE
 
 config SERIAL_CPM
        tristate "CPM SCC/SMC serial port support"
-       depends on CPM2 || CPM1 || (PPC32 && COMPILE_TEST)
+       depends on CPM2 || CPM1
        select SERIAL_CORE
        help
          This driver supports the SCC and SMC serial ports on Motorola 
index 59e25f2..4b2512e 100644 (file)
@@ -606,10 +606,11 @@ static int arc_serial_probe(struct platform_device *pdev)
        }
        uart->baud = val;
 
-       port->membase = of_iomap(np, 0);
-       if (!port->membase)
+       port->membase = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(port->membase)) {
                /* No point of dev_err since UART itself is hosed here */
-               return -ENXIO;
+               return PTR_ERR(port->membase);
+       }
 
        port->irq = irq_of_parse_and_map(np, 0);
 
index 0577618..46c03ed 100644 (file)
@@ -19,8 +19,6 @@ struct gpio_desc;
 #include "cpm_uart_cpm2.h"
 #elif defined(CONFIG_CPM1)
 #include "cpm_uart_cpm1.h"
-#elif defined(CONFIG_COMPILE_TEST)
-#include "cpm_uart_cpm2.h"
 #endif
 
 #define SERIAL_CPM_MAJOR       204
index c91916e..7486a2b 100644 (file)
@@ -1495,34 +1495,36 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
 
 static void lpuart32_break_ctl(struct uart_port *port, int break_state)
 {
-       unsigned long temp, modem;
-       struct tty_struct *tty;
-       unsigned int cflag = 0;
-
-       tty = tty_port_tty_get(&port->state->port);
-       if (tty) {
-               cflag = tty->termios.c_cflag;
-               tty_kref_put(tty);
-       }
+       unsigned long temp;
 
-       temp = lpuart32_read(port, UARTCTRL) & ~UARTCTRL_SBK;
-       modem = lpuart32_read(port, UARTMODIR);
+       temp = lpuart32_read(port, UARTCTRL);
 
+       /*
+        * The LPUART IP has two known bugs. First, CTS has a higher priority
+        * than the break signal: a break sent through UARTCTRL_SBK may be
+        * affected by the CTS input if hardware flow control is enabled. This
+        * exists on all platforms supported by this driver.
+        * Second, the i.MX8QM LPUART may send an additional break character
+        * after SBK has been cleared.
+        * To avoid both bugs, use the Transmit Data Inversion function to
+        * send the break signal instead of UARTCTRL_SBK.
+        */
        if (break_state != 0) {
-               temp |= UARTCTRL_SBK;
                /*
-                * LPUART CTS has higher priority than SBK, need to disable CTS before
-                * asserting SBK to avoid any interference if flow control is enabled.
+                * Disable the transmitter to prevent any data from being sent out
+                * during break, then invert the TX line to send break.
                 */
-               if (cflag & CRTSCTS && modem & UARTMODIR_TXCTSE)
-                       lpuart32_write(port, modem & ~UARTMODIR_TXCTSE, UARTMODIR);
+               temp &= ~UARTCTRL_TE;
+               lpuart32_write(port, temp, UARTCTRL);
+               temp |= UARTCTRL_TXINV;
+               lpuart32_write(port, temp, UARTCTRL);
        } else {
-               /* Re-enable the CTS when break off. */
-               if (cflag & CRTSCTS && !(modem & UARTMODIR_TXCTSE))
-                       lpuart32_write(port, modem | UARTMODIR_TXCTSE, UARTMODIR);
+               /* Disable the TXINV to turn off break and re-enable transmitter. */
+               temp &= ~UARTCTRL_TXINV;
+               lpuart32_write(port, temp, UARTCTRL);
+               temp |= UARTCTRL_TE;
+               lpuart32_write(port, temp, UARTCTRL);
        }
-
-       lpuart32_write(port, temp, UARTCTRL);
 }
 
 static void lpuart_setup_watermark(struct lpuart_port *sport)
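A line-level note may help here: a UART TX line idles high, and a break is by definition the line held low for longer than one character time, so disabling the transmitter and then inverting TX produces exactly a break, with the ordering chosen so that no character can be emitted inverted. Sketched in comment form, as a hedged reading of the sequence above:

/*
 *   idle:      TE set,  TXINV clear  -> line high
 *   assert:    1) clear TE    (no data while the line is forced)
 *              2) set TXINV   -> inverted idle, line low == break
 *   deassert:  1) clear TXINV -> line returns high (idle)
 *              2) set TE      -> transmission may resume cleanly
 */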
index 08dc3e2..8582479 100644 (file)
@@ -1664,19 +1664,18 @@ static int qcom_geni_serial_probe(struct platform_device *pdev)
        uport->private_data = &port->private_data;
        platform_set_drvdata(pdev, port);
 
-       ret = uart_add_one_port(drv, uport);
-       if (ret)
-               return ret;
-
        irq_set_status_flags(uport->irq, IRQ_NOAUTOEN);
        ret = devm_request_irq(uport->dev, uport->irq, qcom_geni_serial_isr,
                        IRQF_TRIGGER_HIGH, port->name, uport);
        if (ret) {
                dev_err(uport->dev, "Failed to get IRQ ret %d\n", ret);
-               uart_remove_one_port(drv, uport);
                return ret;
        }
 
+       ret = uart_add_one_port(drv, uport);
+       if (ret)
+               return ret;
+
        /*
         * Set pm_runtime status as ACTIVE so that wakeup_irq gets
         * enabled/disabled from dev_pm_arm_wake_irq during system
index 498ba9c..829c4be 100644 (file)
@@ -656,10 +656,17 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
                        }
                }
 
-               /* The vcs_size might have changed while we slept to grab
-                * the user buffer, so recheck.
+               /* The vc might have been freed or vcs_size might have changed
+                * while we slept to grab the user buffer, so recheck.
                 * Return data written up to now on failure.
                 */
+               vc = vcs_vc(inode, &viewed);
+               if (!vc) {
+                       if (written)
+                               break;
+                       ret = -ENXIO;
+                       goto unlock_out;
+               }
                size = vcs_size(vc, attr, false);
                if (size < 0) {
                        if (written)
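
The fix follows the standard revalidate-after-sleep pattern: copying from the user buffer may sleep, so every pointer derived from console state must be re-fetched afterwards. A condensed sketch of the loop body, assuming the vcs_vc() and vcs_size() helpers used above:

                /* ... copying from the user buffer above may have slept ... */
                vc = vcs_vc(inode, &viewed);    /* revalidate: vc may be gone */
                if (!vc) {
                        if (written)
                                break;          /* report partial progress */
                        ret = -ENXIO;
                        goto unlock_out;
                }
                size = vcs_size(vc, attr, false);  /* size may have changed too */
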
index 202ff71..51b3c6a 100644 (file)
@@ -150,7 +150,8 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
        u32 hba_maxq, rem, tot_queues;
        struct Scsi_Host *host = hba->host;
 
-       hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities);
+       /* maxq is a 0-based value */
+       hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
 
        tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues +
                        rw_queues;
@@ -265,7 +266,7 @@ static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
        addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
                hba->ucdl_dma_addr;
 
-       return div_u64(addr, sizeof(struct utp_transfer_cmd_desc));
+       return div_u64(addr, ufshcd_get_ucd_size(hba));
 }
 
 static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
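
MAX_QUEUE_SUP is a 0-based field, so a raw value of N means N + 1 queues. A worked example with a hypothetical capability word:

        /* hypothetical: FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) == 7 */
        hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1; /* 8 */

        /*
         * Before the fix, a configuration needing exactly 8 queues was
         * rejected by the later "tot_queues > hba_maxq" check even though
         * the controller supports it.
         */
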
index 45fd374..e7e79f5 100644 (file)
@@ -2849,10 +2849,10 @@ static void ufshcd_map_queues(struct Scsi_Host *shost)
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 {
        struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
-               i * sizeof_utp_transfer_cmd_desc(hba);
+               i * ufshcd_get_ucd_size(hba);
        struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
        dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
-               i * sizeof_utp_transfer_cmd_desc(hba);
+               i * ufshcd_get_ucd_size(hba);
        u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
                                       response_upiu);
        u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -3761,7 +3761,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
        size_t utmrdl_size, utrdl_size, ucdl_size;
 
        /* Allocate memory for UTP command descriptors */
-       ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs;
+       ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
        hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
                                                  ucdl_size,
                                                  &hba->ucdl_dma_addr,
@@ -3861,7 +3861,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
        prdt_offset =
                offsetof(struct utp_transfer_cmd_desc, prd_table);
 
-       cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
+       cmd_desc_size = ufshcd_get_ucd_size(hba);
        cmd_desc_dma_addr = hba->ucdl_dma_addr;
 
        for (i = 0; i < hba->nutrs; i++) {
@@ -8452,7 +8452,7 @@ static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
 {
        size_t ucdl_size, utrdl_size;
 
-       ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs;
+       ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
        dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
                           hba->ucdl_dma_addr);
 
index ccfaebc..1dcadef 100644 (file)
@@ -2097,6 +2097,19 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable)
        else
                priv_ep->trb_burst_size = 16;
 
+       /*
+        * Versions preceding DEV_VER_V2, for example i.MX8QM, have bugs in
+        * the DMA. They occur when the trb_burst_size exceeds 16 and the
+        * address is not aligned to 128 bytes (a product of the 64-bit AXI
+        * bus and an AXI maximum burst length of 16, i.e. 0xF+1 in
+        * dma_axi_ctrl0[3:0]). This results in data corruption when the
+        * transfer crosses a 4K boundary; the corruption spans from position
+        * (4K - (address & 0x7F)) to 4K.
+        *
+        * So force trb_burst_size to 16 on such platforms.
+        */
+       if (priv_dev->dev_ver < DEV_VER_V2)
+               priv_ep->trb_burst_size = 16;
+
        mult = min_t(u8, mult, EP_CFG_MULT_MAX);
        buffering = min_t(u8, buffering, EP_CFG_BUFFERING_MAX);
        maxburst = min_t(u8, maxburst, EP_CFG_MAXBURST_MAX);
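
The corruption window in the comment can be computed directly. A small sketch with a hypothetical, non-128-byte-aligned buffer address:

        dma_addr_t addr = 0x80001030;                   /* hypothetical */
        unsigned int misalign = addr & 0x7F;            /* 0x30 */
        unsigned int corrupt_from = SZ_4K - misalign;   /* 0xFD0 */

        /*
         * With trb_burst_size > 16 on a pre-DEV_VER_V2 core, bytes
         * corrupt_from..0xFFF of the 4K page may be corrupted, hence the
         * forced burst size of 16 above.
         */
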
index 4bb6d30..311007b 100644 (file)
@@ -1928,6 +1928,8 @@ static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
 
        if (request.req.wLength > USBTMC_BUFSIZE)
                return -EMSGSIZE;
+       if (request.req.wLength == 0)   /* Length-0 requests are never IN */
+               request.req.bRequestType &= ~USB_DIR_IN;
 
        is_in = request.req.bRequestType & USB_DIR_IN;
 
index fbb087b..268ccbe 100644 (file)
@@ -172,3 +172,44 @@ void hcd_buffer_free(
        }
        dma_free_coherent(hcd->self.sysdev, size, addr, dma);
 }
+
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+               size_t size, gfp_t mem_flags, dma_addr_t *dma)
+{
+       if (size == 0)
+               return NULL;
+
+       if (hcd->localmem_pool)
+               return gen_pool_dma_alloc_align(hcd->localmem_pool,
+                               size, dma, PAGE_SIZE);
+
+       /* some USB hosts just use PIO */
+       if (!hcd_uses_dma(hcd)) {
+               *dma = DMA_MAPPING_ERROR;
+               return (void *)__get_free_pages(mem_flags,
+                               get_order(size));
+       }
+
+       return dma_alloc_coherent(hcd->self.sysdev,
+                       size, dma, mem_flags);
+}
+
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+               size_t size, void *addr, dma_addr_t dma)
+{
+       if (!addr)
+               return;
+
+       if (hcd->localmem_pool) {
+               gen_pool_free(hcd->localmem_pool,
+                               (unsigned long)addr, size);
+               return;
+       }
+
+       if (!hcd_uses_dma(hcd)) {
+               free_pages((unsigned long)addr, get_order(size));
+               return;
+       }
+
+       dma_free_coherent(hcd->self.sysdev, size, addr, dma);
+}
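
The new pair mirrors usb_alloc_coherent()/usb_free_coherent() but always returns whole pages and reports the PIO case through the DMA handle rather than through HCD flags. A usage sketch, assuming a valid struct usb_hcd *hcd:

        dma_addr_t dma = DMA_MAPPING_ERROR;
        void *mem;

        mem = hcd_buffer_alloc_pages(hcd, size, GFP_KERNEL, &dma);
        if (!mem)
                return -ENOMEM;

        /* dma == DMA_MAPPING_ERROR means plain pages, usable only for PIO */

        hcd_buffer_free_pages(hcd, size, mem, dma);
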
index e501a03..fcf6881 100644 (file)
@@ -186,6 +186,7 @@ static int connected(struct usb_dev_state *ps)
 static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
 {
        struct usb_dev_state *ps = usbm->ps;
+       struct usb_hcd *hcd = bus_to_hcd(ps->dev->bus);
        unsigned long flags;
 
        spin_lock_irqsave(&ps->lock, flags);
@@ -194,8 +195,8 @@ static void dec_usb_memory_use_count(struct usb_memory *usbm, int *count)
                list_del(&usbm->memlist);
                spin_unlock_irqrestore(&ps->lock, flags);
 
-               usb_free_coherent(ps->dev, usbm->size, usbm->mem,
-                               usbm->dma_handle);
+               hcd_buffer_free_pages(hcd, usbm->size,
+                               usbm->mem, usbm->dma_handle);
                usbfs_decrease_memory_usage(
                        usbm->size + sizeof(struct usb_memory));
                kfree(usbm);
@@ -234,7 +235,7 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
        size_t size = vma->vm_end - vma->vm_start;
        void *mem;
        unsigned long flags;
-       dma_addr_t dma_handle;
+       dma_addr_t dma_handle = DMA_MAPPING_ERROR;
        int ret;
 
        ret = usbfs_increase_memory_usage(size + sizeof(struct usb_memory));
@@ -247,8 +248,8 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
                goto error_decrease_mem;
        }
 
-       mem = usb_alloc_coherent(ps->dev, size, GFP_USER | __GFP_NOWARN,
-                       &dma_handle);
+       mem = hcd_buffer_alloc_pages(hcd,
+                       size, GFP_USER | __GFP_NOWARN, &dma_handle);
        if (!mem) {
                ret = -ENOMEM;
                goto error_free_usbm;
@@ -264,7 +265,14 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma)
        usbm->vma_use_count = 1;
        INIT_LIST_HEAD(&usbm->memlist);
 
-       if (hcd->localmem_pool || !hcd_uses_dma(hcd)) {
+       /*
+        * When DMA is unavailable, hcd_buffer_alloc_pages() allocates
+        * normal pages and sets dma_handle to DMA_MAPPING_ERROR. Check for
+        * that case, then use remap_pfn_range() to map normal pages, or
+        * dma_mmap_coherent() to map DMA pages, into user space.
+        */
+       if (dma_handle == DMA_MAPPING_ERROR) {
                if (remap_pfn_range(vma, vma->vm_start,
                                    virt_to_phys(usbm->mem) >> PAGE_SHIFT,
                                    size, vma->vm_page_prot) < 0) {
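
Only the remap_pfn_range() path is visible in this hunk; the comment also names the DMA path. A condensed sketch of the full branch under the same assumptions (the error label is illustrative):

        if (dma_handle == DMA_MAPPING_ERROR) {
                /* plain pages: map their physical frames directly */
                if (remap_pfn_range(vma, vma->vm_start,
                                    virt_to_phys(usbm->mem) >> PAGE_SHIFT,
                                    size, vma->vm_page_prot) < 0)
                        goto error;
        } else {
                /* DMA-coherent memory: let the DMA layer build the mapping */
                if (dma_mmap_coherent(hcd->self.sysdev, vma, usbm->mem,
                                      dma_handle, size) < 0)
                        goto error;
        }
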
index 0beaab9..7b2ce01 100644 (file)
@@ -1137,7 +1137,7 @@ static int dwc3_core_init(struct dwc3 *dwc)
 
        dwc3_set_incr_burst_type(dwc);
 
-       dwc3_phy_power_on(dwc);
+       ret = dwc3_phy_power_on(dwc);
        if (ret)
                goto err_exit_phy;
 
index d56457c..1f043c3 100644 (file)
@@ -1116,6 +1116,7 @@ struct dwc3_scratchpad_array {
  * @dis_metastability_quirk: set to disable metastability quirk.
  * @dis_split_quirk: set to disable split boundary.
  * @wakeup_configured: set if the device is configured for remote wakeup.
+ * @suspended: set to track suspend event due to U3/L2.
  * @imod_interval: set the interrupt moderation interval in 250ns
  *                     increments or 0 to disable.
  * @max_cfg_eps: current max number of IN eps used across all USB configs.
@@ -1332,6 +1333,7 @@ struct dwc3 {
        unsigned                dis_split_quirk:1;
        unsigned                async_callbacks:1;
        unsigned                wakeup_configured:1;
+       unsigned                suspended:1;
 
        u16                     imod_interval;
 
index e4a2560..ebf0346 100644 (file)
@@ -332,6 +332,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
        unsigned int            current_mode;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@@ -350,6 +355,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
        }
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -395,6 +402,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = s->private;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@@ -414,6 +426,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
                seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
        }
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -463,6 +477,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = s->private;
        unsigned long           flags;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -493,6 +512,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
                seq_printf(s, "UNKNOWN %d\n", reg);
        }
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -509,6 +530,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
        unsigned long           flags;
        u32                     testmode = 0;
        char                    buf[32];
+       int                     ret;
 
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
@@ -526,10 +548,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
        else
                testmode = 0;
 
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irqsave(&dwc->lock, flags);
        dwc3_gadget_set_test_mode(dwc, testmode);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return count;
 }
 
@@ -548,12 +576,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
        enum dwc3_link_state    state;
        u32                     reg;
        u8                      speed;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
        if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
                seq_puts(s, "Not available\n");
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return 0;
        }
 
@@ -566,6 +600,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
                   dwc3_gadget_hs_link_string(state));
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -584,6 +620,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
        char                    buf[32];
        u32                     reg;
        u8                      speed;
+       int                     ret;
 
        if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
                return -EFAULT;
@@ -603,10 +640,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
        else
                return -EINVAL;
 
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irqsave(&dwc->lock, flags);
        reg = dwc3_readl(dwc->regs, DWC3_GSTS);
        if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return -EINVAL;
        }
 
@@ -616,12 +658,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
        if (speed < DWC3_DSTS_SUPERSPEED &&
            state != DWC3_LINK_STATE_RECOV) {
                spin_unlock_irqrestore(&dwc->lock, flags);
+               pm_runtime_put_sync(dwc->dev);
                return -EINVAL;
        }
 
        dwc3_gadget_set_link_state(dwc, state);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return count;
 }
 
@@ -645,6 +690,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
        unsigned long           flags;
        u32                     mdwidth;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@@ -657,6 +707,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -667,6 +719,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
        unsigned long           flags;
        u32                     mdwidth;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@@ -679,6 +736,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -688,12 +747,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -703,12 +769,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -718,12 +791,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -733,12 +813,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -748,12 +835,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        u32                     val;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
        seq_printf(s, "%u\n", val);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -798,6 +892,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
        struct dwc3             *dwc = dep->dwc;
        unsigned long           flags;
        int                     i;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        if (dep->number <= 1) {
@@ -827,6 +926,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
 out:
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -839,6 +940,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
        u32                     lower_32_bits;
        u32                     upper_32_bits;
        u32                     reg;
+       int                     ret;
+
+       ret = pm_runtime_resume_and_get(dwc->dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&dwc->lock, flags);
        reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@@ -851,6 +957,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
        seq_printf(s, "0x%016llx\n", ep_info);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
+       pm_runtime_put_sync(dwc->dev);
+
        return 0;
 }
 
@@ -910,6 +1018,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
        dwc->regset->regs = dwc3_regs;
        dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
        dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
+       dwc->regset->dev = dwc->dev;
 
        root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
        dwc->debug_root = root;
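
Every handler in this file now follows the same bracketing pattern, since the controller's registers are unreadable while it is runtime suspended. A minimal template (the function name is hypothetical):

static int dwc3_example_show(struct seq_file *s, void *unused)
{
        struct dwc3             *dwc = s->private;
        unsigned long           flags;
        int                     ret;

        /* wake the controller before touching any register */
        ret = pm_runtime_resume_and_get(dwc->dev);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&dwc->lock, flags);
        /* read registers, seq_printf() the result */
        spin_unlock_irqrestore(&dwc->lock, flags);

        /* every return path after the get, early or not, must drop it */
        pm_runtime_put_sync(dwc->dev);

        return 0;
}
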
index c0ca4d1..d831f5a 100644 (file)
@@ -2440,6 +2440,7 @@ static int dwc3_gadget_func_wakeup(struct usb_gadget *g, int intf_id)
                        return -EINVAL;
                }
                dwc3_resume_gadget(dwc);
+               dwc->suspended = false;
                dwc->link_state = DWC3_LINK_STATE_U0;
        }
 
@@ -2699,6 +2700,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
        return ret;
 }
 
+static int dwc3_gadget_soft_connect(struct dwc3 *dwc)
+{
+       /*
+        * The Synopsys DWC_usb31 1.90a programming guide, section
+        * 4.1.9, specifies that a reconnect after a device-initiated
+        * disconnect requires a core soft reset (DCTL.CSftRst) before
+        * enabling the run/stop bit.
+        */
+       dwc3_core_soft_reset(dwc);
+
+       dwc3_event_buffers_setup(dwc);
+       __dwc3_gadget_start(dwc);
+       return dwc3_gadget_run_stop(dwc, true);
+}
+
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
        struct dwc3             *dwc = gadget_to_dwc(g);
@@ -2737,21 +2753,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 
        synchronize_irq(dwc->irq_gadget);
 
-       if (!is_on) {
+       if (!is_on)
                ret = dwc3_gadget_soft_disconnect(dwc);
-       } else {
-               /*
-                * In the Synopsys DWC_usb31 1.90a programming guide section
-                * 4.1.9, it specifies that for a reconnect after a
-                * device-initiated disconnect requires a core soft reset
-                * (DCTL.CSftRst) before enabling the run/stop bit.
-                */
-               dwc3_core_soft_reset(dwc);
-
-               dwc3_event_buffers_setup(dwc);
-               __dwc3_gadget_start(dwc);
-               ret = dwc3_gadget_run_stop(dwc, true);
-       }
+       else
+               ret = dwc3_gadget_soft_connect(dwc);
 
        pm_runtime_put(dwc->dev);
 
@@ -3938,6 +3943,8 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
 {
        int                     reg;
 
+       dwc->suspended = false;
+
        dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RX_DET);
 
        reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -3962,6 +3969,8 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
 {
        u32                     reg;
 
+       dwc->suspended = false;
+
        /*
         * Ideally, dwc3_reset_gadget() would trigger the function
         * drivers to stop any active transfers through ep disable.
@@ -4180,6 +4189,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
 
 static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, unsigned int evtinfo)
 {
+       dwc->suspended = false;
+
        /*
         * TODO take core out of low power mode when that's
         * implemented.
@@ -4277,6 +4288,7 @@ static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
                if (dwc->gadget->wakeup_armed) {
                        dwc3_gadget_enable_linksts_evts(dwc, false);
                        dwc3_resume_gadget(dwc);
+                       dwc->suspended = false;
                }
                break;
        case DWC3_LINK_STATE_U1:
@@ -4303,8 +4315,10 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
 {
        enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
 
-       if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+       if (!dwc->suspended && next == DWC3_LINK_STATE_U3) {
+               dwc->suspended = true;
                dwc3_suspend_gadget(dwc);
+       }
 
        dwc->link_state = next;
 }
@@ -4655,42 +4669,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 int dwc3_gadget_suspend(struct dwc3 *dwc)
 {
        unsigned long flags;
+       int ret;
 
        if (!dwc->gadget_driver)
                return 0;
 
-       dwc3_gadget_run_stop(dwc, false);
+       ret = dwc3_gadget_soft_disconnect(dwc);
+       if (ret)
+               goto err;
 
        spin_lock_irqsave(&dwc->lock, flags);
        dwc3_disconnect_gadget(dwc);
-       __dwc3_gadget_stop(dwc);
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;
+
+err:
+       /*
+        * Attempt to reset the controller's state. Likely no
+        * communication can be established until the host
+        * performs a port reset.
+        */
+       if (dwc->softconnect)
+               dwc3_gadget_soft_connect(dwc);
+
+       return ret;
 }
 
 int dwc3_gadget_resume(struct dwc3 *dwc)
 {
-       int                     ret;
-
        if (!dwc->gadget_driver || !dwc->softconnect)
                return 0;
 
-       ret = __dwc3_gadget_start(dwc);
-       if (ret < 0)
-               goto err0;
-
-       ret = dwc3_gadget_run_stop(dwc, true);
-       if (ret < 0)
-               goto err1;
-
-       return 0;
-
-err1:
-       __dwc3_gadget_stop(dwc);
-
-err0:
-       return ret;
+       return dwc3_gadget_soft_connect(dwc);
 }
 
 void dwc3_gadget_process_pending_events(struct dwc3 *dwc)
index a13c946..f41a385 100644 (file)
@@ -3535,6 +3535,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
        /* Drain any pending AIO completions */
        drain_workqueue(ffs->io_completion_wq);
 
+       ffs_event_add(ffs, FUNCTIONFS_UNBIND);
        if (!--opts->refcnt)
                functionfs_unbind(ffs);
 
@@ -3559,7 +3560,6 @@ static void ffs_func_unbind(struct usb_configuration *c,
        func->function.ssp_descriptors = NULL;
        func->interfaces_nums = NULL;
 
-       ffs_event_add(ffs, FUNCTIONFS_UNBIND);
 }
 
 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
index 6956ad8..a366abb 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/string_helpers.h>
 #include <linux/usb/composite.h>
 
 #include "u_ether.h"
@@ -965,6 +966,8 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
        dev = netdev_priv(net);
        snprintf(host_addr, len, "%pm", dev->host_mac);
 
+       string_upper(host_addr, host_addr);
+
        return strlen(host_addr);
 }
 EXPORT_SYMBOL_GPL(gether_get_host_addr_cdc);
index c80f9bd..a36913a 100644 (file)
@@ -170,6 +170,9 @@ static int udc_pci_probe(
                retval = -ENODEV;
                goto err_probe;
        }
+
+       udc = dev;
+
        return 0;
 
 err_probe:
index 4641153..52e6d2e 100644 (file)
@@ -37,10 +37,6 @@ static const struct bus_type gadget_bus_type;
  * @vbus: for udcs who care about vbus status, this value is real vbus status;
  * for udcs who do not care about vbus status, this value is always true
  * @started: the UDC's started state. True if the UDC had started.
- * @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
- * functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
- * usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
- * called with this lock held.
  *
  * This represents the internal data structure which is used by the UDC-class
  * to hold information about udc driver and gadget together.
@@ -52,7 +48,6 @@ struct usb_udc {
        struct list_head                list;
        bool                            vbus;
        bool                            started;
-       struct mutex                    connect_lock;
 };
 
 static struct class *udc_class;
@@ -692,9 +687,17 @@ out:
 }
 EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
 
-/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
-static int usb_gadget_connect_locked(struct usb_gadget *gadget)
-       __must_hold(&gadget->udc->connect_lock)
+/**
+ * usb_gadget_connect - software-controlled connect to USB host
+ * @gadget: the peripheral being connected
+ *
+ * Enables the D+ (or potentially D-) pullup.  The host will start
+ * enumerating this gadget when the pullup is active and a VBUS session
+ * is active (the link is powered).
+ *
+ * Returns zero on success, else negative errno.
+ */
+int usb_gadget_connect(struct usb_gadget *gadget)
 {
        int ret = 0;
 
@@ -703,15 +706,10 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
                goto out;
        }
 
-       if (gadget->connected)
-               goto out;
-
-       if (gadget->deactivated || !gadget->udc->started) {
+       if (gadget->deactivated) {
                /*
                 * If gadget is deactivated we only save new state.
                 * Gadget will be connected automatically after activation.
-                *
-                * udc first needs to be started before gadget can be pulled up.
                 */
                gadget->connected = true;
                goto out;
@@ -726,32 +724,22 @@ out:
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(usb_gadget_connect);
 
 /**
- * usb_gadget_connect - software-controlled connect to USB host
- * @gadget:the peripheral being connected
+ * usb_gadget_disconnect - software-controlled disconnect from USB host
+ * @gadget: the peripheral being disconnected
  *
- * Enables the D+ (or potentially D-) pullup.  The host will start
- * enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered).
+ * Disables the D+ (or potentially D-) pullup, which the host may see
+ * as a disconnect (when a VBUS session is active).  Not all systems
+ * support software pullup controls.
+ *
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
  *
  * Returns zero on success, else negative errno.
  */
-int usb_gadget_connect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       mutex_lock(&gadget->udc->connect_lock);
-       ret = usb_gadget_connect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(usb_gadget_connect);
-
-/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
-static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
-       __must_hold(&gadget->udc->connect_lock)
+int usb_gadget_disconnect(struct usb_gadget *gadget)
 {
        int ret = 0;
 
@@ -763,12 +751,10 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
        if (!gadget->connected)
                goto out;
 
-       if (gadget->deactivated || !gadget->udc->started) {
+       if (gadget->deactivated) {
                /*
                 * If gadget is deactivated we only save new state.
                 * Gadget will stay disconnected after activation.
-                *
-                * udc should have been started before gadget being pulled down.
                 */
                gadget->connected = false;
                goto out;
@@ -788,30 +774,6 @@ out:
 
        return ret;
 }
-
-/**
- * usb_gadget_disconnect - software-controlled disconnect from USB host
- * @gadget:the peripheral being disconnected
- *
- * Disables the D+ (or potentially D-) pullup, which the host may see
- * as a disconnect (when a VBUS session is active).  Not all systems
- * support software pullup controls.
- *
- * Following a successful disconnect, invoke the ->disconnect() callback
- * for the current gadget driver so that UDC drivers don't need to.
- *
- * Returns zero on success, else negative errno.
- */
-int usb_gadget_disconnect(struct usb_gadget *gadget)
-{
-       int ret;
-
-       mutex_lock(&gadget->udc->connect_lock);
-       ret = usb_gadget_disconnect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
-
-       return ret;
-}
 EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
 
 /**
@@ -832,11 +794,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
        if (gadget->deactivated)
                goto out;
 
-       mutex_lock(&gadget->udc->connect_lock);
        if (gadget->connected) {
-               ret = usb_gadget_disconnect_locked(gadget);
+               ret = usb_gadget_disconnect(gadget);
                if (ret)
-                       goto unlock;
+                       goto out;
 
                /*
                 * If gadget was being connected before deactivation, we want
@@ -846,8 +807,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
        }
        gadget->deactivated = true;
 
-unlock:
-       mutex_unlock(&gadget->udc->connect_lock);
 out:
        trace_usb_gadget_deactivate(gadget, ret);
 
@@ -871,7 +830,6 @@ int usb_gadget_activate(struct usb_gadget *gadget)
        if (!gadget->deactivated)
                goto out;
 
-       mutex_lock(&gadget->udc->connect_lock);
        gadget->deactivated = false;
 
        /*
@@ -879,8 +837,7 @@ int usb_gadget_activate(struct usb_gadget *gadget)
         * while it was being deactivated, we call usb_gadget_connect().
         */
        if (gadget->connected)
-               ret = usb_gadget_connect_locked(gadget);
-       mutex_unlock(&gadget->udc->connect_lock);
+               ret = usb_gadget_connect(gadget);
 
 out:
        trace_usb_gadget_activate(gadget, ret);
@@ -1121,13 +1078,12 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
 
 /* ------------------------------------------------------------------------- */
 
-/* Acquire connect_lock before calling this function. */
-static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
+static void usb_udc_connect_control(struct usb_udc *udc)
 {
-       if (udc->vbus && udc->started)
-               usb_gadget_connect_locked(udc->gadget);
+       if (udc->vbus)
+               usb_gadget_connect(udc->gadget);
        else
-               usb_gadget_disconnect_locked(udc->gadget);
+               usb_gadget_disconnect(udc->gadget);
 }
 
 /**
@@ -1143,12 +1099,10 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
 {
        struct usb_udc *udc = gadget->udc;
 
-       mutex_lock(&udc->connect_lock);
        if (udc) {
                udc->vbus = status;
-               usb_udc_connect_control_locked(udc);
+               usb_udc_connect_control(udc);
        }
-       mutex_unlock(&udc->connect_lock);
 }
 EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
 
@@ -1170,7 +1124,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
 EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
 
 /**
- * usb_gadget_udc_start_locked - tells usb device controller to start up
+ * usb_gadget_udc_start - tells usb device controller to start up
  * @udc: The UDC to be started
  *
  * This call is issued by the UDC Class driver when it's about
@@ -1181,11 +1135,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
  * necessary to have it powered on.
  *
  * Returns zero on success, else negative errno.
- *
- * Caller should acquire connect_lock before invoking this function.
  */
-static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
-       __must_hold(&udc->connect_lock)
+static inline int usb_gadget_udc_start(struct usb_udc *udc)
 {
        int ret;
 
@@ -1202,7 +1153,7 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
 }
 
 /**
- * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
+ * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
  * @udc: The UDC to be stopped
  *
  * This call is issued by the UDC Class driver after calling
@@ -1211,11 +1162,8 @@ static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
  * The details are implementation specific, but it can go as
  * far as powering off UDC completely and disable its data
  * line pullups.
- *
- * Caller should acquire connect lock before invoking this function.
  */
-static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
-       __must_hold(&udc->connect_lock)
+static inline void usb_gadget_udc_stop(struct usb_udc *udc)
 {
        if (!udc->started) {
                dev_err(&udc->dev, "UDC had already stopped\n");
@@ -1374,7 +1322,6 @@ int usb_add_gadget(struct usb_gadget *gadget)
 
        udc->gadget = gadget;
        gadget->udc = udc;
-       mutex_init(&udc->connect_lock);
 
        udc->started = false;
 
@@ -1576,15 +1523,11 @@ static int gadget_bind_driver(struct device *dev)
        if (ret)
                goto err_bind;
 
-       mutex_lock(&udc->connect_lock);
-       ret = usb_gadget_udc_start_locked(udc);
-       if (ret) {
-               mutex_unlock(&udc->connect_lock);
+       ret = usb_gadget_udc_start(udc);
+       if (ret)
                goto err_start;
-       }
        usb_gadget_enable_async_callbacks(udc);
-       usb_udc_connect_control_locked(udc);
-       mutex_unlock(&udc->connect_lock);
+       usb_udc_connect_control(udc);
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
        return 0;
@@ -1615,14 +1558,12 @@ static void gadget_unbind_driver(struct device *dev)
 
        kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
 
-       mutex_lock(&udc->connect_lock);
-       usb_gadget_disconnect_locked(gadget);
+       usb_gadget_disconnect(gadget);
        usb_gadget_disable_async_callbacks(udc);
        if (gadget->irq)
                synchronize_irq(gadget->irq);
        udc->driver->unbind(gadget);
-       usb_gadget_udc_stop_locked(udc);
-       mutex_unlock(&udc->connect_lock);
+       usb_gadget_udc_stop(udc);
 
        mutex_lock(&udc_lock);
        driver->is_bound = false;
@@ -1708,15 +1649,11 @@ static ssize_t soft_connect_store(struct device *dev,
        }
 
        if (sysfs_streq(buf, "connect")) {
-               mutex_lock(&udc->connect_lock);
-               usb_gadget_udc_start_locked(udc);
-               usb_gadget_connect_locked(udc->gadget);
-               mutex_unlock(&udc->connect_lock);
+               usb_gadget_udc_start(udc);
+               usb_gadget_connect(udc->gadget);
        } else if (sysfs_streq(buf, "disconnect")) {
-               mutex_lock(&udc->connect_lock);
-               usb_gadget_disconnect_locked(udc->gadget);
-               usb_gadget_udc_stop_locked(udc);
-               mutex_unlock(&udc->connect_lock);
+               usb_gadget_disconnect(udc->gadget);
+               usb_gadget_udc_stop(udc);
        } else {
                dev_err(dev, "unsupported command '%s'\n", buf);
                ret = -EINVAL;
index 3592f75..7bd2fdd 100644 (file)
@@ -119,11 +119,13 @@ static int uhci_pci_init(struct usb_hcd *hcd)
 
        uhci->rh_numports = uhci_count_ports(hcd);
 
-       /* Intel controllers report the OverCurrent bit active on.
-        * VIA controllers report it active off, so we'll adjust the
-        * bit value.  (It's not standardized in the UHCI spec.)
+       /*
+        * Intel controllers report the OverCurrent bit active on.  VIA
+        * and ZHAOXIN controllers report it active off, so we'll adjust
+        * the bit value.  (It's not standardized in the UHCI spec.)
         */
-       if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA)
+       if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_VIA ||
+                       to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_ZHAOXIN)
                uhci->oc_low = 1;
 
        /* HP's server management chip requires a longer port reset delay. */
index ddb79f2..79b3691 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/reset.h>
+#include <linux/suspend.h>
 
 #include "xhci.h"
 #include "xhci-trace.h"
@@ -387,7 +388,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 
        if (pdev->vendor == PCI_VENDOR_ID_AMD &&
                pdev->device == PCI_DEVICE_ID_AMD_RENOIR_XHCI)
-               xhci->quirks |= XHCI_BROKEN_D3COLD;
+               xhci->quirks |= XHCI_BROKEN_D3COLD_S2I;
 
        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                xhci->quirks |= XHCI_LPM_SUPPORT;
@@ -801,9 +802,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
         * Systems with the TI redriver that loses port status change events
         * need to have the registers polled during D3, so avoid D3cold.
         */
-       if (xhci->quirks & (XHCI_COMP_MODE_QUIRK | XHCI_BROKEN_D3COLD))
+       if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
                pci_d3cold_disable(pdev);
 
+#ifdef CONFIG_SUSPEND
+       /* d3cold is broken, but only when s2idle is used */
+       if (pm_suspend_target_state == PM_SUSPEND_TO_IDLE &&
+           xhci->quirks & (XHCI_BROKEN_D3COLD_S2I))
+               pci_d3cold_disable(pdev);
+#endif
+
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
                xhci_pme_quirk(hcd);
 
index 1ad12d5..2bc82b3 100644 (file)
@@ -276,6 +276,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
        trace_xhci_inc_enq(ring);
 }
 
+static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
+                           struct xhci_segment *end_seg, union xhci_trb *end,
+                           unsigned int num_segs)
+{
+       union xhci_trb *last_on_seg;
+       int num = 0;
+       int i = 0;
+
+       do {
+               if (start_seg == end_seg && end >= start)
+                       return num + (end - start);
+               last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
+               num += last_on_seg - start;
+               start_seg = start_seg->next;
+               start = start_seg->trbs;
+       } while (i++ <= num_segs);
+
+       return -EINVAL;
+}
+
 /*
  * Check to see if there's room to enqueue num_trbs on the ring and make sure
  * enqueue pointer will not advance into dequeue segment. See rules above.
@@ -2140,6 +2160,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
                     u32 trb_comp_code)
 {
        struct xhci_ep_ctx *ep_ctx;
+       int trbs_freed;
 
        ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
 
@@ -2209,9 +2230,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
        }
 
        /* Update ring dequeue pointer */
+       trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
+                                     td->last_trb_seg, td->last_trb,
+                                     ep_ring->num_segs);
+       if (trbs_freed < 0)
+               xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
+       else
+               ep_ring->num_trbs_free += trbs_freed;
        ep_ring->dequeue = td->last_trb;
        ep_ring->deq_seg = td->last_trb_seg;
-       ep_ring->num_trbs_free += td->num_trbs - 1;
        inc_deq(xhci, ep_ring);
 
        return xhci_td_cleanup(xhci, td, ep_ring, td->status);
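
xhci_num_trbs_to() sums whole segments until the start and end pointers land on the same one, bailing out after num_segs iterations. A worked example, assuming TRBS_PER_SEGMENT is 256 and hypothetical ring positions:

        /*
         * dequeue = &seg_a->trbs[250], last_trb = &seg_b->trbs[10],
         * seg_b == seg_a->next:
         *
         * pass 1: seg_a != seg_b  ->  num += 255 - 250 = 5, advance to seg_b
         * pass 2: seg_b == seg_b and end >= start  ->  return 5 + (10 - 0) = 15
         *
         * So 15 TRBs were freed between the dequeue pointer and the TD's
         * last TRB, replacing the old fixed "td->num_trbs - 1" estimate.
         */
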
index 08d7219..6b690ec 100644 (file)
@@ -1901,7 +1901,7 @@ struct xhci_hcd {
 #define XHCI_DISABLE_SPARSE    BIT_ULL(38)
 #define XHCI_SG_TRB_CACHE_SIZE_QUIRK   BIT_ULL(39)
 #define XHCI_NO_SOFT_RETRY     BIT_ULL(40)
-#define XHCI_BROKEN_D3COLD     BIT_ULL(41)
+#define XHCI_BROKEN_D3COLD_S2I BIT_ULL(41)
 #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
 #define XHCI_SUSPEND_RESUME_CLKS       BIT_ULL(43)
 #define XHCI_RESET_TO_DEFAULT  BIT_ULL(44)
index 8931df5..c54e980 100644 (file)
@@ -406,22 +406,25 @@ static DEF_SCSI_QCMD(queuecommand)
  ***********************************************************************/
 
 /* Command timeout and abort */
-static int command_abort(struct scsi_cmnd *srb)
+static int command_abort_matching(struct us_data *us, struct scsi_cmnd *srb_match)
 {
-       struct us_data *us = host_to_us(srb->device->host);
-
-       usb_stor_dbg(us, "%s called\n", __func__);
-
        /*
         * us->srb together with the TIMED_OUT, RESETTING, and ABORTING
         * bits are protected by the host lock.
         */
        scsi_lock(us_to_host(us));
 
-       /* Is this command still active? */
-       if (us->srb != srb) {
+       /* Is there any pending command to abort? */
+       if (!us->srb) {
                scsi_unlock(us_to_host(us));
                usb_stor_dbg(us, "-- nothing to abort\n");
+               return SUCCESS;
+       }
+
+       /* Does the command match the passed srb, if any? */
+       if (srb_match && us->srb != srb_match) {
+               scsi_unlock(us_to_host(us));
+               usb_stor_dbg(us, "-- pending command mismatch\n");
                return FAILED;
        }
 
@@ -444,6 +447,14 @@ static int command_abort(struct scsi_cmnd *srb)
        return SUCCESS;
 }
 
+static int command_abort(struct scsi_cmnd *srb)
+{
+       struct us_data *us = host_to_us(srb->device->host);
+
+       usb_stor_dbg(us, "%s called\n", __func__);
+       return command_abort_matching(us, srb);
+}
+
 /*
  * This invokes the transport reset mechanism to reset the state of the
  * device
@@ -455,6 +466,9 @@ static int device_reset(struct scsi_cmnd *srb)
 
        usb_stor_dbg(us, "%s called\n", __func__);
 
+       /* abort any pending command before reset */
+       command_abort_matching(us, NULL);
+
        /* lock the device pointers and do the reset */
        mutex_lock(&(us->dev_mutex));
        result = us->transport_reset(us);
index 8f3e884..66de880 100644 (file)
@@ -516,6 +516,10 @@ static ssize_t pin_assignment_show(struct device *dev,
 
        mutex_unlock(&dp->lock);
 
+       /* get_current_pin_assignments can return 0 when no matching pin assignments are found */
+       if (len == 0)
+               len++;
+
        buf[len - 1] = '\n';
        return len;
 }
index 8b075ca..603dbd4 100644 (file)
@@ -886,6 +886,9 @@ static void tps6598x_remove(struct i2c_client *client)
 {
        struct tps6598x *tps = i2c_get_clientdata(client);
 
+       if (!client->irq)
+               cancel_delayed_work_sync(&tps->wq_poll);
+
        tps6598x_disconnect(tps, 0);
        typec_unregister_port(tps->port);
        usb_role_switch_put(tps->role_sw);
@@ -917,7 +920,7 @@ static int __maybe_unused tps6598x_resume(struct device *dev)
                enable_irq(client->irq);
        }
 
-       if (client->irq)
+       if (!client->irq)
                queue_delayed_work(system_power_efficient_wq, &tps->wq_poll,
                                   msecs_to_jiffies(POLL_INTERVAL));
 
index 3d4dd94..0d2f805 100644 (file)
@@ -860,6 +860,11 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
                if (ret)
                        goto pin_unwind;
 
+               if (!pfn_valid(phys_pfn)) {
+                       ret = -EINVAL;
+                       goto pin_unwind;
+               }
+
                ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
                if (ret) {
                        if (put_pfn(phys_pfn, dma->prot) && do_accounting)
index a92af08..0742730 100644 (file)
@@ -256,7 +256,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
                 * test_and_set_bit() implies a memory barrier.
                 */
                llist_add(&work->node, &dev->worker->work_list);
-               wake_up_process(dev->worker->vtsk->task);
+               vhost_task_wake(dev->worker->vtsk);
        }
 }
 EXPORT_SYMBOL_GPL(vhost_work_queue);
@@ -333,31 +333,19 @@ static void vhost_vq_reset(struct vhost_dev *dev,
        __vhost_vq_meta_reset(vq);
 }
 
-static int vhost_worker(void *data)
+static bool vhost_worker(void *data)
 {
        struct vhost_worker *worker = data;
        struct vhost_work *work, *work_next;
        struct llist_node *node;
 
-       for (;;) {
-               /* mb paired w/ kthread_stop */
-               set_current_state(TASK_INTERRUPTIBLE);
-
-               if (vhost_task_should_stop(worker->vtsk)) {
-                       __set_current_state(TASK_RUNNING);
-                       break;
-               }
-
-               node = llist_del_all(&worker->work_list);
-               if (!node)
-                       schedule();
-
+       node = llist_del_all(&worker->work_list);
+       if (node) {
                node = llist_reverse_order(node);
                /* make sure flag is seen after deletion */
                smp_wmb();
                llist_for_each_entry_safe(work, work_next, node, node) {
                        clear_bit(VHOST_WORK_QUEUED, &work->flags);
-                       __set_current_state(TASK_RUNNING);
                        kcov_remote_start_common(worker->kcov_handle);
                        work->fn(work);
                        kcov_remote_stop();
@@ -365,7 +353,7 @@ static int vhost_worker(void *data)
                }
        }
 
-       return 0;
+       return !!node;
 }
 
 static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
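
vhost_worker() is now a callback invoked by the vhost_task core: it drains one batch and returns whether it found any work, leaving sleeping and stop handling to the task loop. A simplified sketch of that contract, assuming the vhost_task implementation loops roughly like this (the real loop lives in the vhost_task code, not here):

        for (;;) {
                if (vhost_task_should_stop(vtsk))
                        break;
                if (!worker_fn(data))   /* vhost_worker() found no work */
                        schedule();     /* sleep until vhost_task_wake() */
        }
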
index 96e9157..0fdf5f4 100644 (file)
@@ -124,7 +124,7 @@ config FB_PROVIDE_GET_FB_UNMAPPED_AREA
        depends on FB
        help
          Allow generic frame-buffer to provide get_fb_unmapped_area
-         function.
+         function to provide shareable character device support on nommu.
 
 menuconfig FB_FOREIGN_ENDIAN
        bool "Framebuffer foreign endianness support"
index 024d0ee..08d15e4 100644 (file)
@@ -590,7 +590,7 @@ err_fb_alloc:
        return retval;
 }
 
-static int arcfb_remove(struct platform_device *dev)
+static void arcfb_remove(struct platform_device *dev)
 {
        struct fb_info *info = platform_get_drvdata(dev);
 
@@ -601,12 +601,11 @@ static int arcfb_remove(struct platform_device *dev)
                vfree((void __force *)info->screen_base);
                framebuffer_release(info);
        }
-       return 0;
 }
 
 static struct platform_driver arcfb_driver = {
        .probe  = arcfb_probe,
-       .remove = arcfb_remove,
+       .remove_new = arcfb_remove,
        .driver = {
                .name   = "arcfb",
        },
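
arcfb is the first of several fbdev drivers below converted from .remove to .remove_new, whose callback returns void because the driver core ignored the returned int anyway. The conversion template (names are hypothetical):

static void example_remove(struct platform_device *pdev)
{
        /* same teardown as before, minus the meaningless "return 0" */
}

static struct platform_driver example_driver = {
        .probe          = example_probe,
        .remove_new     = example_remove,
        .driver         = {
                .name   = "example",
        },
};
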
index b02e4e6..cba2b11 100644 (file)
@@ -3498,11 +3498,6 @@ static int atyfb_setup_generic(struct pci_dev *pdev, struct fb_info *info,
        if (ret)
                goto atyfb_setup_generic_fail;
 #endif
-       if (!(aty_ld_le32(CRTC_GEN_CNTL, par) & CRTC_EXT_DISP_EN))
-               par->clk_wr_offset = (inb(R_GENMO) & 0x0CU) >> 2;
-       else
-               par->clk_wr_offset = aty_ld_8(CLOCK_CNTL, par) & 0x03U;
-
        /* according to ATI, we should use clock 3 for acelerated mode */
        par->clk_wr_offset = 3;
 
index 519313b..648d6ca 100644 (file)
@@ -520,13 +520,10 @@ failed:
        return -ENODEV;
 }
 
-int au1100fb_drv_remove(struct platform_device *dev)
+void au1100fb_drv_remove(struct platform_device *dev)
 {
        struct au1100fb_device *fbdev = NULL;
 
-       if (!dev)
-               return -ENODEV;
-
        fbdev = platform_get_drvdata(dev);
 
 #if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
@@ -543,8 +540,6 @@ int au1100fb_drv_remove(struct platform_device *dev)
                clk_disable_unprepare(fbdev->lcdclk);
                clk_put(fbdev->lcdclk);
        }
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -593,9 +588,9 @@ static struct platform_driver au1100fb_driver = {
                .name           = "au1100-lcd",
        },
        .probe          = au1100fb_drv_probe,
-        .remove                = au1100fb_drv_remove,
+       .remove_new     = au1100fb_drv_remove,
        .suspend        = au1100fb_drv_suspend,
-        .resume                = au1100fb_drv_resume,
+       .resume         = au1100fb_drv_resume,
 };
 module_platform_driver(au1100fb_driver);
 
index b6b22fa..aed88ce 100644 (file)
@@ -1765,7 +1765,7 @@ failed:
        return ret;
 }
 
-static int au1200fb_drv_remove(struct platform_device *dev)
+static void au1200fb_drv_remove(struct platform_device *dev)
 {
        struct au1200fb_platdata *pd = platform_get_drvdata(dev);
        struct fb_info *fbi;
@@ -1788,8 +1788,6 @@ static int au1200fb_drv_remove(struct platform_device *dev)
        }
 
        free_irq(platform_get_irq(dev, 0), (void *)dev);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1840,7 +1838,7 @@ static struct platform_driver au1200fb_driver = {
                .pm     = AU1200FB_PMOPS,
        },
        .probe          = au1200fb_drv_probe,
-       .remove         = au1200fb_drv_remove,
+       .remove_new     = au1200fb_drv_remove,
 };
 module_platform_driver(au1200fb_driver);
 
index 55e62dd..b518cac 100644 (file)
@@ -1193,7 +1193,7 @@ err:
 
 }
 
-static int broadsheetfb_remove(struct platform_device *dev)
+static void broadsheetfb_remove(struct platform_device *dev)
 {
        struct fb_info *info = platform_get_drvdata(dev);
 
@@ -1209,12 +1209,11 @@ static int broadsheetfb_remove(struct platform_device *dev)
                module_put(par->board->owner);
                framebuffer_release(info);
        }
-       return 0;
 }
 
 static struct platform_driver broadsheetfb_driver = {
        .probe  = broadsheetfb_probe,
-       .remove = broadsheetfb_remove,
+       .remove_new = broadsheetfb_remove,
        .driver = {
                .name   = "broadsheetfb",
        },
index 9cbadcd..025d663 100644 (file)
@@ -352,7 +352,7 @@ out_err:
        return err;
 }
 
-static int bw2_remove(struct platform_device *op)
+static void bw2_remove(struct platform_device *op)
 {
        struct fb_info *info = dev_get_drvdata(&op->dev);
        struct bw2_par *par = info->par;
@@ -363,8 +363,6 @@ static int bw2_remove(struct platform_device *op)
        of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len);
 
        framebuffer_release(info);
-
-       return 0;
 }
 
 static const struct of_device_id bw2_match[] = {
@@ -381,7 +379,7 @@ static struct platform_driver bw2_driver = {
                .of_match_table = bw2_match,
        },
        .probe          = bw2_probe,
-       .remove         = bw2_remove,
+       .remove_new     = bw2_remove,
 };
 
 static int __init bw2_init(void)
index f98e8f2..8587c9d 100644 (file)
@@ -247,6 +247,9 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
 
        cursor.set = 0;
 
+       if (!vc->vc_font.data)
+               return;
+
        c = scr_readw((u16 *) vc->vc_pos);
        attribute = get_attribute(info, c);
        src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
index e808dc8..28739f1 100644 (file)
@@ -1468,7 +1468,7 @@ __releases(&info->lock)
 }
 
 #if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU)
-unsigned long get_fb_unmapped_area(struct file *filp,
+static unsigned long get_fb_unmapped_area(struct file *filp,
                                   unsigned long addr, unsigned long len,
                                   unsigned long pgoff, unsigned long flags)
 {
index b4b3670..2082b5c 100644 (file)
@@ -14,6 +14,7 @@
 
 #include "i810_regs.h"
 #include "i810.h"
+#include "i810_main.h"
 
 struct mode_registers std_modes[] = {
        /* 640x480 @ 60Hz */
@@ -276,7 +277,7 @@ void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
        var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
 }
 
-u32 i810_get_watermark(struct fb_var_screeninfo *var,
+u32 i810_get_watermark(const struct fb_var_screeninfo *var,
                       struct i810fb_par *par)
 {
        struct mode_registers *params = &par->regs;
index 975dd68..ee7d01a 100644 (file)
@@ -1452,9 +1452,13 @@ static int init_imstt(struct fb_info *info)
                      FBINFO_HWACCEL_FILLRECT |
                      FBINFO_HWACCEL_YPAN;
 
-       fb_alloc_cmap(&info->cmap, 0, 0);
+       if (fb_alloc_cmap(&info->cmap, 0, 0)) {
+               framebuffer_release(info);
+               return -ENODEV;
+       }
 
        if (register_framebuffer(info) < 0) {
+               fb_dealloc_cmap(&info->cmap);
                framebuffer_release(info);
                return -ENODEV;
        }
@@ -1531,8 +1535,10 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto error;
        info->pseudo_palette = par->palette;
        ret = init_imstt(info);
-       if (!ret)
-               pci_set_drvdata(pdev, info);
+       if (ret)
+               goto error;
+
+       pci_set_drvdata(pdev, info);
        return ret;
 
 error:
index 727a10a..b15a8ad 100644 (file)
@@ -1291,7 +1291,7 @@ static struct i2c_driver maven_driver={
        .driver = {
                .name   = "maven",
        },
-       .probe_new      = maven_probe,
+       .probe          = maven_probe,
        .remove         = maven_remove,
        .id_table       = maven_id,
 };
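
This hunk and the ssd1307fb one below are part of the i2c-wide switch in which the probe callback lost its id parameter, so the one-argument handler moves from .probe_new back to .probe. A hedged sketch (names hypothetical):

    #include <linux/i2c.h>
    #include <linux/module.h>

    static int example_probe(struct i2c_client *client)
    {
            return 0;
    }

    static struct i2c_driver example_driver = {
            .driver   = { .name = "example" },
            .probe    = example_probe,      /* formerly .probe_new */
    };
    module_i2c_driver(example_driver);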
index 1eaa35c..477789c 100644 (file)
@@ -491,7 +491,8 @@ static int tpo_td043_probe(struct spi_device *spi)
 
        ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
        if (IS_ERR(ddata->vcc_reg)) {
-               r = dev_err_probe(&spi->dev, r, "failed to get LCD VCC regulator\n");
+               r = dev_err_probe(&spi->dev, PTR_ERR(ddata->vcc_reg),
+                                 "failed to get LCD VCC regulator\n");
                goto err_regulator;
        }
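
The bug fixed here was feeding the still-uninitialized r into dev_err_probe() instead of the regulator's own error code. For reference, a hedged sketch of the intended pattern (wrapper function hypothetical):

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    static int example_get_vcc(struct device *dev, struct regulator **out)
    {
            struct regulator *vcc = devm_regulator_get(dev, "vcc");

            /* dev_err_probe() logs (or stays quiet on -EPROBE_DEFER)
             * and returns the error code it was handed. */
            if (IS_ERR(vcc))
                    return dev_err_probe(dev, PTR_ERR(vcc),
                                         "failed to get VCC regulator\n");
            *out = vcc;
            return 0;
    }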
 
index 046b999..132d1a2 100644 (file)
@@ -844,7 +844,7 @@ static const struct i2c_device_id ssd1307fb_i2c_id[] = {
 MODULE_DEVICE_TABLE(i2c, ssd1307fb_i2c_id);
 
 static struct i2c_driver ssd1307fb_driver = {
-       .probe_new = ssd1307fb_probe,
+       .probe = ssd1307fb_probe,
        .remove = ssd1307fb_remove,
        .id_table = ssd1307fb_i2c_id,
        .driver = {
index 14c9215..686a234 100644 (file)
@@ -741,7 +741,7 @@ ngleClearOverlayPlanes(struct stifb_info *fb, int mask, int data)
         packed_len = (fb->info.var.xres << 16) | fb->info.var.yres;
         NGLE_SET_DSTXY(fb, packed_dst);
 
-        /* Write zeroes to overlay planes */
+       /* Write zeroes to overlay planes */
        NGLE_QUICK_SET_IMAGE_BITMAP_OP(fb,
                                       IBOvals(RopSrc, MaskAddrOffset(0),
                                               BitmapExtent08, StaticReg(0),
@@ -1297,14 +1297,14 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
                break;
        default:
 #ifdef FALLBACK_TO_1BPP
-               printk(KERN_WARNING
+               printk(KERN_WARNING
                        "stifb: Unsupported graphics card (id=0x%08x) "
                                "- now trying 1bpp mode instead\n",
                        fb->id);
                bpp = 1;        /* default to 1 bpp */
                break;
 #else
-               printk(KERN_WARNING
+               printk(KERN_WARNING
                        "stifb: Unsupported graphics card (id=0x%08x) "
                                "- skipping.\n",
                        fb->id);
index 216d49c..dabc30a 100644 (file)
@@ -27,6 +27,8 @@
 #include <video/udlfb.h>
 #include "edid.h"
 
+#define OUT_EP_NUM     1       /* The endpoint number we will use */
+
 static const struct fb_fix_screeninfo dlfb_fix = {
        .id =           "udlfb",
        .type =         FB_TYPE_PACKED_PIXELS,
@@ -1541,24 +1543,16 @@ static const struct device_attribute fb_device_attrs[] = {
 static int dlfb_select_std_channel(struct dlfb_data *dlfb)
 {
        int ret;
-       void *buf;
        static const u8 set_def_chn[] = {
                                0x57, 0xCD, 0xDC, 0xA7,
                                0x1C, 0x88, 0x5E, 0x15,
                                0x60, 0xFE, 0xC6, 0x97,
                                0x16, 0x3D, 0x47, 0xF2  };
 
-       buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
-
-       if (!buf)
-               return -ENOMEM;
-
-       ret = usb_control_msg(dlfb->udev, usb_sndctrlpipe(dlfb->udev, 0),
-                       NR_USB_REQUEST_CHANNEL,
+       ret = usb_control_msg_send(dlfb->udev, 0, NR_USB_REQUEST_CHANNEL,
                        (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
-                       buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
-
-       kfree(buf);
+                       &set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT,
+                       GFP_KERNEL);
 
        return ret;
 }
@@ -1652,7 +1646,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        struct fb_info *info;
        int retval;
        struct usb_device *usbdev = interface_to_usbdev(intf);
-       struct usb_endpoint_descriptor *out;
+       static u8 out_ep[] = {OUT_EP_NUM + USB_DIR_OUT, 0};
 
        /* usb initialization */
        dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
@@ -1666,9 +1660,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
        dlfb->udev = usb_get_dev(usbdev);
        usb_set_intfdata(intf, dlfb);
 
-       retval = usb_find_common_endpoints(intf->cur_altsetting, NULL, &out, NULL, NULL);
-       if (retval) {
-               dev_err(&intf->dev, "Device should have at lease 1 bulk endpoint!\n");
+       if (!usb_check_bulk_endpoints(intf, out_ep)) {
+               dev_err(&intf->dev, "Invalid DisplayLink device!\n");
+               retval = -EINVAL;
                goto error;
        }
 
@@ -1927,7 +1921,8 @@ retry:
                }
 
                /* urb->transfer_buffer_length set to actual before submit */
-               usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
+               usb_fill_bulk_urb(urb, dlfb->udev,
+                       usb_sndbulkpipe(dlfb->udev, OUT_EP_NUM),
                        buf, size, dlfb_urb_completion, unode);
                urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
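
The dlfb_select_std_channel() rewrite above works because usb_control_msg_send() copies the caller's data into an internally allocated DMA-safe bounce buffer, so const data on the stack or in rodata is acceptable and the old kmemdup()/kfree() pair becomes unnecessary. A hedged sketch of the call shape (payload abbreviated):

    /* Returns 0 on complete success or a negative errno; unlike
     * usb_control_msg() it never reports a short transfer as success. */
    static const u8 payload[] = { 0x57, 0xCD, 0xDC, 0xA7 };
    int ret;

    ret = usb_control_msg_send(dlfb->udev, 0 /* default control ep */,
                               NR_USB_REQUEST_CHANNEL,
                               USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
                               payload, sizeof(payload),
                               USB_CTRL_SET_TIMEOUT, GFP_KERNEL);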
 
index 1f5219e..7beaf2c 100644 (file)
@@ -325,8 +325,10 @@ static struct sock_mapping *pvcalls_new_active_socket(
        void *page;
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
-       if (map == NULL)
+       if (map == NULL) {
+               sock_release(sock);
                return NULL;
+       }
 
        map->fedata = fedata;
        map->sock = sock;
@@ -418,10 +420,8 @@ static int pvcalls_back_connect(struct xenbus_device *dev,
                                        req->u.connect.ref,
                                        req->u.connect.evtchn,
                                        sock);
-       if (!map) {
+       if (!map)
                ret = -EFAULT;
-               sock_release(sock);
-       }
 
 out:
        rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
@@ -561,7 +561,6 @@ static void __pvcalls_back_accept(struct work_struct *work)
                                        sock);
        if (!map) {
                ret = -EFAULT;
-               sock_release(sock);
                goto out_error;
        }
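
Taken together, the three pvcalls hunks move socket ownership into the callee: pvcalls_new_active_socket() now releases the socket on its own failure path, so callers must no longer call sock_release() themselves. A hedged sketch of the resulting contract at a call site (argument list abridged from the hunk above):

    map = pvcalls_new_active_socket(fedata, req->req_id,
                                    req->u.connect.ref,
                                    req->u.connect.evtchn, sock);
    if (!map)
            ret = -EFAULT;  /* sock was already released by the callee */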
 
index cc07a0c..18d034e 100644 (file)
@@ -368,14 +368,7 @@ config NFS_V4_2_SSC_HELPER
 source "net/sunrpc/Kconfig"
 source "fs/ceph/Kconfig"
 
-source "fs/cifs/Kconfig"
-source "fs/ksmbd/Kconfig"
-
-config SMBFS_COMMON
-       tristate
-       default y if CIFS=y || SMB_SERVER=y
-       default m if CIFS=m || SMB_SERVER=m
-
+source "fs/smb/Kconfig"
 source "fs/coda/Kconfig"
 source "fs/afs/Kconfig"
 source "fs/9p/Kconfig"
index 834f1c3..5bfdbf0 100644 (file)
@@ -95,9 +95,7 @@ obj-$(CONFIG_LOCKD)           += lockd/
 obj-$(CONFIG_NLS)              += nls/
 obj-y                          += unicode/
 obj-$(CONFIG_SYSV_FS)          += sysv/
-obj-$(CONFIG_SMBFS_COMMON)     += smbfs_common/
-obj-$(CONFIG_CIFS)             += cifs/
-obj-$(CONFIG_SMB_SERVER)       += ksmbd/
+obj-$(CONFIG_SMBFS)            += smb/
 obj-$(CONFIG_HPFS_FS)          += hpfs/
 obj-$(CONFIG_NTFS_FS)          += ntfs/
 obj-$(CONFIG_NTFS3_FS)         += ntfs3/
index 4dd97af..5219182 100644 (file)
@@ -1358,6 +1358,7 @@ static int afs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
        op->dentry      = dentry;
        op->create.mode = S_IFDIR | mode;
        op->create.reason = afs_edit_dir_for_mkdir;
+       op->mtime       = current_time(dir);
        op->ops         = &afs_mkdir_operation;
        return afs_do_sync_operation(op);
 }
@@ -1661,6 +1662,7 @@ static int afs_create(struct mnt_idmap *idmap, struct inode *dir,
        op->dentry      = dentry;
        op->create.mode = S_IFREG | mode;
        op->create.reason = afs_edit_dir_for_create;
+       op->mtime       = current_time(dir);
        op->ops         = &afs_create_operation;
        return afs_do_sync_operation(op);
 
@@ -1796,6 +1798,7 @@ static int afs_symlink(struct mnt_idmap *idmap, struct inode *dir,
        op->ops                 = &afs_symlink_operation;
        op->create.reason       = afs_edit_dir_for_symlink;
        op->create.symlink      = content;
+       op->mtime               = current_time(dir);
        return afs_do_sync_operation(op);
 
 error:
index 5379c47..b3ad0f5 100644 (file)
@@ -330,7 +330,7 @@ static void btrfs_end_bio_work(struct work_struct *work)
        if (bbio->inode && !(bbio->bio.bi_opf & REQ_META))
                btrfs_check_read_bio(bbio, bbio->bio.bi_private);
        else
-               bbio->end_io(bbio);
+               btrfs_orig_bbio_end_io(bbio);
 }
 
 static void btrfs_simple_end_io(struct bio *bio)
@@ -811,10 +811,6 @@ void btrfs_submit_repair_write(struct btrfs_bio *bbio, int mirror_num, bool dev_
                goto fail;
 
        if (dev_replace) {
-               if (btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE && btrfs_is_zoned(fs_info)) {
-                       bbio->bio.bi_opf &= ~REQ_OP_WRITE;
-                       bbio->bio.bi_opf |= REQ_OP_ZONE_APPEND;
-               }
                ASSERT(smap.dev == fs_info->dev_replace.srcdev);
                smap.dev = fs_info->dev_replace.tgtdev;
        }
index 957ad1c..590b035 100644 (file)
@@ -2818,10 +2818,20 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
        }
 
        ret = inc_block_group_ro(cache, 0);
-       if (!do_chunk_alloc || ret == -ETXTBSY)
-               goto unlock_out;
        if (!ret)
                goto out;
+       if (ret == -ETXTBSY)
+               goto unlock_out;
+
+       /*
+        * Skip chunk allocation if the bg is SYSTEM; this avoids a system
+        * chunk allocation storm exhausting the system chunk array.  Otherwise
+        * we still want to try our best to mark the block group read-only.
+        */
+       if (!do_chunk_alloc && ret == -ENOSPC &&
+           (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM))
+               goto unlock_out;
+
        alloc_flags = btrfs_get_alloc_profile(fs_info, cache->space_info->flags);
        ret = btrfs_chunk_alloc(trans, alloc_flags, CHUNK_ALLOC_FORCE);
        if (ret < 0)
index fbf9006..2b1b227 100644 (file)
@@ -96,7 +96,7 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
        crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
                            first_page_part - BTRFS_CSUM_SIZE);
 
-       for (i = 1; i < num_pages; i++) {
+       for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
                kaddr = page_address(buf->pages[i]);
                crypto_shash_update(shash, kaddr, PAGE_SIZE);
        }
@@ -4936,7 +4936,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
                 */
                inode = igrab(&btrfs_inode->vfs_inode);
                if (inode) {
+                       unsigned int nofs_flag;
+
+                       nofs_flag = memalloc_nofs_save();
                        invalidate_inode_pages2(inode->i_mapping);
+                       memalloc_nofs_restore(nofs_flag);
                        iput(inode);
                }
                spin_lock(&root->delalloc_lock);
@@ -5042,7 +5046,12 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache)
 
        inode = cache->io_ctl.inode;
        if (inode) {
+               unsigned int nofs_flag;
+
+               nofs_flag = memalloc_nofs_save();
                invalidate_inode_pages2(inode->i_mapping);
+               memalloc_nofs_restore(nofs_flag);
+
                BTRFS_I(inode)->generation = 0;
                cache->io_ctl.inode = NULL;
                iput(inode);
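
Both hunks wrap invalidate_inode_pages2() in the scoped-NOFS pattern: while the saved flag is active, every allocation in the section implicitly behaves as GFP_NOFS, so reclaim cannot re-enter the filesystem during teardown. A generic hedged sketch (the allocating call is hypothetical):

    #include <linux/sched/mm.h>

    unsigned int nofs_flag;

    nofs_flag = memalloc_nofs_save();
    /* allocations here cannot recurse into filesystem reclaim */
    do_work_that_may_allocate();
    memalloc_nofs_restore(nofs_flag);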
index cd4cce9..d1cd0a6 100644 (file)
@@ -792,7 +792,9 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio)
                                sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
                                                      bytes_left), GFP_KERNEL);
                                memalloc_nofs_restore(nofs_flag);
-                               BUG_ON(!sums); /* -ENOMEM */
+                               if (!sums)
+                                       return BLK_STS_RESOURCE;
+
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
                                                                offset);
index 836725a..7c66651 100644 (file)
@@ -1137,6 +1137,35 @@ static void scrub_write_endio(struct btrfs_bio *bbio)
                wake_up(&stripe->io_wait);
 }
 
+static void scrub_submit_write_bio(struct scrub_ctx *sctx,
+                                  struct scrub_stripe *stripe,
+                                  struct btrfs_bio *bbio, bool dev_replace)
+{
+       struct btrfs_fs_info *fs_info = sctx->fs_info;
+       u32 bio_len = bbio->bio.bi_iter.bi_size;
+       u32 bio_off = (bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT) -
+                     stripe->logical;
+
+       fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
+       atomic_inc(&stripe->pending_io);
+       btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
+       if (!btrfs_is_zoned(fs_info))
+               return;
+       /*
+        * For zoned writeback, the queue depth must be 1, so we must wait
+        * for each write to finish before submitting the next one.
+        */
+       wait_scrub_stripe_io(stripe);
+
+       /*
+        * We also need to update the write pointer if the write finished
+        * successfully.
+        */
+       if (!test_bit(bio_off >> fs_info->sectorsize_bits,
+                     &stripe->write_error_bitmap))
+               sctx->write_pointer += bio_len;
+}
+
 /*
  * Submit the write bio(s) for the sectors specified by @write_bitmap.
  *
@@ -1155,7 +1184,6 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str
 {
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
-       const bool zoned = btrfs_is_zoned(fs_info);
        int sector_nr;
 
        for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
@@ -1168,13 +1196,7 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str
 
                /* Cannot merge with previous sector, submit the current one. */
                if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
-                       fill_writer_pointer_gap(sctx, stripe->physical +
-                                       (sector_nr << fs_info->sectorsize_bits));
-                       atomic_inc(&stripe->pending_io);
-                       btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
-                       /* For zoned writeback, queue depth must be 1. */
-                       if (zoned)
-                               wait_scrub_stripe_io(stripe);
+                       scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
                        bbio = NULL;
                }
                if (!bbio) {
@@ -1187,14 +1209,8 @@ static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *str
                ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                ASSERT(ret == fs_info->sectorsize);
        }
-       if (bbio) {
-               fill_writer_pointer_gap(sctx, bbio->bio.bi_iter.bi_sector <<
-                                       SECTOR_SHIFT);
-               atomic_inc(&stripe->pending_io);
-               btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace);
-               if (zoned)
-                       wait_scrub_stripe_io(stripe);
-       }
+       if (bbio)
+               scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
 }
 
 /*
@@ -2518,13 +2534,20 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                if (ret == 0) {
                        ro_set = 1;
-               } else if (ret == -ENOSPC && !sctx->is_dev_replace) {
+               } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
+                          !(cache->flags & BTRFS_BLOCK_GROUP_RAID56_MASK)) {
                        /*
                         * btrfs_inc_block_group_ro return -ENOSPC when it
                         * failed in creating new chunk for metadata.
                         * It is not a problem for scrub, because
                         * metadata are always cowed, and our scrub paused
                         * commit_transactions.
+                        *
+                        * For RAID56 chunks, we have to mark them read-only
+                        * for scrub, as later we will use our own cache
+                        * out of the RAID56 realm.
+                        * Thus we want the RAID56 bg to be marked RO to
+                        * prevent RMW from screwing up our cache.
                         */
                        ro_set = 0;
                } else if (ret == -ETXTBSY) {
index 9b212e8..d2755d5 100644 (file)
@@ -6158,7 +6158,7 @@ static int log_delayed_deletions_incremental(struct btrfs_trans_handle *trans,
 {
        struct btrfs_root *log = inode->root->log_root;
        const struct btrfs_delayed_item *curr;
-       u64 last_range_start;
+       u64 last_range_start = 0;
        u64 last_range_end = 0;
        struct btrfs_key key;
 
index 29cf002..4c0f22a 100644 (file)
@@ -3942,7 +3942,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
        struct dentry *dentry;
        struct ceph_cap *cap;
        char *path;
-       int pathlen = 0, err = 0;
+       int pathlen = 0, err;
        u64 pathbase;
        u64 snap_follows;
 
@@ -3965,6 +3965,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
                spin_unlock(&ci->i_ceph_lock);
+               err = 0;
                goto out_err;
        }
        dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
index 8700720..0b236eb 100644 (file)
@@ -1111,6 +1111,19 @@ skip_inode:
                                continue;
                        adjust_snap_realm_parent(mdsc, child, realm->ino);
                }
+       } else {
+               /*
+                * In the non-split case both 'num_split_inos' and
+                * 'num_split_realms' should be 0, making this a no-op.
+                * However, the MDS happens to populate the 'split_realms'
+                * list in one of the UPDATE op cases by mistake.
+                *
+                * Skip both lists just in case to ensure that 'p' is
+                * positioned at the start of realm info, as expected by
+                * ceph_update_snap_trace().
+                */
+               p += sizeof(u64) * num_split_inos;
+               p += sizeof(u64) * num_split_realms;
        }
 
        /*
index ece7bad..88740c5 100644 (file)
@@ -371,7 +371,9 @@ static int zap_process(struct task_struct *start, int exit_code)
                if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
-                       nr++;
+                       /* The vhost_worker does not participate in coredumps */
+                       if ((t->flags & (PF_USER_WORKER | PF_IO_WORKER)) != PF_USER_WORKER)
+                               nr++;
                }
        }
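
The flag test added above is easy to misread, so here is a hedged sketch of which threads still count toward the coredump wait (helper name hypothetical):

    /* Only a bare PF_USER_WORKER (a vhost worker) is skipped; io_uring
     * workers carry PF_IO_WORKER as well and are still counted, as are
     * ordinary threads with neither flag set. */
    static bool counts_for_coredump(unsigned int flags)
    {
            return (flags & (PF_USER_WORKER | PF_IO_WORKER)) != PF_USER_WORKER;
    }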
 
index 704fb59..f259d92 100644 (file)
@@ -121,6 +121,7 @@ config EROFS_FS_PCPU_KTHREAD
 config EROFS_FS_PCPU_KTHREAD_HIPRI
        bool "EROFS high priority per-CPU kthread workers"
        depends on EROFS_FS_ZIP && EROFS_FS_PCPU_KTHREAD
+       default y
        help
          This permits EROFS to configure per-CPU kthread workers to run
          at higher priority.
index 99bbc59..a3a98fc 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 obj-$(CONFIG_EROFS_FS) += erofs.o
-erofs-objs := super.o inode.o data.o namei.o dir.o utils.o pcpubuf.o sysfs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o sysfs.o
 erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
-erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o pcpubuf.o
 erofs-$(CONFIG_EROFS_FS_ZIP_LZMA) += decompressor_lzma.o
 erofs-$(CONFIG_EROFS_FS_ONDEMAND) += fscache.o
index af0431a..1e39c03 100644 (file)
@@ -472,12 +472,6 @@ static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
        return NULL;
 }
 
-void *erofs_get_pcpubuf(unsigned int requiredpages);
-void erofs_put_pcpubuf(void *ptr);
-int erofs_pcpubuf_growsize(unsigned int nrpages);
-void __init erofs_pcpubuf_init(void);
-void erofs_pcpubuf_exit(void);
-
 int erofs_register_sysfs(struct super_block *sb);
 void erofs_unregister_sysfs(struct super_block *sb);
 int __init erofs_init_sysfs(void);
@@ -512,6 +506,11 @@ int z_erofs_load_lz4_config(struct super_block *sb,
                            struct z_erofs_lz4_cfgs *lz4, int len);
 int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
                            int flags);
+void *erofs_get_pcpubuf(unsigned int requiredpages);
+void erofs_put_pcpubuf(void *ptr);
+int erofs_pcpubuf_growsize(unsigned int nrpages);
+void __init erofs_pcpubuf_init(void);
+void erofs_pcpubuf_exit(void);
 #else
 static inline void erofs_shrinker_register(struct super_block *sb) {}
 static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -529,6 +528,8 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
        }
        return 0;
 }
+static inline void erofs_pcpubuf_init(void) {}
+static inline void erofs_pcpubuf_exit(void) {}
 #endif /* !CONFIG_EROFS_FS_ZIP */
 
 #ifdef CONFIG_EROFS_FS_ZIP_LZMA
index cd80499..bbfe7ce 100644 (file)
@@ -675,7 +675,7 @@ int erofs_xattr_prefixes_init(struct super_block *sb)
        if (!pfs)
                return -ENOMEM;
 
-       if (erofs_sb_has_fragments(sbi))
+       if (sbi->packed_inode)
                buf.inode = sbi->packed_inode;
        else
                erofs_init_metabuf(&buf, sb);
index 45f21db..160b3da 100644 (file)
@@ -369,8 +369,6 @@ static struct kthread_worker *erofs_init_percpu_worker(int cpu)
                return worker;
        if (IS_ENABLED(CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI))
                sched_set_fifo_low(worker->task);
-       else
-               sched_set_normal(worker->task, 0);
        return worker;
 }
 
index 6948d67..8104a21 100644 (file)
@@ -918,11 +918,13 @@ do {                                                                             \
  *                       where the second inode has larger inode number
  *                       than the first
  *  I_DATA_SEM_QUOTA  - Used for quota inodes only
+ *  I_DATA_SEM_EA     - Used for ea_inodes only
  */
 enum {
        I_DATA_SEM_NORMAL = 0,
        I_DATA_SEM_OTHER,
        I_DATA_SEM_QUOTA,
+       I_DATA_SEM_EA
 };
 
 
@@ -2901,7 +2903,8 @@ typedef enum {
        EXT4_IGET_NORMAL =      0,
        EXT4_IGET_SPECIAL =     0x0001, /* OK to iget a system inode */
        EXT4_IGET_HANDLE =      0x0002, /* Inode # is from a handle */
-       EXT4_IGET_BAD =         0x0004  /* Allow to iget a bad inode */
+       EXT4_IGET_BAD =         0x0004, /* Allow to iget a bad inode */
+       EXT4_IGET_EA_INODE =    0x0008  /* Inode should contain an EA value */
 } ext4_iget_flags;
 
 extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
index f65fdb2..2a14320 100644 (file)
@@ -108,6 +108,13 @@ static int ext4_fsync_journal(struct inode *inode, bool datasync,
        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
        tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
 
+       /*
+        * Fastcommit does not really support fsync on directories or other
+        * special files. Force a full commit.
+        */
+       if (!S_ISREG(inode->i_mode))
+               return ext4_force_commit(inode->i_sb);
+
        if (journal->j_flags & JBD2_BARRIER &&
            !jbd2_trans_will_send_data_barrier(journal, commit_tid))
                *needs_barrier = true;
index ce5f21b..02de439 100644 (file)
@@ -4641,6 +4641,24 @@ static inline void ext4_inode_set_iversion_queried(struct inode *inode, u64 val)
                inode_set_iversion_queried(inode, val);
 }
 
+static const char *check_igot_inode(struct inode *inode, ext4_iget_flags flags)
+{
+       if (flags & EXT4_IGET_EA_INODE) {
+               if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+                       return "missing EA_INODE flag";
+               if (ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
+                   EXT4_I(inode)->i_file_acl)
+                       return "ea_inode with extended attributes";
+       } else {
+               if ((EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL))
+                       return "unexpected EA_INODE flag";
+       }
+       if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD))
+               return "unexpected bad inode w/o EXT4_IGET_BAD";
+       return NULL;
+}
+
 struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
                          ext4_iget_flags flags, const char *function,
                          unsigned int line)
@@ -4650,6 +4668,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
        struct ext4_inode_info *ei;
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct inode *inode;
+       const char *err_str;
        journal_t *journal = EXT4_SB(sb)->s_journal;
        long ret;
        loff_t size;
@@ -4677,8 +4696,14 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
        inode = iget_locked(sb, ino);
        if (!inode)
                return ERR_PTR(-ENOMEM);
-       if (!(inode->i_state & I_NEW))
+       if (!(inode->i_state & I_NEW)) {
+               if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+                       ext4_error_inode(inode, function, line, 0, err_str);
+                       iput(inode);
+                       return ERR_PTR(-EFSCORRUPTED);
+               }
                return inode;
+       }
 
        ei = EXT4_I(inode);
        iloc.bh = NULL;
@@ -4944,10 +4969,9 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
        if (IS_CASEFOLDED(inode) && !ext4_has_feature_casefold(inode->i_sb))
                ext4_error_inode(inode, function, line, 0,
                                 "casefold flag without casefold feature");
-       if (is_bad_inode(inode) && !(flags & EXT4_IGET_BAD)) {
-               ext4_error_inode(inode, function, line, 0,
-                                "bad inode without EXT4_IGET_BAD flag");
-               ret = -EUCLEAN;
+       if ((err_str = check_igot_inode(inode, flags)) != NULL) {
+               ext4_error_inode(inode, function, line, 0, err_str);
+               ret = -EFSCORRUPTED;
                goto bad_inode;
        }
 
index 7b2e36d..20f67a2 100644 (file)
@@ -2062,7 +2062,7 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
        if (bex->fe_len < gex->fe_len)
                return;
 
-       if (finish_group)
+       if (finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
                ext4_mb_use_best_found(ac, e4b);
 }
 
@@ -2074,6 +2074,20 @@ static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
  * in the context. Later, the best found extent will be used, if
  * mballoc can't find good enough extent.
  *
+ * The algorithm used is roughly as follows:
+ *
+ * * If the free extent found is exactly as big as the goal, then
+ *   stop the scan and use it immediately
+ *
+ * * If the free extent found is smaller than the goal, keep retrying
+ *   up to a max of sbi->s_mb_max_to_scan times (default 200). After
+ *   that, stop scanning and use whatever we have.
+ *
+ * * If the free extent found is bigger than the goal, keep retrying
+ *   up to a max of sbi->s_mb_min_to_scan times (default 10) before
+ *   stopping the scan and using the extent.
+ *
+ *
  * FIXME: real allocation policy is to be designed yet!
  */
 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
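
Condensing the comment above into a single hedged stopping rule (a sketch only, not the actual mballoc control flow):

    bool stop = (bex->fe_len == gex->fe_len) ||               /* exact fit */
                (bex->fe_len > gex->fe_len &&
                 ac->ac_found > sbi->s_mb_min_to_scan) ||     /* default 10 */
                (ac->ac_found > sbi->s_mb_max_to_scan);       /* default 200 */

    if (stop)
            ext4_mb_use_best_found(ac, e4b);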
index 9680fe7..05fcecc 100644 (file)
@@ -6388,7 +6388,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
        struct ext4_mount_options old_opts;
        ext4_group_t g;
        int err = 0;
-       int enable_rw = 0;
 #ifdef CONFIG_QUOTA
        int enable_quota = 0;
        int i, j;
@@ -6575,7 +6574,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
                        if (err)
                                goto restore_opts;
 
-                       enable_rw = 1;
+                       sb->s_flags &= ~SB_RDONLY;
                        if (ext4_has_feature_mmp(sb)) {
                                err = ext4_multi_mount_protect(sb,
                                                le64_to_cpu(es->s_mmp_block));
@@ -6589,18 +6588,6 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
        }
 
        /*
-        * Reinitialize lazy itable initialization thread based on
-        * current settings
-        */
-       if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
-               ext4_unregister_li_request(sb);
-       else {
-               ext4_group_t first_not_zeroed;
-               first_not_zeroed = ext4_has_uninit_itable(sb);
-               ext4_register_li_request(sb, first_not_zeroed);
-       }
-
-       /*
         * Handle creation of system zone data early because it can fail.
         * Releasing of existing data is done when we are sure remount will
         * succeed.
@@ -6634,8 +6621,17 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
        if (!test_opt(sb, BLOCK_VALIDITY) && sbi->s_system_blks)
                ext4_release_system_zone(sb);
 
-       if (enable_rw)
-               sb->s_flags &= ~SB_RDONLY;
+       /*
+        * Reinitialize lazy itable initialization thread based on
+        * current settings
+        */
+       if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
+               ext4_unregister_li_request(sb);
+       else {
+               ext4_group_t first_not_zeroed;
+               first_not_zeroed = ext4_has_uninit_itable(sb);
+               ext4_register_li_request(sb, first_not_zeroed);
+       }
 
        if (!ext4_has_feature_mmp(sb) || sb_rdonly(sb))
                ext4_stop_mmpd(sbi);
index dfc2e22..321e3a8 100644 (file)
@@ -121,7 +121,11 @@ ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
 #ifdef CONFIG_LOCKDEP
 void ext4_xattr_inode_set_class(struct inode *ea_inode)
 {
+       struct ext4_inode_info *ei = EXT4_I(ea_inode);
+
        lockdep_set_subclass(&ea_inode->i_rwsem, 1);
+       (void) ei;      /* shut up clang warning if !CONFIG_LOCKDEP */
+       lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
 }
 #endif
 
@@ -433,7 +437,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
                return -EFSCORRUPTED;
        }
 
-       inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
+       inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_EA_INODE);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                ext4_error(parent->i_sb,
@@ -441,23 +445,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
                           err);
                return err;
        }
-
-       if (is_bad_inode(inode)) {
-               ext4_error(parent->i_sb,
-                          "error while reading EA inode %lu is_bad_inode",
-                          ea_ino);
-               err = -EIO;
-               goto error;
-       }
-
-       if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
-               ext4_error(parent->i_sb,
-                          "EA inode %lu does not have EXT4_EA_INODE_FL flag",
-                           ea_ino);
-               err = -EINVAL;
-               goto error;
-       }
-
        ext4_xattr_inode_set_class(inode);
 
        /*
@@ -478,9 +465,6 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
 
        *ea_inode = inode;
        return 0;
-error:
-       iput(inode);
-       return err;
 }
 
 /* Remove entry from mbcache when EA inode is getting evicted */
@@ -1556,11 +1540,11 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
 
        while (ce) {
                ea_inode = ext4_iget(inode->i_sb, ce->e_value,
-                                    EXT4_IGET_NORMAL);
-               if (!IS_ERR(ea_inode) &&
-                   !is_bad_inode(ea_inode) &&
-                   (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
-                   i_size_read(ea_inode) == value_len &&
+                                    EXT4_IGET_EA_INODE);
+               if (IS_ERR(ea_inode))
+                       goto next_entry;
+               ext4_xattr_inode_set_class(ea_inode);
+               if (i_size_read(ea_inode) == value_len &&
                    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
                    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
                                                    value_len) &&
@@ -1570,9 +1554,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
                        kvfree(ea_data);
                        return ea_inode;
                }
-
-               if (!IS_ERR(ea_inode))
-                       iput(ea_inode);
+               iput(ea_inode);
+       next_entry:
                ce = mb_cache_entry_find_next(ea_inode_cache, ce);
        }
        kvfree(ea_data);
@@ -2073,8 +2056,9 @@ inserted:
                        else {
                                u32 ref;
 
+#ifdef EXT4_XATTR_DEBUG
                                WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
                                /* The old block is released after updating
                                   the inode. */
                                error = dquot_alloc_block(inode,
@@ -2137,8 +2121,9 @@ inserted:
                        /* We need to allocate a new block */
                        ext4_fsblk_t goal, block;
 
+#ifdef EXT4_XATTR_DEBUG
                        WARN_ON_ONCE(dquot_initialize_needed(inode));
-
+#endif
                        goal = ext4_group_first_block_no(sb,
                                                EXT4_I(inode)->i_block_group);
                        block = ext4_new_meta_blocks(handle, inode, goal, 0,
index 300844f..cb62c8f 100644 (file)
@@ -784,9 +784,13 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
        if (!user_backed_iter(i))
                return false;
 
+       /*
+        * Try to fault in multiple pages initially.  When that doesn't result
+        * in any progress, fall back to a single page.
+        */
        size = PAGE_SIZE;
        offs = offset_in_page(iocb->ki_pos);
-       if (*prev_count != count || !*window_size) {
+       if (*prev_count != count) {
                size_t nr_dirtied;
 
                nr_dirtied = max(current->nr_dirtied_pause -
@@ -870,6 +874,7 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from,
        struct gfs2_inode *ip = GFS2_I(inode);
        size_t prev_count = 0, window_size = 0;
        size_t written = 0;
+       bool enough_retries;
        ssize_t ret;
 
        /*
@@ -913,11 +918,17 @@ retry:
        if (ret > 0)
                written = ret;
 
+       enough_retries = prev_count == iov_iter_count(from) &&
+                        window_size <= PAGE_SIZE;
        if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
                gfs2_glock_dq(gh);
                window_size -= fault_in_iov_iter_readable(from, window_size);
-               if (window_size)
-                       goto retry;
+               if (window_size) {
+                       if (!enough_retries)
+                               goto retry;
+                       /* fall back to buffered I/O */
+                       ret = 0;
+               }
        }
 out_unlock:
        if (gfs2_holder_queued(gh))
index bb94949..04ba95b 100644 (file)
@@ -77,9 +77,9 @@ static const unsigned long    nlm_grace_period_min = 0;
 static const unsigned long     nlm_grace_period_max = 240;
 static const unsigned long     nlm_timeout_min = 3;
 static const unsigned long     nlm_timeout_max = 20;
-static const int               nlm_port_min = 0, nlm_port_max = 65535;
 
 #ifdef CONFIG_SYSCTL
+static const int               nlm_port_min = 0, nlm_port_max = 65535;
 static struct ctl_table_header * nlm_sysctl_table;
 #endif
 
index e63c1d4..8f3112e 100644 (file)
@@ -317,7 +317,7 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
 
        name = nfs_readdir_copy_name(entry->name, entry->len);
 
-       array = kmap_atomic(folio_page(folio, 0));
+       array = kmap_local_folio(folio, 0);
        if (!name)
                goto out;
        ret = nfs_readdir_array_can_expand(array);
@@ -340,7 +340,7 @@ static int nfs_readdir_folio_array_append(struct folio *folio,
                nfs_readdir_array_set_eof(array);
 out:
        *cookie = array->last_cookie;
-       kunmap_atomic(array);
+       kunmap_local(array);
        return ret;
 }
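
This hunk replaces the deprecated kmap_atomic() with kmap_local_folio(). Local mappings still nest strictly and must be unmapped in reverse order, but unlike atomic ones they leave preemption and page faults enabled. A hedged sketch of the pairing:

    #include <linux/highmem.h>

    struct nfs_cache_array *array;

    array = kmap_local_folio(folio, 0);     /* map offset 0 of the folio */
    /* ... access the array; sleeping is allowed in a local mapping ... */
    kunmap_local(array);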
 
index 18f25ff..d366539 100644 (file)
@@ -5437,10 +5437,18 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
        return false;
 }
 
-static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+static inline void nfs4_read_plus_scratch_free(struct nfs_pgio_header *hdr)
 {
-       if (hdr->res.scratch)
+       if (hdr->res.scratch) {
                kfree(hdr->res.scratch);
+               hdr->res.scratch = NULL;
+       }
+}
+
+static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
+{
+       nfs4_read_plus_scratch_free(hdr);
+
        if (!nfs4_sequence_done(task, &hdr->res.seq_res))
                return -EAGAIN;
        if (nfs4_read_stateid_changed(task, &hdr->args))
index 7b8f17e..b4fd7a7 100644 (file)
@@ -153,18 +153,6 @@ static int exports_net_open(struct net *net, struct file *file)
        return 0;
 }
 
-static int exports_proc_open(struct inode *inode, struct file *file)
-{
-       return exports_net_open(current->nsproxy->net_ns, file);
-}
-
-static const struct proc_ops exports_proc_ops = {
-       .proc_open      = exports_proc_open,
-       .proc_read      = seq_read,
-       .proc_lseek     = seq_lseek,
-       .proc_release   = seq_release,
-};
-
 static int exports_nfsd_open(struct inode *inode, struct file *file)
 {
        return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -702,16 +690,11 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
        if (err != 0 || fd < 0)
                return -EINVAL;
 
-       if (svc_alien_sock(net, fd)) {
-               printk(KERN_ERR "%s: socket net is different to NFSd's one\n", __func__);
-               return -EINVAL;
-       }
-
        err = nfsd_create_serv(net);
        if (err != 0)
                return err;
 
-       err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
+       err = svc_addsock(nn->nfsd_serv, net, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
 
        if (err >= 0 &&
            !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
@@ -1458,6 +1441,19 @@ static struct file_system_type nfsd_fs_type = {
 MODULE_ALIAS_FS("nfsd");
 
 #ifdef CONFIG_PROC_FS
+
+static int exports_proc_open(struct inode *inode, struct file *file)
+{
+       return exports_net_open(current->nsproxy->net_ns, file);
+}
+
+static const struct proc_ops exports_proc_ops = {
+       .proc_open      = exports_proc_open,
+       .proc_read      = seq_read,
+       .proc_lseek     = seq_lseek,
+       .proc_release   = seq_release,
+};
+
 static int create_proc_exports_entry(void)
 {
        struct proc_dir_entry *entry;
index 4183819..72a906a 100644 (file)
@@ -1365,19 +1365,19 @@ TRACE_EVENT(nfsd_cb_setup,
                __field(u32, cl_id)
                __field(unsigned long, authflavor)
                __sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
-               __array(unsigned char, netid, 8)
+               __string(netid, netid)
        ),
        TP_fast_assign(
                __entry->cl_boot = clp->cl_clientid.cl_boot;
                __entry->cl_id = clp->cl_clientid.cl_id;
-               strlcpy(__entry->netid, netid, sizeof(__entry->netid));
+               __assign_str(netid, netid);
                __entry->authflavor = authflavor;
                __assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
                                  clp->cl_cb_conn.cb_addrlen)
        ),
        TP_printk("addr=%pISpc client %08x:%08x proto=%s flavor=%s",
                __get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
-               __entry->netid, show_nfsd_authflavor(__entry->authflavor))
+               __get_str(netid), show_nfsd_authflavor(__entry->authflavor))
 );
 
 TRACE_EVENT(nfsd_cb_setup_err,
index bb9d471..db67f8e 100644 (file)
@@ -536,7 +536,15 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
        inode_lock(inode);
        for (retries = 1;;) {
-               host_err = __nfsd_setattr(dentry, iap);
+               struct iattr attrs;
+
+               /*
+                * notify_change() can alter its iattr argument, making
+                * @iap unsuitable for submission multiple times. Make a
+                * copy for every loop iteration.
+                */
+               attrs = *iap;
+               host_err = __nfsd_setattr(dentry, &attrs);
                if (host_err != -EAGAIN || !retries--)
                        break;
                if (!nfsd_wait_for_delegreturn(rqstp, inode))
index 1310d2d..a8ce522 100644 (file)
@@ -917,6 +917,7 @@ void nilfs_evict_inode(struct inode *inode)
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
+       struct the_nilfs *nilfs;
        int ret;
 
        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
@@ -929,6 +930,23 @@ void nilfs_evict_inode(struct inode *inode)
 
        truncate_inode_pages_final(&inode->i_data);
 
+       nilfs = sb->s_fs_info;
+       if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) {
+               /*
+                * If this inode is about to be disposed after the file system
+                * has been degraded to read-only due to file system corruption
+                * or after the writer has been detached, do not make any
+                * changes that cause writes, just clear it.
+                * Do this check after read-locking ns_segctor_sem by
+                * nilfs_transaction_begin() in order to avoid a race with
+                * the writer detach operation.
+                */
+               clear_inode(inode);
+               nilfs_clear_inode(inode);
+               nilfs_transaction_abort(sb);
+               return;
+       }
+
        /* TODO: some of the following operations may fail.  */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
diff --git a/fs/smb/Kconfig b/fs/smb/Kconfig
new file mode 100644 (file)
index 0000000..ef42578
--- /dev/null
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# smbfs configuration
+
+source "fs/smb/client/Kconfig"
+source "fs/smb/server/Kconfig"
+
+config SMBFS
+       tristate
+       default y if CIFS=y || SMB_SERVER=y
+       default m if CIFS=m || SMB_SERVER=m
diff --git a/fs/smb/Makefile b/fs/smb/Makefile
new file mode 100644 (file)
index 0000000..9a1bf59
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SMBFS)            += common/
+obj-$(CONFIG_CIFS)             += client/
+obj-$(CONFIG_SMB_SERVER)       += server/
similarity index 100%
rename from fs/cifs/Kconfig
rename to fs/smb/client/Kconfig
similarity index 100%
rename from fs/cifs/Makefile
rename to fs/smb/client/Makefile
similarity index 100%
rename from fs/cifs/asn1.c
rename to fs/smb/client/asn1.c
similarity index 99%
rename from fs/cifs/cifs_debug.c
rename to fs/smb/client/cifs_debug.c
index d4ed200..5034b86 100644 (file)
@@ -108,7 +108,7 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
        if ((tcon->seal) ||
            (tcon->ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) ||
            (tcon->share_flags & SHI1005_FLAGS_ENCRYPT_DATA))
-               seq_printf(m, " Encrypted");
+               seq_puts(m, " encrypted");
        if (tcon->nocase)
                seq_printf(m, " nocase");
        if (tcon->unix_ext)
@@ -415,8 +415,12 @@ skip_rdma:
 
                        /* dump session id helpful for use with network trace */
                        seq_printf(m, " SessionId: 0x%llx", ses->Suid);
-                       if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
+                       if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) {
                                seq_puts(m, " encrypted");
+                               /* can help in debugging to show encryption type */
+                               if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
+                                       seq_puts(m, "(gcm256)");
+                       }
                        if (ses->sign)
                                seq_puts(m, " signed");
 
similarity index 100%
rename from fs/cifs/cifs_swn.c
rename to fs/smb/client/cifs_swn.c
similarity index 100%
rename from fs/cifs/cifs_swn.h
rename to fs/smb/client/cifs_swn.h
similarity index 100%
rename from fs/cifs/cifsacl.c
rename to fs/smb/client/cifsacl.c
similarity index 100%
rename from fs/cifs/cifsacl.h
rename to fs/smb/client/cifsacl.h
similarity index 99%
rename from fs/cifs/cifsencrypt.c
rename to fs/smb/client/cifsencrypt.c
index 357bd27..ef4c2e3 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/fips.h>
-#include "../smbfs_common/arc4.h"
+#include "../common/arc4.h"
 #include <crypto/aead.h>
 
 /*
similarity index 100%
rename from fs/cifs/cifsfs.c
rename to fs/smb/client/cifsfs.c
similarity index 100%
rename from fs/cifs/cifsfs.h
rename to fs/smb/client/cifsfs.h
similarity index 99%
rename from fs/cifs/cifsglob.h
rename to fs/smb/client/cifsglob.h
index 414685c..0d84bb1 100644 (file)
@@ -24,7 +24,7 @@
 #include "cifsacl.h"
 #include <crypto/internal/hash.h>
 #include <uapi/linux/cifs/cifs_mount.h>
-#include "../smbfs_common/smb2pdu.h"
+#include "../common/smb2pdu.h"
 #include "smb2pdu.h"
 #include <linux/filelock.h>
 
@@ -424,8 +424,8 @@ struct smb_version_operations {
        /* check for STATUS_NETWORK_SESSION_EXPIRED */
        bool (*is_session_expired)(char *);
        /* send oplock break response */
-       int (*oplock_response)(struct cifs_tcon *, struct cifs_fid *,
-                              struct cifsInodeInfo *);
+       int (*oplock_response)(struct cifs_tcon *tcon, __u64 persistent_fid, __u64 volatile_fid,
+                       __u16 net_fid, struct cifsInodeInfo *cifs_inode);
        /* query remote filesystem */
        int (*queryfs)(const unsigned int, struct cifs_tcon *,
                       struct cifs_sb_info *, struct kstatfs *);
similarity index 99%
rename from fs/cifs/cifspdu.h
rename to fs/smb/client/cifspdu.h
index 445e3ea..e17222f 100644 (file)
@@ -11,7 +11,7 @@
 
 #include <net/sock.h>
 #include <asm/unaligned.h>
-#include "../smbfs_common/smbfsctl.h"
+#include "../common/smbfsctl.h"
 
 #define CIFS_PROT   0
 #define POSIX_PROT  (CIFS_PROT+1)
similarity index 100%
rename from fs/cifs/cifsroot.c
rename to fs/smb/client/cifsroot.c
similarity index 100%
rename from fs/cifs/cifssmb.c
rename to fs/smb/client/cifssmb.c
similarity index 100%
rename from fs/cifs/connect.c
rename to fs/smb/client/connect.c
similarity index 99%
rename from fs/cifs/dfs.c
rename to fs/smb/client/dfs.c
index a93dbca..2f93bf8 100644 (file)
@@ -303,7 +303,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs)
        if (!nodfs) {
                rc = dfs_get_referral(mnt_ctx, ctx->UNC + 1, NULL, NULL);
                if (rc) {
-                       if (rc != -ENOENT && rc != -EOPNOTSUPP)
+                       if (rc != -ENOENT && rc != -EOPNOTSUPP && rc != -EIO)
                                goto out;
                        nodfs = true;
                }
similarity index 100%
rename from fs/cifs/dfs.h
rename to fs/smb/client/dfs.h
similarity index 100%
rename from fs/cifs/dir.c
rename to fs/smb/client/dir.c
similarity index 100%
rename from fs/cifs/export.c
rename to fs/smb/client/export.c
similarity index 99%
rename from fs/cifs/file.c
rename to fs/smb/client/file.c
index c5fcefd..df88b8c 100644 (file)
@@ -3353,9 +3353,10 @@ static size_t cifs_limit_bvec_subset(const struct iov_iter *iter, size_t max_siz
        while (n && ix < nbv) {
                len = min3(n, bvecs[ix].bv_len - skip, max_size);
                span += len;
+               max_size -= len;
                nsegs++;
                ix++;
-               if (span >= max_size || nsegs >= max_segs)
+               if (max_size == 0 || nsegs >= max_segs)
                        break;
                skip = 0;
                n -= len;
@@ -4881,9 +4882,9 @@ void cifs_oplock_break(struct work_struct *work)
        struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
        struct TCP_Server_Info *server = tcon->ses->server;
        int rc = 0;
-       bool purge_cache = false;
-       struct cifs_deferred_close *dclose;
-       bool is_deferred = false;
+       bool purge_cache = false, oplock_break_cancelled;
+       __u64 persistent_fid, volatile_fid;
+       __u16 net_fid;
 
        wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
                        TASK_UNINTERRUPTIBLE);
@@ -4924,28 +4925,28 @@ oplock_break_ack:
         * file handles but cached, then schedule deferred close immediately.
         * So, new open will not use cached handle.
         */
-       spin_lock(&CIFS_I(inode)->deferred_lock);
-       is_deferred = cifs_is_deferred_close(cfile, &dclose);
-       spin_unlock(&CIFS_I(inode)->deferred_lock);
 
-       if (!CIFS_CACHE_HANDLE(cinode) && is_deferred &&
-                       cfile->deferred_close_scheduled && delayed_work_pending(&cfile->deferred)) {
+       if (!CIFS_CACHE_HANDLE(cinode) && !list_empty(&cinode->deferred_closes))
                cifs_close_deferred_file(cinode);
-       }
 
+       persistent_fid = cfile->fid.persistent_fid;
+       volatile_fid = cfile->fid.volatile_fid;
+       net_fid = cfile->fid.netfid;
+       oplock_break_cancelled = cfile->oplock_break_cancelled;
+
+       _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        /*
         * releasing stale oplock after recent reconnect of smb session using
         * a now incorrect file handle is not a data integrity issue but do
         * not bother sending an oplock release if session to server still is
         * disconnected since oplock already released by the server
         */
-       if (!cfile->oplock_break_cancelled) {
-               rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
-                                                            cinode);
+       if (!oplock_break_cancelled) {
+               rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+                               volatile_fid, net_fid, cinode);
                cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
        }
 
-       _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false);
        cifs_done_oplock_break(cinode);
 }
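
The cifs_oplock_break() reordering follows the usual copy-before-put shape: everything still needed from cfile is snapshotted into locals before _cifsFileInfo_put() drops the reference, and only the copies are used afterwards. A condensed hedged sketch:

    __u64 persistent_fid = cfile->fid.persistent_fid;
    __u64 volatile_fid   = cfile->fid.volatile_fid;
    __u16 net_fid        = cfile->fid.netfid;
    bool  cancelled      = cfile->oplock_break_cancelled;

    _cifsFileInfo_put(cfile, false, false); /* cfile may be freed now */
    if (!cancelled)
            rc = tcon->ses->server->ops->oplock_response(tcon,
                            persistent_fid, volatile_fid, net_fid, cinode);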
 
similarity index 99%
rename from fs/cifs/fs_context.c
rename to fs/smb/client/fs_context.c
index ace11a1..1bda756 100644 (file)
@@ -904,6 +904,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
                        ctx->sfu_remap = false; /* disable SFU mapping */
                }
                break;
+       case Opt_mapchars:
+               if (result.negated)
+                       ctx->sfu_remap = false;
+               else {
+                       ctx->sfu_remap = true;
+                       ctx->remap = false; /* disable SFM (mapposix) mapping */
+               }
+               break;
        case Opt_user_xattr:
                if (result.negated)
                        ctx->no_xattr = 1;
similarity index 100%
rename from fs/cifs/fscache.c
rename to fs/smb/client/fscache.c
similarity index 100%
rename from fs/cifs/fscache.h
rename to fs/smb/client/fscache.h
similarity index 100%
rename from fs/cifs/inode.c
rename to fs/smb/client/inode.c
similarity index 98%
rename from fs/cifs/ioctl.c
rename to fs/smb/client/ioctl.c
index cb3be58..fff092b 100644 (file)
@@ -321,7 +321,11 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
        struct tcon_link *tlink;
        struct cifs_sb_info *cifs_sb;
        __u64   ExtAttrBits = 0;
+#ifdef CONFIG_CIFS_POSIX
+#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
        __u64   caps;
+#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+#endif /* CONFIG_CIFS_POSIX */
 
        xid = get_xid();
 
@@ -331,9 +335,9 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        if (pSMBFile == NULL)
                                break;
                        tcon = tlink_tcon(pSMBFile->tlink);
-                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 #ifdef CONFIG_CIFS_POSIX
 #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+                       caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
                        if (CIFS_UNIX_EXTATTR_CAP & caps) {
                                __u64   ExtAttrMask = 0;
                                rc = CIFSGetExtAttr(xid, tcon,
similarity index 100%
rename from fs/cifs/link.c
rename to fs/smb/client/link.c
similarity index 100%
rename from fs/cifs/misc.c
rename to fs/smb/client/misc.c
similarity index 100%
rename from fs/cifs/netlink.c
rename to fs/smb/client/netlink.c
similarity index 100%
rename from fs/cifs/netlink.h
rename to fs/smb/client/netlink.h
similarity index 100%
rename from fs/cifs/netmisc.c
rename to fs/smb/client/netmisc.c
similarity index 100%
rename from fs/cifs/nterr.c
rename to fs/smb/client/nterr.c
similarity index 100%
rename from fs/cifs/nterr.h
rename to fs/smb/client/nterr.h
similarity index 100%
rename from fs/cifs/ntlmssp.h
rename to fs/smb/client/ntlmssp.h
similarity index 100%
rename from fs/cifs/readdir.c
rename to fs/smb/client/readdir.c
similarity index 100%
rename from fs/cifs/sess.c
rename to fs/smb/client/sess.c
similarity index 99%
rename from fs/cifs/smb1ops.c
rename to fs/smb/client/smb1ops.c
index abda614..7d1b3fc 100644 (file)
@@ -897,12 +897,11 @@ cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon,
 }
 
 static int
-cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
-                    struct cifsInodeInfo *cinode)
+cifs_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+               __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
 {
-       return CIFSSMBLock(0, tcon, fid->netfid, current->tgid, 0, 0, 0, 0,
-                          LOCKING_ANDX_OPLOCK_RELEASE, false,
-                          CIFS_CACHE_READ(cinode) ? 1 : 0);
+       return CIFSSMBLock(0, tcon, net_fid, current->tgid, 0, 0, 0, 0,
+                          LOCKING_ANDX_OPLOCK_RELEASE, false, CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
 
 static int
similarity index 100%
rename from fs/cifs/smb2file.c
rename to fs/smb/client/smb2file.c
similarity index 100%
rename from fs/cifs/smb2glob.h
rename to fs/smb/client/smb2glob.h
similarity index 100%
rename from fs/cifs/smb2misc.c
rename to fs/smb/client/smb2misc.c
similarity index 99%
rename from fs/cifs/smb2ops.c
rename to fs/smb/client/smb2ops.c
index a295e4c..6e3be58 100644 (file)
@@ -618,7 +618,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                 * Add a new one instead
                 */
                spin_lock(&ses->iface_lock);
-               iface = niface = NULL;
                list_for_each_entry_safe(iface, niface, &ses->iface_list,
                                         iface_head) {
                        ret = iface_cmp(iface, &tmp_iface);
@@ -2383,15 +2382,14 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
 }
 
 static int
-smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
-                    struct cifsInodeInfo *cinode)
+smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
+               __u64 volatile_fid, __u16 net_fid, struct cifsInodeInfo *cinode)
 {
        if (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
                return SMB2_lease_break(0, tcon, cinode->lease_key,
                                        smb2_get_lease_state(cinode));
 
-       return SMB2_oplock_break(0, tcon, fid->persistent_fid,
-                                fid->volatile_fid,
+       return SMB2_oplock_break(0, tcon, persistent_fid, volatile_fid,
                                 CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
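
Both the SMB1 and SMB2 ->oplock_response implementations now take the persistent/volatile/net file ids by value instead of a struct cifs_fid pointer, which lets a caller snapshot the ids and stop depending on the lifetime of the structure they came from. A small illustrative sketch of that decoupling (types and names are not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    struct fid { uint64_t persistent; uint64_t volatile_id; };

    static void send_break(uint64_t persistent, uint64_t volatile_id)
    {
            printf("break for %llu/%llu\n",
                   (unsigned long long)persistent,
                   (unsigned long long)volatile_id);
    }

    int main(void)
    {
            struct fid f = { 1, 2 };
            uint64_t p = f.persistent, v = f.volatile_id;

            /* the struct may be released here; the copies stay valid */
            send_break(p, v);
            return 0;
    }
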
 
similarity index 99%
rename from fs/cifs/smb2pdu.c
rename to fs/smb/client/smb2pdu.c
index 9ed61b6..7063b39 100644 (file)
@@ -3725,7 +3725,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
                if (*out_data == NULL) {
                        rc = -ENOMEM;
                        goto cnotify_exit;
-               } else
+               } else if (plen)
                        *plen = le32_to_cpu(smb_rsp->OutputBufferLength);
        }
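
The `else if (plen)` above stops SMB2_change_notify() from storing through a NULL output-length pointer when the caller did not ask for the length. The same shape as a runnable sketch (function and names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* An optional out-parameter must be NULL-checked before each store. */
    static int copy_payload(const char *src, size_t n,
                            char **out, size_t *out_len)
    {
            *out = malloc(n);
            if (*out == NULL)
                    return -1;
            memcpy(*out, src, n);
            if (out_len)            /* caller may not care about the length */
                    *out_len = n;
            return 0;
    }

    int main(void)
    {
            char *buf;

            if (copy_payload("abc", 3, &buf, NULL) == 0) {  /* NULL is legal */
                    fwrite(buf, 1, 3, stdout);
                    free(buf);
            }
            return 0;
    }
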
 
similarity index 100%
rename from fs/cifs/smb2pdu.h
rename to fs/smb/client/smb2pdu.h
similarity index 98%
rename from fs/cifs/smbencrypt.c
rename to fs/smb/client/smbencrypt.c
index 4a04877..f0ce264 100644 (file)
@@ -24,7 +24,7 @@
 #include "cifsglob.h"
 #include "cifs_debug.h"
 #include "cifsproto.h"
-#include "../smbfs_common/md4.h"
+#include "../common/md4.h"
 
 #ifndef false
 #define false 0
similarity index 100%
rename from fs/cifs/smberr.h
rename to fs/smb/client/smberr.h
similarity index 100%
rename from fs/cifs/trace.c
rename to fs/smb/client/trace.c
similarity index 100%
rename from fs/cifs/trace.h
rename to fs/smb/client/trace.h
similarity index 100%
rename from fs/cifs/unc.c
rename to fs/smb/client/unc.c
similarity index 100%
rename from fs/cifs/winucase.c
rename to fs/smb/client/winucase.c
similarity index 100%
rename from fs/cifs/xattr.c
rename to fs/smb/client/xattr.c
similarity index 59%
rename from fs/smbfs_common/Makefile
rename to fs/smb/common/Makefile
index cafc61a..c66dbbc 100644 (file)
@@ -3,5 +3,5 @@
 # Makefile for Linux filesystem routines that are shared by client and server.
 #
 
-obj-$(CONFIG_SMBFS_COMMON) += cifs_arc4.o
-obj-$(CONFIG_SMBFS_COMMON) += cifs_md4.o
+obj-$(CONFIG_SMBFS) += cifs_arc4.o
+obj-$(CONFIG_SMBFS) += cifs_md4.o
similarity index 100%
rename from fs/smbfs_common/arc4.h
rename to fs/smb/common/arc4.h
similarity index 100%
rename from fs/smbfs_common/md4.h
rename to fs/smb/common/md4.h
similarity index 100%
rename from fs/ksmbd/Kconfig
rename to fs/smb/server/Kconfig
similarity index 100%
rename from fs/ksmbd/Makefile
rename to fs/smb/server/Makefile
similarity index 100%
rename from fs/ksmbd/asn1.c
rename to fs/smb/server/asn1.c
similarity index 100%
rename from fs/ksmbd/asn1.h
rename to fs/smb/server/asn1.h
similarity index 99%
rename from fs/ksmbd/auth.c
rename to fs/smb/server/auth.c
index df8fb07..5e5e120 100644 (file)
@@ -29,7 +29,7 @@
 #include "mgmt/user_config.h"
 #include "crypto_ctx.h"
 #include "transport_ipc.h"
-#include "../smbfs_common/arc4.h"
+#include "../common/arc4.h"
 
 /*
  * Fixed format data defining GSS header and fixed string
similarity index 100%
rename from fs/ksmbd/auth.h
rename to fs/smb/server/auth.h
similarity index 99%
rename from fs/ksmbd/connection.c
rename to fs/smb/server/connection.c
index 4ed379f..4882a81 100644 (file)
@@ -351,7 +351,8 @@ int ksmbd_conn_handler_loop(void *p)
                        break;
 
                /* 4 for rfc1002 length field */
-               size = pdu_size + 4;
+               /* 1 for implied bcc[0] */
+               size = pdu_size + 4 + 1;
                conn->request_buf = kvmalloc(size, GFP_KERNEL);
                if (!conn->request_buf)
                        break;
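
The receive buffer is now sized one byte beyond the PDU plus the 4-byte RFC 1002 header, so parsers that treat the message as a structure ending in a 1-byte tail (the implied bcc[0]) cannot read past the allocation. A sketch of the sizing (RFC1002_HDR is an illustrative macro):

    #include <stdlib.h>

    #define RFC1002_HDR 4   /* NetBIOS session header bytes */

    /* one extra byte covers the implied bcc[0] tail */
    static void *alloc_request_buf(size_t pdu_size)
    {
            return calloc(1, pdu_size + RFC1002_HDR + 1);
    }

    int main(void)
    {
            void *buf = alloc_request_buf(512);
            int ok = buf != NULL;

            free(buf);
            return !ok;
    }
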
similarity index 100%
rename from fs/ksmbd/glob.h
rename to fs/smb/server/glob.h
similarity index 100%
rename from fs/ksmbd/misc.c
rename to fs/smb/server/misc.c
similarity index 100%
rename from fs/ksmbd/misc.h
rename to fs/smb/server/misc.h
similarity index 100%
rename from fs/ksmbd/ndr.c
rename to fs/smb/server/ndr.c
similarity index 100%
rename from fs/ksmbd/ndr.h
rename to fs/smb/server/ndr.h
similarity index 100%
rename from fs/ksmbd/nterr.h
rename to fs/smb/server/nterr.h
similarity index 100%
rename from fs/ksmbd/ntlmssp.h
rename to fs/smb/server/ntlmssp.h
similarity index 97%
rename from fs/ksmbd/oplock.c
rename to fs/smb/server/oplock.c
index 2e54ded..db181bd 100644 (file)
@@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
        rcu_read_lock();
        opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
                                        op_entry);
-       if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
-               opinfo = NULL;
+       if (opinfo) {
+               if (!atomic_inc_not_zero(&opinfo->refcount))
+                       opinfo = NULL;
+               else {
+                       atomic_inc(&opinfo->conn->r_count);
+                       if (ksmbd_conn_releasing(opinfo->conn)) {
+                               atomic_dec(&opinfo->conn->r_count);
+                               atomic_dec(&opinfo->refcount);
+                               opinfo = NULL;
+                       }
+               }
+       }
+
        rcu_read_unlock();
 
        return opinfo;
 }
 
+static void opinfo_conn_put(struct oplock_info *opinfo)
+{
+       struct ksmbd_conn *conn;
+
+       if (!opinfo)
+               return;
+
+       conn = opinfo->conn;
+       /*
+        * Check the waitqueue to drop pending requests on
+        * disconnection. waitqueue_active is safe because the
+        * condition check uses atomic operations.
+        */
+       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+               wake_up(&conn->r_count_q);
+       opinfo_put(opinfo);
+}
+
 void opinfo_put(struct oplock_info *opinfo)
 {
        if (!atomic_dec_and_test(&opinfo->refcount))
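
opinfo_get_list() now also takes a reference on the owning connection's r_count and backs out if the connection is already being torn down, and the new opinfo_conn_put() pairs the drop with a wakeup of r_count_q; the later hunks remove the equivalent bookkeeping from the async break workers. A compressed userspace sketch of the get/put protocol (C11 atomics standing in for the kernel primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct conn {
            atomic_int r_count;     /* in-flight requests */
            atomic_bool releasing;  /* set once teardown starts */
    };

    /* take a request reference unless the connection is shutting down */
    static bool conn_get(struct conn *c)
    {
            atomic_fetch_add(&c->r_count, 1);
            if (atomic_load(&c->releasing)) {
                    atomic_fetch_sub(&c->r_count, 1);
                    return false;   /* raced with teardown: back off */
            }
            return true;
    }

    static void conn_put(struct conn *c)
    {
            if (atomic_fetch_sub(&c->r_count, 1) == 1) {
                    /* last in-flight request gone; the kernel wakes
                     * r_count_q here so teardown can complete */
            }
    }

    int main(void)
    {
            struct conn c = { 0 };

            if (conn_get(&c))
                    conn_put(&c);
            return 0;
    }
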
@@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
 out:
        ksmbd_free_work_struct(work);
-       /*
-        * Checking waitqueue to dropping pending requests on
-        * disconnection. waitqueue_active is safe because it
-        * uses atomic operation for condition.
-        */
-       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-               wake_up(&conn->r_count_q);
 }
 
 /**
@@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
        work->conn = conn;
        work->sess = opinfo->sess;
 
-       atomic_inc(&conn->r_count);
        if (opinfo->op_state == OPLOCK_ACK_WAIT) {
                INIT_WORK(&work->work, __smb2_oplock_break_noti);
                ksmbd_queue_work(work);
@@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
 out:
        ksmbd_free_work_struct(work);
-       /*
-        * Checking waitqueue to dropping pending requests on
-        * disconnection. waitqueue_active is safe because it
-        * uses atomic operation for condition.
-        */
-       if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-               wake_up(&conn->r_count_q);
 }
 
 /**
@@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
        work->conn = conn;
        work->sess = opinfo->sess;
 
-       atomic_inc(&conn->r_count);
        if (opinfo->op_state == OPLOCK_ACK_WAIT) {
                list_for_each_safe(tmp, t, &opinfo->interim_list) {
                        struct ksmbd_work *in_work;
@@ -1144,8 +1157,10 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
        }
        prev_opinfo = opinfo_get_list(ci);
        if (!prev_opinfo ||
-           (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
+           (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
+               opinfo_conn_put(prev_opinfo);
                goto set_lev;
+       }
        prev_op_has_lease = prev_opinfo->is_lease;
        if (prev_op_has_lease)
                prev_op_state = prev_opinfo->o_lease->state;
@@ -1153,19 +1168,19 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
        if (share_ret < 0 &&
            prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
                err = share_ret;
-               opinfo_put(prev_opinfo);
+               opinfo_conn_put(prev_opinfo);
                goto err_out;
        }
 
        if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
            prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-               opinfo_put(prev_opinfo);
+               opinfo_conn_put(prev_opinfo);
                goto op_break_not_needed;
        }
 
        list_add(&work->interim_entry, &prev_opinfo->interim_list);
        err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
-       opinfo_put(prev_opinfo);
+       opinfo_conn_put(prev_opinfo);
        if (err == -ENOENT)
                goto set_lev;
        /* Check all oplock was freed by close */
@@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
                return;
        if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
            brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-               opinfo_put(brk_opinfo);
+               opinfo_conn_put(brk_opinfo);
                return;
        }
 
        brk_opinfo->open_trunc = is_trunc;
        list_add(&work->interim_entry, &brk_opinfo->interim_list);
        oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
-       opinfo_put(brk_opinfo);
+       opinfo_conn_put(brk_opinfo);
 }
 
 /**
@@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
        list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
                if (!atomic_inc_not_zero(&brk_op->refcount))
                        continue;
+
+               atomic_inc(&brk_op->conn->r_count);
+               if (ksmbd_conn_releasing(brk_op->conn)) {
+                       atomic_dec(&brk_op->conn->r_count);
+                       continue;
+               }
+
                rcu_read_unlock();
                if (brk_op->is_lease && (brk_op->o_lease->state &
                    (~(SMB2_LEASE_READ_CACHING_LE |
@@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
                brk_op->open_trunc = is_trunc;
                oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
 next:
-               opinfo_put(brk_op);
+               opinfo_conn_put(brk_op);
                rcu_read_lock();
        }
        rcu_read_unlock();
@@ -1449,11 +1471,12 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
  * smb2_find_context_vals() - find a particular context info in open request
  * @open_req:  buffer containing smb2 file open(create) request
  * @tag:       context name to search for
+ * @tag_len:   the length of @tag
  *
  * Return:     pointer to requested context, NULL if @str context not found
  *             or error pointer if name length is invalid.
  */
-struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
+struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len)
 {
        struct create_context *cc;
        unsigned int next = 0;
@@ -1492,7 +1515,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
                        return ERR_PTR(-EINVAL);
 
                name = (char *)cc + name_off;
-               if (memcmp(name, tag, name_len) == 0)
+               if (name_len == tag_len && !memcmp(name, tag, name_len))
                        return cc;
 
                remain_len -= next;
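
Requiring name_len == tag_len before the memcmp() means a short on-the-wire context name can no longer match as a prefix of a longer tag, and memcmp() never reads past the end of the shorter string. The check in isolation (illustrative helper):

    #include <stdbool.h>
    #include <string.h>

    /* compare a length-delimited wire name against a known tag */
    static bool tag_equal(const char *name, size_t name_len,
                          const char *tag, size_t tag_len)
    {
            return name_len == tag_len && memcmp(name, tag, name_len) == 0;
    }

    int main(void)
    {
            /* a 4-byte name must not match a 16-byte tag's prefix */
            return !(tag_equal("DH2Q", 4, "DH2Q", 4) &&
                     !tag_equal("DH2Q", 4, "DH2QDH2QDH2QDH2Q", 16));
    }
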
similarity index 99%
rename from fs/ksmbd/oplock.h
rename to fs/smb/server/oplock.h
index 0975344..4b0fe6d 100644 (file)
@@ -118,7 +118,7 @@ void create_durable_v2_rsp_buf(char *cc, struct ksmbd_file *fp);
 void create_mxac_rsp_buf(char *cc, int maximal_access);
 void create_disk_id_rsp_buf(char *cc, __u64 file_id, __u64 vol_id);
 void create_posix_rsp_buf(char *cc, struct ksmbd_file *fp);
-struct create_context *smb2_find_context_vals(void *open_req, const char *str);
+struct create_context *smb2_find_context_vals(void *open_req, const char *tag, int tag_len);
 struct oplock_info *lookup_lease_in_table(struct ksmbd_conn *conn,
                                          char *lease_key);
 int find_same_lease_key(struct ksmbd_session *sess, struct ksmbd_inode *ci,
similarity index 100%
rename from fs/ksmbd/server.c
rename to fs/smb/server/server.c
similarity index 100%
rename from fs/ksmbd/server.h
rename to fs/smb/server/server.h
similarity index 98%
rename from fs/ksmbd/smb2misc.c
rename to fs/smb/server/smb2misc.c
index fbdde42..0ffe663 100644 (file)
@@ -416,8 +416,11 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work)
 
                /*
                 * Allow a message that padded to 8byte boundary.
+                * Linux 4.19.217 with SMB 3.0.2 sometimes sends
+                * messages where clc_len is exactly 8 bytes less
+                * than len.
                 */
-               if (clc_len < len && (len - clc_len) < 8)
+               if (clc_len < len && (len - clc_len) <= 8)
                        goto validate_credit;
 
                pr_err_ratelimited(
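
Widening the tolerance from < 8 to <= 8 accepts clients that declare a length exactly one 8-byte pad short of what they send. The predicate on its own (illustrative):

    #include <stdio.h>

    /* accept a PDU padded to an 8-byte boundary: the calculated
     * length may be up to 8 bytes short of what arrived */
    static int length_ok(size_t calculated, size_t received)
    {
            return calculated == received ||
                   (calculated < received && received - calculated <= 8);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   length_ok(96, 96),
                   length_ok(88, 96),   /* exactly 8 short: now allowed */
                   length_ok(87, 96));  /* 9 short: still rejected */
            return 0;
    }
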
similarity index 100%
rename from fs/ksmbd/smb2ops.c
rename to fs/smb/server/smb2ops.c
similarity index 99%
rename from fs/ksmbd/smb2pdu.c
rename to fs/smb/server/smb2pdu.c
index cb93fd2..7a81541 100644 (file)
@@ -326,13 +326,9 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
        if (hdr->Command == SMB2_NEGOTIATE)
                aux_max = 1;
        else
-               aux_max = conn->vals->max_credits - credit_charge;
+               aux_max = conn->vals->max_credits - conn->total_credits;
        credits_granted = min_t(unsigned short, credits_requested, aux_max);
 
-       if (conn->vals->max_credits - conn->total_credits < credits_granted)
-               credits_granted = conn->vals->max_credits -
-                       conn->total_credits;
-
        conn->total_credits += credits_granted;
        work->credits_granted += credits_granted;
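
The grant is now computed in one step against the connection's remaining headroom (max_credits - total_credits) rather than clamped after the fact, so total_credits can never be pushed past the negotiated maximum. A sketch, assuming total <= max holds on entry:

    #include <stdio.h>

    static unsigned int grant_credits(unsigned int requested,
                                      unsigned int *total, unsigned int max)
    {
            unsigned int headroom = max - *total;
            unsigned int granted = requested < headroom ? requested : headroom;

            *total += granted;      /* can never exceed max */
            return granted;
    }

    int main(void)
    {
            unsigned int total = 8000;

            printf("%u\n", grant_credits(512, &total, 8192));  /* 192 */
            return 0;
    }
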
 
@@ -849,13 +845,14 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
 
 static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
                                  struct smb2_preauth_neg_context *pneg_ctxt,
-                                 int len_of_ctxts)
+                                 int ctxt_len)
 {
        /*
         * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
         * which may not be present. Only check for used HashAlgorithms[1].
         */
-       if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
+       if (ctxt_len <
+           sizeof(struct smb2_neg_context) + MIN_PREAUTH_CTXT_DATA_LEN)
                return STATUS_INVALID_PARAMETER;
 
        if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
@@ -867,15 +864,23 @@ static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
 
 static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
                                struct smb2_encryption_neg_context *pneg_ctxt,
-                               int len_of_ctxts)
+                               int ctxt_len)
 {
-       int cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
-       int i, cphs_size = cph_cnt * sizeof(__le16);
+       int cph_cnt;
+       int i, cphs_size;
+
+       if (sizeof(struct smb2_encryption_neg_context) > ctxt_len) {
+               pr_err("Invalid SMB2_ENCRYPTION_CAPABILITIES context size\n");
+               return;
+       }
 
        conn->cipher_type = 0;
 
+       cph_cnt = le16_to_cpu(pneg_ctxt->CipherCount);
+       cphs_size = cph_cnt * sizeof(__le16);
+
        if (sizeof(struct smb2_encryption_neg_context) + cphs_size >
-           len_of_ctxts) {
+           ctxt_len) {
                pr_err("Invalid cipher count(%d)\n", cph_cnt);
                return;
        }
@@ -923,15 +928,22 @@ static void decode_compress_ctxt(struct ksmbd_conn *conn,
 
 static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
                                 struct smb2_signing_capabilities *pneg_ctxt,
-                                int len_of_ctxts)
+                                int ctxt_len)
 {
-       int sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
-       int i, sign_alos_size = sign_algo_cnt * sizeof(__le16);
+       int sign_algo_cnt;
+       int i, sign_alos_size;
+
+       if (sizeof(struct smb2_signing_capabilities) > ctxt_len) {
+               pr_err("Invalid SMB2_SIGNING_CAPABILITIES context length\n");
+               return;
+       }
 
        conn->signing_negotiated = false;
+       sign_algo_cnt = le16_to_cpu(pneg_ctxt->SigningAlgorithmCount);
+       sign_alos_size = sign_algo_cnt * sizeof(__le16);
 
        if (sizeof(struct smb2_signing_capabilities) + sign_alos_size >
-           len_of_ctxts) {
+           ctxt_len) {
                pr_err("Invalid signing algorithm count(%d)\n", sign_algo_cnt);
                return;
        }
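
Both decoders above now validate in two stages: the fixed header of the negotiate context must fit inside ctxt_len before the element count is read, and only then is the variable-length array checked and walked. A runnable sketch of the shape (host-endian for brevity; the kernel code converts with le16_to_cpu):

    #include <stddef.h>
    #include <stdint.h>

    struct neg_ctx {                /* illustrative wire layout */
            uint16_t count;
            uint16_t elems[];       /* 'count' words follow */
    };

    static const uint16_t *parse_ctx(const void *buf, size_t ctxt_len,
                                     size_t *count_out)
    {
            const struct neg_ctx *c = buf;
            size_t count;

            if (ctxt_len < sizeof(*c))
                    return NULL;    /* header truncated: don't read count */
            count = c->count;       /* safe only after the check above */
            if (ctxt_len < sizeof(*c) + count * sizeof(uint16_t))
                    return NULL;    /* array truncated */
            *count_out = count;
            return c->elems;
    }

    int main(void)
    {
            uint16_t wire[3] = { 2, 1, 2 };   /* count=2, two elements */
            size_t n;

            return parse_ctx(wire, sizeof(wire), &n) == NULL;
    }
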
@@ -969,18 +981,16 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
        len_of_ctxts = len_of_smb - offset;
 
        while (i++ < neg_ctxt_cnt) {
-               int clen;
-
-               /* check that offset is not beyond end of SMB */
-               if (len_of_ctxts == 0)
-                       break;
+               int clen, ctxt_len;
 
                if (len_of_ctxts < sizeof(struct smb2_neg_context))
                        break;
 
                pctx = (struct smb2_neg_context *)((char *)pctx + offset);
                clen = le16_to_cpu(pctx->DataLength);
-               if (clen + sizeof(struct smb2_neg_context) > len_of_ctxts)
+               ctxt_len = clen + sizeof(struct smb2_neg_context);
+
+               if (ctxt_len > len_of_ctxts)
                        break;
 
                if (pctx->ContextType == SMB2_PREAUTH_INTEGRITY_CAPABILITIES) {
@@ -991,7 +1001,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
 
                        status = decode_preauth_ctxt(conn,
                                                     (struct smb2_preauth_neg_context *)pctx,
-                                                    len_of_ctxts);
+                                                    ctxt_len);
                        if (status != STATUS_SUCCESS)
                                break;
                } else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
@@ -1002,7 +1012,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
 
                        decode_encrypt_ctxt(conn,
                                            (struct smb2_encryption_neg_context *)pctx,
-                                           len_of_ctxts);
+                                           ctxt_len);
                } else if (pctx->ContextType == SMB2_COMPRESSION_CAPABILITIES) {
                        ksmbd_debug(SMB,
                                    "deassemble SMB2_COMPRESSION_CAPABILITIES context\n");
@@ -1021,9 +1031,10 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
                } else if (pctx->ContextType == SMB2_SIGNING_CAPABILITIES) {
                        ksmbd_debug(SMB,
                                    "deassemble SMB2_SIGNING_CAPABILITIES context\n");
+
                        decode_sign_cap_ctxt(conn,
                                             (struct smb2_signing_capabilities *)pctx,
-                                            len_of_ctxts);
+                                            ctxt_len);
                }
 
                /* offsets must be 8 byte aligned */
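
The loop now derives ctxt_len = DataLength + sizeof(struct smb2_neg_context) once and hands each decoder the size of its own context rather than the bytes left in the whole buffer, and the redundant len_of_ctxts == 0 check folds into the header-size check. A toy walker over 8-byte-aligned contexts (little-endian host and suitable alignment assumed; names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct tlv { uint16_t type; uint16_t dlen; /* dlen data bytes follow */ };

    static void decode(const struct tlv *t, size_t ctxt_len)
    {
            printf("type %u bounded by %zu bytes\n",
                   (unsigned)t->type, ctxt_len);
    }

    static void walk(const uint8_t *p, size_t remaining)
    {
            while (remaining >= sizeof(struct tlv)) {
                    const struct tlv *t = (const void *)p;
                    size_t ctxt_len = sizeof(*t) + t->dlen;
                    size_t step;

                    if (ctxt_len > remaining)
                            break;                      /* truncated context */
                    decode(t, ctxt_len);                /* its own bound */
                    step = (ctxt_len + 7) & ~(size_t)7; /* 8-byte align */
                    if (step >= remaining)
                            break;
                    p += step;
                    remaining -= step;
            }
    }

    int main(void)
    {
            uint8_t buf[16] = { 1, 0, 4, 0 };           /* type 1, dlen 4 */

            walk(buf, sizeof(buf));
            return 0;
    }
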
@@ -1057,16 +1068,16 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
                return rc;
        }
 
-       if (req->DialectCount == 0) {
-               pr_err("malformed packet\n");
+       smb2_buf_len = get_rfc1002_len(work->request_buf);
+       smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
+       if (smb2_neg_size > smb2_buf_len) {
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                rc = -EINVAL;
                goto err_out;
        }
 
-       smb2_buf_len = get_rfc1002_len(work->request_buf);
-       smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
-       if (smb2_neg_size > smb2_buf_len) {
+       if (req->DialectCount == 0) {
+               pr_err("malformed packet\n");
                rsp->hdr.Status = STATUS_INVALID_PARAMETER;
                rc = -EINVAL;
                goto err_out;
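
Reordering the checks means the RFC 1002 buffer length is validated against the fixed negotiate header before DialectCount is ever dereferenced, so a truncated PDU is rejected without reading past the received bytes. The ordering in miniature (struct layout illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct nego_req {
            uint16_t dialect_count;
            /* dialect_count 16-bit dialect codes follow */
    };

    static int handle_nego(const void *buf, size_t buf_len)
    {
            const struct nego_req *req;

            if (buf_len < sizeof(*req))
                    return -1;              /* too short: reject first */
            req = buf;
            if (req->dialect_count == 0)
                    return -1;              /* malformed, but safely read */
            return 0;
    }

    int main(void)
    {
            uint16_t one = 1;

            return handle_nego(&one, sizeof(one));  /* 0: accepted */
    }
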
@@ -1356,7 +1367,7 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        struct authenticate_message *authblob;
        struct ksmbd_user *user;
        char *name;
-       unsigned int auth_msg_len, name_off, name_len, secbuf_len;
+       unsigned int name_off, name_len, secbuf_len;
 
        secbuf_len = le16_to_cpu(req->SecurityBufferLength);
        if (secbuf_len < sizeof(struct authenticate_message)) {
@@ -1366,9 +1377,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
        authblob = user_authblob(conn, req);
        name_off = le32_to_cpu(authblob->UserName.BufferOffset);
        name_len = le16_to_cpu(authblob->UserName.Length);
-       auth_msg_len = le16_to_cpu(req->SecurityBufferOffset) + secbuf_len;
 
-       if (auth_msg_len < (u64)name_off + name_len)
+       if (secbuf_len < (u64)name_off + name_len)
                return NULL;
 
        name = smb_strndup_from_utf16((const char *)authblob + name_off,
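
The username offset/length pair is relative to the security buffer, so it is now bounded by SecurityBufferLength alone; the old bound of SecurityBufferOffset + SecurityBufferLength over-allowed by the offset and permitted reads beyond the blob. The corrected predicate (illustrative helper):

    #include <stdint.h>

    /* an offset inside a sub-buffer is bounded by that sub-buffer's
     * own length, not by its position within the parent message */
    static int name_in_bounds(uint32_t name_off, uint16_t name_len,
                              uint16_t secbuf_len)
    {
            return (uint64_t)name_off + name_len <= secbuf_len;
    }

    int main(void)
    {
            return !(name_in_bounds(10, 20, 64) &&  /* fits */
                     !name_in_bounds(60, 20, 64));  /* past the blob */
    }
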
@@ -2464,7 +2474,7 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
                return -ENOENT;
 
        /* Parse SD BUFFER create contexts */
-       context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER);
+       context = smb2_find_context_vals(req, SMB2_CREATE_SD_BUFFER, 4);
        if (!context)
                return -ENOENT;
        else if (IS_ERR(context))
@@ -2666,7 +2676,7 @@ int smb2_open(struct ksmbd_work *work)
 
        if (req->CreateContextsOffset) {
                /* Parse non-durable handle create contexts */
-               context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER);
+               context = smb2_find_context_vals(req, SMB2_CREATE_EA_BUFFER, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2686,7 +2696,7 @@ int smb2_open(struct ksmbd_work *work)
                }
 
                context = smb2_find_context_vals(req,
-                                                SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST);
+                                                SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2697,7 +2707,7 @@ int smb2_open(struct ksmbd_work *work)
                }
 
                context = smb2_find_context_vals(req,
-                                                SMB2_CREATE_TIMEWARP_REQUEST);
+                                                SMB2_CREATE_TIMEWARP_REQUEST, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out1;
@@ -2709,7 +2719,7 @@ int smb2_open(struct ksmbd_work *work)
 
                if (tcon->posix_extensions) {
                        context = smb2_find_context_vals(req,
-                                                        SMB2_CREATE_TAG_POSIX);
+                                                        SMB2_CREATE_TAG_POSIX, 16);
                        if (IS_ERR(context)) {
                                rc = PTR_ERR(context);
                                goto err_out1;
@@ -3107,7 +3117,7 @@ int smb2_open(struct ksmbd_work *work)
                struct create_alloc_size_req *az_req;
 
                az_req = (struct create_alloc_size_req *)smb2_find_context_vals(req,
-                                       SMB2_CREATE_ALLOCATION_SIZE);
+                                       SMB2_CREATE_ALLOCATION_SIZE, 4);
                if (IS_ERR(az_req)) {
                        rc = PTR_ERR(az_req);
                        goto err_out;
@@ -3134,7 +3144,7 @@ int smb2_open(struct ksmbd_work *work)
                                            err);
                }
 
-               context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID);
+               context = smb2_find_context_vals(req, SMB2_CREATE_QUERY_ON_DISK_ID, 4);
                if (IS_ERR(context)) {
                        rc = PTR_ERR(context);
                        goto err_out;
@@ -4359,21 +4369,6 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
        return 0;
 }
 
-static unsigned long long get_allocation_size(struct inode *inode,
-                                             struct kstat *stat)
-{
-       unsigned long long alloc_size = 0;
-
-       if (!S_ISDIR(stat->mode)) {
-               if ((inode->i_blocks << 9) <= stat->size)
-                       alloc_size = stat->size;
-               else
-                       alloc_size = inode->i_blocks << 9;
-       }
-
-       return alloc_size;
-}
-
 static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
                                   struct ksmbd_file *fp, void *rsp_org)
 {
@@ -4388,7 +4383,7 @@ static void get_file_standard_info(struct smb2_query_info_rsp *rsp,
        sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
        delete_pending = ksmbd_inode_pending_delete(fp);
 
-       sinfo->AllocationSize = cpu_to_le64(get_allocation_size(inode, &stat));
+       sinfo->AllocationSize = cpu_to_le64(inode->i_blocks << 9);
        sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
        sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
        sinfo->DeletePending = delete_pending;
@@ -4453,7 +4448,7 @@ static int get_file_all_info(struct ksmbd_work *work,
        file_info->Attributes = fp->f_ci->m_fattr;
        file_info->Pad1 = 0;
        file_info->AllocationSize =
-               cpu_to_le64(get_allocation_size(inode, &stat));
+               cpu_to_le64(inode->i_blocks << 9);
        file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
        file_info->NumberOfLinks =
                        cpu_to_le32(get_nlink(&stat) - delete_pending);
@@ -4642,7 +4637,7 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
        file_info->ChangeTime = cpu_to_le64(time);
        file_info->Attributes = fp->f_ci->m_fattr;
        file_info->AllocationSize =
-               cpu_to_le64(get_allocation_size(inode, &stat));
+               cpu_to_le64(inode->i_blocks << 9);
        file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
        file_info->Reserved = cpu_to_le32(0);
        rsp->OutputBufferLength =
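
With get_allocation_size() gone, AllocationSize is reported directly as inode->i_blocks << 9 (i_blocks counts 512-byte units), so a sparse file's allocation is no longer rounded up to its size. The same quantity as seen from userspace:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
            struct stat st;

            if (argc < 2 || stat(argv[1], &st) != 0)
                    return 1;
            /* st_blocks counts 512-byte units; for sparse files the
             * allocation can be well below the file size */
            printf("size=%lld alloc=%lld\n",
                   (long long)st.st_size,
                   (long long)st.st_blocks * 512);
            return 0;
    }
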
@@ -5507,7 +5502,7 @@ static int smb2_create_link(struct ksmbd_work *work,
 {
        char *link_name = NULL, *target_name = NULL, *pathname = NULL;
        struct path path;
-       bool file_present = true;
+       bool file_present = false;
        int rc;
 
        if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
@@ -5540,8 +5535,8 @@ static int smb2_create_link(struct ksmbd_work *work,
        if (rc) {
                if (rc != -ENOENT)
                        goto out;
-               file_present = false;
-       }
+       } else
+               file_present = true;
 
        if (file_info->ReplaceIfExists) {
                if (file_present) {
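
file_present now defaults to false and flips to true only after a successful lookup, which is the fail-safe ordering: no later branch can treat the file as present unless the lookup actually succeeded. The idiom in isolation:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/stat.h>

    int main(int argc, char **argv)
    {
            struct stat st;
            bool present = false;           /* fail-safe default */
            int rc;

            if (argc < 2)
                    return 1;
            rc = stat(argv[1], &st) ? -errno : 0;
            if (rc) {
                    if (rc != -ENOENT)
                            return 1;       /* hard error */
            } else {
                    present = true;         /* only success flips it */
            }
            printf("present=%d\n", present);
            return 0;
    }
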
similarity index 100%
rename from fs/ksmbd/smb2pdu.h
rename to fs/smb/server/smb2pdu.h
similarity index 99%
rename from fs/ksmbd/smb_common.h
rename to fs/smb/server/smb_common.h
index 9130d2e..6b0d5f1 100644 (file)
@@ -10,7 +10,7 @@
 
 #include "glob.h"
 #include "nterr.h"
-#include "../smbfs_common/smb2pdu.h"
+#include "../common/smb2pdu.h"
 #include "smb2pdu.h"
 
 /* ksmbd's Specific ERRNO */
similarity index 100%
rename from fs/ksmbd/smbacl.c
rename to fs/smb/server/smbacl.c
similarity index 100%
rename from fs/ksmbd/smbacl.h
rename to fs/smb/server/smbacl.h
similarity index 98%
rename from fs/ksmbd/smbfsctl.h
rename to fs/smb/server/smbfsctl.h
index b98418a..ecdf8f6 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1+ */
 /*
- *   fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ *   fs/smb/server/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
  *
  *   Copyright (c) International Business Machines  Corp., 2002,2009
  *   Author(s): Steve French (sfrench@us.ibm.com)
similarity index 99%
rename from fs/ksmbd/smbstatus.h
rename to fs/smb/server/smbstatus.h
index 108a8b6..8963deb 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: LGPL-2.1+ */
 /*
- *   fs/cifs/smb2status.h
+ *   fs/smb/server/smb2status.h
  *
  *   SMB2 Status code (network error) definitions
  *   Definitions are from MS-ERREF
similarity index 100%
rename from fs/ksmbd/unicode.c
rename to fs/smb/server/unicode.c
similarity index 100%
rename from fs/ksmbd/unicode.h
rename to fs/smb/server/unicode.h
similarity index 100%
rename from fs/ksmbd/uniupr.h
rename to fs/smb/server/uniupr.h
similarity index 99%
rename from fs/ksmbd/vfs.c
rename to fs/smb/server/vfs.c
index 778c152..6f30291 100644 (file)
@@ -86,12 +86,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
        err = vfs_path_parent_lookup(filename, flags,
                                     &parent_path, &last, &type,
                                     root_share_path);
-       putname(filename);
-       if (err)
+       if (err) {
+               putname(filename);
                return err;
+       }
 
        if (unlikely(type != LAST_NORM)) {
                path_put(&parent_path);
+               putname(filename);
                return -ENOENT;
        }
 
@@ -108,12 +110,14 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
        path->dentry = d;
        path->mnt = share_conf->vfs_path.mnt;
        path_put(&parent_path);
+       putname(filename);
 
        return 0;
 
 err_out:
        inode_unlock(parent_path.dentry->d_inode);
        path_put(&parent_path);
+       putname(filename);
        return -ENOENT;
 }
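
putname(filename) moves from right after vfs_path_parent_lookup() to every exit path because `last` is a qstr that points into the filename's storage; freeing the name before the final use of `last` left it dangling. The lifetime rule, in a tiny userspace form:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct qstr { const char *name; size_t len; };   /* borrows storage */

    int main(void)
    {
            char *filename = strdup("dir/leaf");
            struct qstr last;

            if (!filename)
                    return 1;
            last = (struct qstr){ strrchr(filename, '/') + 1, 4 };

            /* last.name points INTO filename: freeing filename here
             * would leave it dangling, so free only after the last use */
            printf("%.*s\n", (int)last.len, last.name);
            free(filename);
            return 0;
    }
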
 
@@ -743,6 +747,7 @@ retry:
        rd.new_dir              = new_path.dentry->d_inode,
        rd.new_dentry           = new_dentry,
        rd.flags                = flags,
+       rd.delegated_inode      = NULL,
        err = vfs_rename(&rd);
        if (err)
                ksmbd_debug(VFS, "vfs_rename failed err %d\n", err);
similarity index 100%
rename from fs/ksmbd/vfs.h
rename to fs/smb/server/vfs.h
similarity index 100%
rename from fs/ksmbd/xattr.h
rename to fs/smb/server/xattr.h
index fcf67d8..e7bbb7f 100644 (file)
@@ -985,9 +985,16 @@ int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name)
        return 0;
 }
 
-/*
+/**
+ * generic_listxattr - run through a dentry's xattr list() operations
+ * @dentry: dentry to list the xattrs
+ * @buffer: result buffer
+ * @buffer_size: size of @buffer
+ *
  * Combine the results of the list() operation from every xattr_handler in the
- * list.
+ * xattr_handler stack.
+ *
+ * Note that this will not include the entries for POSIX ACLs.
  */
 ssize_t
 generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
@@ -996,10 +1003,6 @@ generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
        ssize_t remaining_size = buffer_size;
        int err = 0;
 
-       err = posix_acl_listxattr(d_inode(dentry), &buffer, &remaining_size);
-       if (err)
-               return err;
-
        for_each_xattr_handler(handlers, handler) {
                if (!handler->name || (handler->list && !handler->list(dentry)))
                        continue;
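
generic_listxattr() no longer emits POSIX ACL names itself; the posix_acl_listxattr() call is dropped and the kernel-doc above records that ACL entries are excluded. From userspace the xattr name list is still read the same way:

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(int argc, char **argv)
    {
            char list[4096];
            ssize_t n, i;

            if (argc < 2)
                    return 1;
            n = listxattr(argv[1], list, sizeof(list));
            if (n < 0)
                    return 1;
            for (i = 0; i < n; i += (ssize_t)strlen(list + i) + 1)
                    puts(list + i);         /* names are NUL-separated */
            return 0;
    }
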
index 9b373a0..ee84835 100644 (file)
@@ -984,7 +984,10 @@ xfs_ag_shrink_space(
                if (err2 != -ENOSPC)
                        goto resv_err;
 
-               __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL, true);
+               err2 = __xfs_free_extent_later(*tpp, args.fsbno, delta, NULL,
+                               true);
+               if (err2)
+                       goto resv_err;
 
                /*
                 * Roll the transaction before trying to re-init the per-ag
index fdfa08c..c20fe99 100644 (file)
@@ -628,6 +628,25 @@ xfs_alloc_fixup_trees(
        return 0;
 }
 
+/*
+ * We do not verify the AGFL contents against AGF-based index counters here,
+ * even though we may have access to the perag that contains shadow copies. We
+ * don't know if the AGF based counters have been checked, and if they have they
+ * still may be inconsistent because they haven't yet been reset on the first
+ * allocation after the AGF has been read in.
+ *
+ * This means we can only check that all agfl entries contain valid or null
+ * values because we can't reliably determine the active range to exclude
+ * NULLAGBNO as a valid value.
+ *
+ * However, we can't even do that for v4 format filesystems because there are
+ * old versions of mkfs out there that do not initialise the AGFL to known,
+ * verifiable values. Hence we can't tell the difference between an AGFL block
+ * allocated by mkfs and a corrupted AGFL block here on v4 filesystems.
+ *
+ * As a result, we can only fully validate AGFL block numbers when we pull them
+ * from the freelist in xfs_alloc_get_freelist().
+ */
 static xfs_failaddr_t
 xfs_agfl_verify(
        struct xfs_buf  *bp)
@@ -637,12 +656,6 @@ xfs_agfl_verify(
        __be32          *agfl_bno = xfs_buf_to_agfl_bno(bp);
        int             i;
 
-       /*
-        * There is no verification of non-crc AGFLs because mkfs does not
-        * initialise the AGFL to zero or NULL. Hence the only valid part of the
-        * AGFL is what the AGF says is active. We can't get to the AGF, so we
-        * can't verify just those entries are valid.
-        */
        if (!xfs_has_crc(mp))
                return NULL;
 
@@ -2321,12 +2334,16 @@ xfs_free_agfl_block(
 }
 
 /*
- * Check the agfl fields of the agf for inconsistency or corruption. The purpose
- * is to detect an agfl header padding mismatch between current and early v5
- * kernels. This problem manifests as a 1-slot size difference between the
- * on-disk flcount and the active [first, last] range of a wrapped agfl. This
- * may also catch variants of agfl count corruption unrelated to padding. Either
- * way, we'll reset the agfl and warn the user.
+ * Check the agfl fields of the agf for inconsistency or corruption.
+ *
+ * The original purpose was to detect an agfl header padding mismatch between
+ * current and early v5 kernels. This problem manifests as a 1-slot size
+ * difference between the on-disk flcount and the active [first, last] range of
+ * a wrapped agfl.
+ *
+ * However, we need to use these same checks to catch agfl count corruptions
+ * unrelated to padding. This could occur on any v4 or v5 filesystem, so either
+ * way, we need to reset the agfl and warn the user.
  *
  * Return true if a reset is required before the agfl can be used, false
  * otherwise.
@@ -2342,10 +2359,6 @@ xfs_agfl_needs_reset(
        int                     agfl_size = xfs_agfl_size(mp);
        int                     active;
 
-       /* no agfl header on v4 supers */
-       if (!xfs_has_crc(mp))
-               return false;
-
        /*
         * The agf read verifier catches severe corruption of these fields.
         * Repeat some sanity checks to cover a packed -> unpacked mismatch if
@@ -2418,7 +2431,7 @@ xfs_agfl_reset(
  * the real allocation can proceed. Deferring the free disconnects freeing up
  * the AGFL slot from freeing the block.
  */
-STATIC void
+static int
 xfs_defer_agfl_block(
        struct xfs_trans                *tp,
        xfs_agnumber_t                  agno,
@@ -2437,17 +2450,21 @@ xfs_defer_agfl_block(
        xefi->xefi_blockcount = 1;
        xefi->xefi_owner = oinfo->oi_owner;
 
+       if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbno(mp, xefi->xefi_startblock)))
+               return -EFSCORRUPTED;
+
        trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1);
 
        xfs_extent_free_get_group(mp, xefi);
        xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_AGFL_FREE, &xefi->xefi_list);
+       return 0;
 }
 
 /*
  * Add the extent to the list of extents to be free at transaction end.
  * The list is maintained sorted (by block number).
  */
-void
+int
 __xfs_free_extent_later(
        struct xfs_trans                *tp,
        xfs_fsblock_t                   bno,
@@ -2474,6 +2491,9 @@ __xfs_free_extent_later(
 #endif
        ASSERT(xfs_extfree_item_cache != NULL);
 
+       if (XFS_IS_CORRUPT(mp, !xfs_verify_fsbext(mp, bno, len)))
+               return -EFSCORRUPTED;
+
        xefi = kmem_cache_zalloc(xfs_extfree_item_cache,
                               GFP_KERNEL | __GFP_NOFAIL);
        xefi->xefi_startblock = bno;
@@ -2497,6 +2517,7 @@ __xfs_free_extent_later(
 
        xfs_extent_free_get_group(mp, xefi);
        xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &xefi->xefi_list);
+       return 0;
 }
 
 #ifdef DEBUG
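
__xfs_free_extent_later() now returns an int and rejects an extent that fails xfs_verify_fsbext() with -EFSCORRUPTED before it is queued; the surrounding hunks convert every caller to propagate that error. The reason to validate before queuing, in a toy form (constants illustrative; EUCLEAN stands in for -EFSCORRUPTED, which has no userspace name):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FS_BLOCKS 1000ULL

    /* validate before queuing deferred work: once queued, no caller
     * remains to receive the error */
    static int defer_free(uint64_t bno, uint64_t len)
    {
            if (len == 0 || bno + len < bno || bno + len > FS_BLOCKS)
                    return -EUCLEAN;        /* corrupt range: fail early */
            /* ...push onto the deferred-free list here... */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", defer_free(10, 5), defer_free(999, 5));
            return 0;
    }
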
@@ -2657,7 +2678,9 @@ xfs_alloc_fix_freelist(
                        goto out_agbp_relse;
 
                /* defer agfl frees */
-               xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
+               error = xfs_defer_agfl_block(tp, args->agno, bno, &targs.oinfo);
+               if (error)
+                       goto out_agbp_relse;
        }
 
        targs.tp = tp;
@@ -2767,6 +2790,9 @@ xfs_alloc_get_freelist(
         */
        agfl_bno = xfs_buf_to_agfl_bno(agflbp);
        bno = be32_to_cpu(agfl_bno[be32_to_cpu(agf->agf_flfirst)]);
+       if (XFS_IS_CORRUPT(tp->t_mountp, !xfs_verify_agbno(pag, bno)))
+               return -EFSCORRUPTED;
+
        be32_add_cpu(&agf->agf_flfirst, 1);
        xfs_trans_brelse(tp, agflbp);
        if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp))
@@ -2889,6 +2915,19 @@ xfs_alloc_put_freelist(
        return 0;
 }
 
+/*
+ * Verify the AGF is consistent.
+ *
+ * We do not verify the AGFL indexes in the AGF are fully consistent here
+ * because of issues with variable on-disk structure sizes. Instead, we check
+ * the agfl indexes for consistency when we initialise the perag from the AGF
+ * information after a read completes.
+ *
+ * If the index is inconsistent, then we mark the perag as needing an AGFL
+ * reset. The first AGFL update performed then resets the AGFL indexes and
+ * refills the AGFL with known good free blocks, allowing the filesystem to
+ * continue operating normally at the cost of a few leaked free space blocks.
+ */
 static xfs_failaddr_t
 xfs_agf_verify(
        struct xfs_buf          *bp)
@@ -2962,7 +3001,6 @@ xfs_agf_verify(
                return __this_address;
 
        return NULL;
-
 }
 
 static void
@@ -3187,7 +3225,8 @@ xfs_alloc_vextent_check_args(
  */
 static int
 xfs_alloc_vextent_prepare_ag(
-       struct xfs_alloc_arg    *args)
+       struct xfs_alloc_arg    *args,
+       uint32_t                flags)
 {
        bool                    need_pag = !args->pag;
        int                     error;
@@ -3196,7 +3235,7 @@ xfs_alloc_vextent_prepare_ag(
                args->pag = xfs_perag_get(args->mp, args->agno);
 
        args->agbp = NULL;
-       error = xfs_alloc_fix_freelist(args, 0);
+       error = xfs_alloc_fix_freelist(args, flags);
        if (error) {
                trace_xfs_alloc_vextent_nofix(args);
                if (need_pag)
@@ -3336,7 +3375,7 @@ xfs_alloc_vextent_this_ag(
                return error;
        }
 
-       error = xfs_alloc_vextent_prepare_ag(args);
+       error = xfs_alloc_vextent_prepare_ag(args, 0);
        if (!error && args->agbp)
                error = xfs_alloc_ag_vextent_size(args);
 
@@ -3380,7 +3419,7 @@ restart:
        for_each_perag_wrap_range(mp, start_agno, restart_agno,
                        mp->m_sb.sb_agcount, agno, args->pag) {
                args->agno = agno;
-               error = xfs_alloc_vextent_prepare_ag(args);
+               error = xfs_alloc_vextent_prepare_ag(args, flags);
                if (error)
                        break;
                if (!args->agbp) {
@@ -3546,7 +3585,7 @@ xfs_alloc_vextent_exact_bno(
                return error;
        }
 
-       error = xfs_alloc_vextent_prepare_ag(args);
+       error = xfs_alloc_vextent_prepare_ag(args, 0);
        if (!error && args->agbp)
                error = xfs_alloc_ag_vextent_exact(args);
 
@@ -3587,7 +3626,7 @@ xfs_alloc_vextent_near_bno(
        if (needs_perag)
                args->pag = xfs_perag_grab(mp, args->agno);
 
-       error = xfs_alloc_vextent_prepare_ag(args);
+       error = xfs_alloc_vextent_prepare_ag(args, 0);
        if (!error && args->agbp)
                error = xfs_alloc_ag_vextent_near(args);
 
index 5dbb255..85ac470 100644 (file)
@@ -230,7 +230,7 @@ xfs_buf_to_agfl_bno(
        return bp->b_addr;
 }
 
-void __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno,
+int __xfs_free_extent_later(struct xfs_trans *tp, xfs_fsblock_t bno,
                xfs_filblks_t len, const struct xfs_owner_info *oinfo,
                bool skip_discard);
 
@@ -254,14 +254,14 @@ void xfs_extent_free_get_group(struct xfs_mount *mp,
 #define XFS_EFI_ATTR_FORK      (1U << 1) /* freeing attr fork block */
 #define XFS_EFI_BMBT_BLOCK     (1U << 2) /* freeing bmap btree block */
 
-static inline void
+static inline int
 xfs_free_extent_later(
        struct xfs_trans                *tp,
        xfs_fsblock_t                   bno,
        xfs_filblks_t                   len,
        const struct xfs_owner_info     *oinfo)
 {
-       __xfs_free_extent_later(tp, bno, len, oinfo, false);
+       return __xfs_free_extent_later(tp, bno, len, oinfo, false);
 }
 
 
index cd8870a..fef3569 100644 (file)
@@ -572,8 +572,12 @@ xfs_bmap_btree_to_extents(
        cblock = XFS_BUF_TO_BLOCK(cbp);
        if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
                return error;
+
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
-       xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
+       error = xfs_free_extent_later(cur->bc_tp, cbno, 1, &oinfo);
+       if (error)
+               return error;
+
        ip->i_nblocks--;
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        xfs_trans_binval(tp, cbp);
@@ -5230,10 +5234,12 @@ xfs_bmap_del_extent_real(
                if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
                        xfs_refcount_decrease_extent(tp, del);
                } else {
-                       __xfs_free_extent_later(tp, del->br_startblock,
+                       error = __xfs_free_extent_later(tp, del->br_startblock,
                                        del->br_blockcount, NULL,
                                        (bflags & XFS_BMAPI_NODISCARD) ||
                                        del->br_state == XFS_EXT_UNWRITTEN);
+                       if (error)
+                               goto done;
                }
        }
 
index 1b40e5f..36564ae 100644 (file)
@@ -268,11 +268,14 @@ xfs_bmbt_free_block(
        struct xfs_trans        *tp = cur->bc_tp;
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, xfs_buf_daddr(bp));
        struct xfs_owner_info   oinfo;
+       int                     error;
 
        xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_ino.whichfork);
-       xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo);
-       ip->i_nblocks--;
+       error = xfs_free_extent_later(cur->bc_tp, fsbno, 1, &oinfo);
+       if (error)
+               return error;
 
+       ip->i_nblocks--;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
        return 0;
index a16d5de..34600f9 100644 (file)
@@ -1834,7 +1834,7 @@ retry:
  * might be sparse and only free the regions that are allocated as part of the
  * chunk.
  */
-STATIC void
+static int
 xfs_difree_inode_chunk(
        struct xfs_trans                *tp,
        xfs_agnumber_t                  agno,
@@ -1851,10 +1851,10 @@ xfs_difree_inode_chunk(
 
        if (!xfs_inobt_issparse(rec->ir_holemask)) {
                /* not sparse, calculate extent info directly */
-               xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, sagbno),
-                                 M_IGEO(mp)->ialloc_blks,
-                                 &XFS_RMAP_OINFO_INODES);
-               return;
+               return xfs_free_extent_later(tp,
+                               XFS_AGB_TO_FSB(mp, agno, sagbno),
+                               M_IGEO(mp)->ialloc_blks,
+                               &XFS_RMAP_OINFO_INODES);
        }
 
        /* holemask is only 16-bits (fits in an unsigned long) */
@@ -1871,6 +1871,8 @@ xfs_difree_inode_chunk(
                                                XFS_INOBT_HOLEMASK_BITS);
        nextbit = startidx + 1;
        while (startidx < XFS_INOBT_HOLEMASK_BITS) {
+               int error;
+
                nextbit = find_next_zero_bit(holemask, XFS_INOBT_HOLEMASK_BITS,
                                             nextbit);
                /*
@@ -1896,8 +1898,11 @@ xfs_difree_inode_chunk(
 
                ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
                ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
-               xfs_free_extent_later(tp, XFS_AGB_TO_FSB(mp, agno, agbno),
-                                 contigblk, &XFS_RMAP_OINFO_INODES);
+               error = xfs_free_extent_later(tp,
+                               XFS_AGB_TO_FSB(mp, agno, agbno),
+                               contigblk, &XFS_RMAP_OINFO_INODES);
+               if (error)
+                       return error;
 
                /* reset range to current bit and carry on... */
                startidx = endidx = nextbit;
@@ -1905,6 +1910,7 @@ xfs_difree_inode_chunk(
 next:
                nextbit++;
        }
+       return 0;
 }
 
 STATIC int
@@ -2003,7 +2009,9 @@ xfs_difree_inobt(
                        goto error0;
                }
 
-               xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
+               error = xfs_difree_inode_chunk(tp, pag->pag_agno, &rec);
+               if (error)
+                       goto error0;
        } else {
                xic->deleted = false;
 
index f13e080..269573c 100644 (file)
@@ -324,7 +324,6 @@ struct xfs_inode_log_format_32 {
 #define XFS_ILOG_DOWNER        0x200   /* change the data fork owner on replay */
 #define XFS_ILOG_AOWNER        0x400   /* change the attr fork owner on replay */
 
-
 /*
  * The timestamps are dirty, but not necessarily anything else in the inode
  * core.  Unlike the other fields above this one must never make it to disk
@@ -333,6 +332,14 @@ struct xfs_inode_log_format_32 {
  */
 #define XFS_ILOG_TIMESTAMP     0x4000
 
+/*
+ * The version field has been changed, but not necessarily anything else of
+ * interest. This must never make it to disk - it is used purely to ensure that
+ * the inode item ->precommit operation can update the fsync flag triggers
+ * in the inode item correctly.
+ */
+#define XFS_ILOG_IVERSION      0x8000
+
 #define        XFS_ILOG_NONCORE        (XFS_ILOG_DDATA | XFS_ILOG_DEXT | \
                                 XFS_ILOG_DBROOT | XFS_ILOG_DEV | \
                                 XFS_ILOG_ADATA | XFS_ILOG_AEXT | \
index c1c6577..b6e2143 100644 (file)
@@ -1151,8 +1151,10 @@ xfs_refcount_adjust_extents(
                                fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
                                                cur->bc_ag.pag->pag_agno,
                                                tmp.rc_startblock);
-                               xfs_free_extent_later(cur->bc_tp, fsbno,
+                               error = xfs_free_extent_later(cur->bc_tp, fsbno,
                                                  tmp.rc_blockcount, NULL);
+                               if (error)
+                                       goto out_error;
                        }
 
                        (*agbno) += tmp.rc_blockcount;
@@ -1210,8 +1212,10 @@ xfs_refcount_adjust_extents(
                        fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
                                        cur->bc_ag.pag->pag_agno,
                                        ext.rc_startblock);
-                       xfs_free_extent_later(cur->bc_tp, fsbno,
+                       error = xfs_free_extent_later(cur->bc_tp, fsbno,
                                        ext.rc_blockcount, NULL);
+                       if (error)
+                               goto out_error;
                }
 
 skip:
@@ -1976,7 +1980,10 @@ xfs_refcount_recover_cow_leftovers(
                                rr->rr_rrec.rc_blockcount);
 
                /* Free the block. */
-               xfs_free_extent_later(tp, fsb, rr->rr_rrec.rc_blockcount, NULL);
+               error = xfs_free_extent_later(tp, fsb,
+                               rr->rr_rrec.rc_blockcount, NULL);
+               if (error)
+                       goto out_trans;
 
                error = xfs_trans_commit(tp);
                if (error)
index 8b55470..cb4796b 100644 (file)
@@ -40,9 +40,8 @@ xfs_trans_ijoin(
        iip->ili_lock_flags = lock_flags;
        ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
 
-       /*
-        * Get a log_item_desc to point at the new item.
-        */
+       /* Reset the per-tx dirty context and add the item to the tx. */
+       iip->ili_dirty_flags = 0;
        xfs_trans_add_item(tp, &iip->ili_item);
 }
 
@@ -76,17 +75,10 @@ xfs_trans_ichgtime(
 /*
  * This is called to mark the fields indicated in fieldmask as needing to be
  * logged when the transaction is committed.  The inode must already be
- * associated with the given transaction.
- *
- * The values for fieldmask are defined in xfs_inode_item.h.  We always log all
- * of the core inode if any of it has changed, and we always log all of the
- * inline data/extents/b-tree root if any of them has changed.
- *
- * Grab and pin the cluster buffer associated with this inode to avoid RMW
- * cycles at inode writeback time. Avoid the need to add error handling to every
- * xfs_trans_log_inode() call by shutting down on read error.  This will cause
- * transactions to fail and everything to error out, just like if we return a
- * read error in a dirty transaction and cancel it.
+ * associated with the given transaction. All we do here is record where the
+ * inode was dirtied and mark the transaction and inode log item dirty;
+ * everything else is done in the ->precommit log item operation after the
+ * changes in the transaction have been completed.
  */
 void
 xfs_trans_log_inode(
@@ -96,7 +88,6 @@ xfs_trans_log_inode(
 {
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct inode            *inode = VFS_I(ip);
-       uint                    iversion_flags = 0;
 
        ASSERT(iip);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
@@ -105,18 +96,6 @@ xfs_trans_log_inode(
        tp->t_flags |= XFS_TRANS_DIRTY;
 
        /*
-        * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
-        * don't matter - we either will need an extra transaction in 24 hours
-        * to log the timestamps, or will clear already cleared fields in the
-        * worst case.
-        */
-       if (inode->i_state & I_DIRTY_TIME) {
-               spin_lock(&inode->i_lock);
-               inode->i_state &= ~I_DIRTY_TIME;
-               spin_unlock(&inode->i_lock);
-       }
-
-       /*
         * First time we log the inode in a transaction, bump the inode change
         * counter if it is configured for this to occur. While we have the
         * inode locked exclusively for metadata modification, we can usually
@@ -128,86 +107,10 @@ xfs_trans_log_inode(
        if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) {
                if (IS_I_VERSION(inode) &&
                    inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE))
-                       iversion_flags = XFS_ILOG_CORE;
-       }
-
-       /*
-        * If we're updating the inode core or the timestamps and it's possible
-        * to upgrade this inode to bigtime format, do so now.
-        */
-       if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
-           xfs_has_bigtime(ip->i_mount) &&
-           !xfs_inode_has_bigtime(ip)) {
-               ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
-               flags |= XFS_ILOG_CORE;
-       }
-
-       /*
-        * Inode verifiers do not check that the extent size hint is an integer
-        * multiple of the rt extent size on a directory with both rtinherit
-        * and extszinherit flags set.  If we're logging a directory that is
-        * misconfigured in this way, clear the hint.
-        */
-       if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
-           (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
-           (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
-               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
-                                  XFS_DIFLAG_EXTSZINHERIT);
-               ip->i_extsize = 0;
-               flags |= XFS_ILOG_CORE;
+                       flags |= XFS_ILOG_IVERSION;
        }
 
-       /*
-        * Record the specific change for fdatasync optimisation. This allows
-        * fdatasync to skip log forces for inodes that are only timestamp
-        * dirty.
-        */
-       spin_lock(&iip->ili_lock);
-       iip->ili_fsync_fields |= flags;
-
-       if (!iip->ili_item.li_buf) {
-               struct xfs_buf  *bp;
-               int             error;
-
-               /*
-                * We hold the ILOCK here, so this inode is not going to be
-                * flushed while we are here. Further, because there is no
-                * buffer attached to the item, we know that there is no IO in
-                * progress, so nothing will clear the ili_fields while we read
-                * in the buffer. Hence we can safely drop the spin lock and
-                * read the buffer knowing that the state will not change from
-                * here.
-                */
-               spin_unlock(&iip->ili_lock);
-               error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
-               if (error) {
-                       xfs_force_shutdown(ip->i_mount, SHUTDOWN_META_IO_ERROR);
-                       return;
-               }
-
-               /*
-                * We need an explicit buffer reference for the log item but
-                * don't want the buffer to remain attached to the transaction.
-                * Hold the buffer but release the transaction reference once
-                * we've attached the inode log item to the buffer log item
-                * list.
-                */
-               xfs_buf_hold(bp);
-               spin_lock(&iip->ili_lock);
-               iip->ili_item.li_buf = bp;
-               bp->b_flags |= _XBF_INODES;
-               list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
-               xfs_trans_brelse(tp, bp);
-       }
-
-       /*
-        * Always OR in the bits from the ili_last_fields field.  This is to
-        * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
-        * in the eventual clearing of the ili_fields bits.  See the big comment
-        * in xfs_iflush() for an explanation of this coordination mechanism.
-        */
-       iip->ili_fields |= (flags | iip->ili_last_fields | iversion_flags);
-       spin_unlock(&iip->ili_lock);
+       iip->ili_dirty_flags |= flags;
 }
 
 int
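
xfs_trans_log_inode() shrinks to recording dirty flags (plus the new XFS_ILOG_IVERSION bit) in ili_dirty_flags; the timestamp clearing, bigtime upgrade, extent-size-hint fixup and cluster-buffer pinning it used to do move into the inode log item's ->precommit processing, per the rewritten comment, so the expensive work runs once per transaction rather than on every logging call. The accumulate-then-flush split, schematically:

    #include <stdio.h>

    struct item { unsigned int dirty_flags; };

    /* per-call logging just records intent... */
    static void log_item(struct item *it, unsigned int flags)
    {
            it->dirty_flags |= flags;
    }

    /* ...and the expensive processing runs once at commit */
    static void precommit(struct item *it)
    {
            if (it->dirty_flags)
                    printf("flush 0x%x once\n", it->dirty_flags);
            it->dirty_flags = 0;
    }

    int main(void)
    {
            struct item it = { 0 };

            log_item(&it, 0x1);
            log_item(&it, 0x4000);
            precommit(&it);
            return 0;
    }
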
index 69bc89d..5bf4326 100644 (file)
@@ -769,14 +769,14 @@ xchk_are_bmaps_contiguous(
  * mapping or false if there are no more mappings.  Caller must ensure that
  * @info.icur is zeroed before the first call.
  */
-static int
+static bool
 xchk_bmap_iext_iter(
        struct xchk_bmap_info   *info,
        struct xfs_bmbt_irec    *irec)
 {
        struct xfs_bmbt_irec    got;
        struct xfs_ifork        *ifp;
-       xfs_filblks_t           prev_len;
+       unsigned int            nr = 0;
 
        ifp = xfs_ifork_ptr(info->sc->ip, info->whichfork);
 
@@ -790,12 +790,12 @@ xchk_bmap_iext_iter(
                                irec->br_startoff);
                return false;
        }
+       nr++;
 
        /*
         * Iterate subsequent iextent records and merge them with the one
         * that we just read, if possible.
         */
-       prev_len = irec->br_blockcount;
        while (xfs_iext_peek_next_extent(ifp, &info->icur, &got)) {
                if (!xchk_are_bmaps_contiguous(irec, &got))
                        break;
@@ -805,20 +805,21 @@ xchk_bmap_iext_iter(
                                        got.br_startoff);
                        return false;
                }
-
-               /*
-                * Notify the user of mergeable records in the data or attr
-                * forks.  CoW forks only exist in memory so we ignore them.
-                */
-               if (info->whichfork != XFS_COW_FORK &&
-                   prev_len + got.br_blockcount > BMBT_BLOCKCOUNT_MASK)
-                       xchk_ino_set_preen(info->sc, info->sc->ip->i_ino);
+               nr++;
 
                irec->br_blockcount += got.br_blockcount;
-               prev_len = got.br_blockcount;
                xfs_iext_next(ifp, &info->icur);
        }
 
+       /*
+        * If the merged mapping could be expressed with fewer bmbt records
+        * than we actually found, notify the user that this fork could be
+        * optimized.  CoW forks only exist in memory so we ignore them.
+        */
+       if (nr > 1 && info->whichfork != XFS_COW_FORK &&
+           howmany_64(irec->br_blockcount, XFS_MAX_BMBT_EXTLEN) < nr)
+               xchk_ino_set_preen(info->sc, info->sc->ip->i_ino);
+
        return true;
 }
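
The preen heuristic above counts the incore records (nr) merged into a single mapping and flags the fork only when fewer on-disk bmbt records could express the same extent. A standalone sketch of that arithmetic, assuming howmany_64() rounds up and XFS_MAX_BMBT_EXTLEN is the per-record block count cap:

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed stand-ins for BMBT_BLOCKCOUNT_MASK and howmany_64(). */
    #define MAX_BMBT_EXTLEN        ((1ULL << 21) - 1)
    #define howmany(x, y)          (((x) + ((y) - 1)) / (y))

    static bool fork_could_be_optimized(uint64_t merged_blocks, unsigned int nr)
    {
            /* Preen only if a smaller record count could hold the mapping. */
            return nr > 1 && howmany(merged_blocks, MAX_BMBT_EXTLEN) < nr;
    }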
 
index b38e938..e113f2f 100644 (file)
@@ -105,10 +105,10 @@ struct xfs_scrub {
 };
 
 /* XCHK state flags grow up from zero, XREP state flags grown down from 2^31 */
-#define XCHK_TRY_HARDER                (1 << 0)  /* can't get resources, try again */
-#define XCHK_FSGATES_DRAIN     (1 << 2)  /* defer ops draining enabled */
-#define XCHK_NEED_DRAIN                (1 << 3)  /* scrub needs to drain defer ops */
-#define XREP_ALREADY_FIXED     (1 << 31) /* checking our repair work */
+#define XCHK_TRY_HARDER                (1U << 0)  /* can't get resources, try again */
+#define XCHK_FSGATES_DRAIN     (1U << 2)  /* defer ops draining enabled */
+#define XCHK_NEED_DRAIN                (1U << 3)  /* scrub needs to drain defer ops */
+#define XREP_ALREADY_FIXED     (1U << 31) /* checking our repair work */
 
 /*
  * The XCHK_FSGATES* flags reflect functionality in the main filesystem that
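
The move from 1 << 31 to 1U << 31 matters for the top bit: shifting a set bit into the sign position of a signed int is undefined behaviour in C. A minimal illustration, independent of the kernel headers:

    #include <stdio.h>

    int main(void)
    {
            /* 1 << 31 would shift into the sign bit of a signed int
             * (undefined behaviour); the unsigned literal is well defined.
             */
            unsigned int xrep_already_fixed = 1U << 31;

            printf("0x%x\n", xrep_already_fixed);   /* 0x80000000 */
            return 0;
    }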
index df7322e..023d4e0 100644 (file)
@@ -452,10 +452,18 @@ xfs_buf_item_format(
  * This is called to pin the buffer associated with the buf log item in memory
  * so it cannot be written out.
  *
- * We also always take a reference to the buffer log item here so that the bli
- * is held while the item is pinned in memory. This means that we can
- * unconditionally drop the reference count a transaction holds when the
- * transaction is completed.
+ * We take a reference to the buffer log item here so that the BLI life cycle
+ * extends at least until the buffer is unpinned via xfs_buf_item_unpin() and
+ * inserted into the AIL.
+ *
+ * We also need to take a reference to the buffer itself as the BLI unpin
+ * processing requires accessing the buffer after the BLI has dropped the final
+ * BLI reference. See xfs_buf_item_unpin() for an explanation.
+ * If unpins race to drop the final BLI reference and only the BLI owns a
+ * reference to the buffer, then the loser of the race can have the buffer
+ * freed from under it (e.g. on shutdown). Taking a buffer reference per pin
+ * count ensures the life cycle of the buffer extends for as long as we hold
+ * the buffer pin reference in xfs_buf_item_unpin().
  */
 STATIC void
 xfs_buf_item_pin(
@@ -470,13 +478,30 @@ xfs_buf_item_pin(
 
        trace_xfs_buf_item_pin(bip);
 
+       xfs_buf_hold(bip->bli_buf);
        atomic_inc(&bip->bli_refcount);
        atomic_inc(&bip->bli_buf->b_pin_count);
 }
 
 /*
- * This is called to unpin the buffer associated with the buf log item which
- * was previously pinned with a call to xfs_buf_item_pin().
+ * This is called to unpin the buffer associated with the buf log item which was
+ * previously pinned with a call to xfs_buf_item_pin().  We enter this function
+ * with a buffer pin count, a buffer reference and a BLI reference.
+ *
+ * We must drop the BLI reference before we unpin the buffer because the AIL
+ * doesn't acquire a BLI reference whenever it accesses it. Therefore if the
+ * refcount drops to zero, the bli could still be AIL resident and the buffer
+ * submitted for I/O at any point before we return. This can result in IO
+ * completion freeing the buffer while we are still trying to access it here.
+ * This race condition can also occur in shutdown situations where we abort and
+ * unpin buffers from contexts other than journal IO completion.
+ *
+ * Hence we have to hold a buffer reference per pin count to ensure that the
+ * buffer cannot be freed until we have finished processing the unpin operation.
+ * The reference is taken in xfs_buf_item_pin(), and we must hold it until we
+ * are done processing the buffer state. In the case of an abort (remove =
+ * true) then we re-use the current pin reference as the IO reference we hand
+ * off to IO failure handling.
  */
 STATIC void
 xfs_buf_item_unpin(
@@ -493,24 +518,18 @@ xfs_buf_item_unpin(
 
        trace_xfs_buf_item_unpin(bip);
 
-       /*
-        * Drop the bli ref associated with the pin and grab the hold required
-        * for the I/O simulation failure in the abort case. We have to do this
-        * before the pin count drops because the AIL doesn't acquire a bli
-        * reference. Therefore if the refcount drops to zero, the bli could
-        * still be AIL resident and the buffer submitted for I/O (and freed on
-        * completion) at any point before we return. This can be removed once
-        * the AIL properly holds a reference on the bli.
-        */
        freed = atomic_dec_and_test(&bip->bli_refcount);
-       if (freed && !stale && remove)
-               xfs_buf_hold(bp);
        if (atomic_dec_and_test(&bp->b_pin_count))
                wake_up_all(&bp->b_waiters);
 
-        /* nothing to do but drop the pin count if the bli is active */
-       if (!freed)
+       /*
+        * Nothing to do but drop the buffer pin reference if the BLI is
+        * still active.
+        */
+       if (!freed) {
+               xfs_buf_rele(bp);
                return;
+       }
 
        if (stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
@@ -523,6 +542,15 @@ xfs_buf_item_unpin(
                trace_xfs_buf_item_unpin_stale(bip);
 
                /*
+                * The buffer has been locked and referenced since it was marked
+                * stale so we own both lock and reference exclusively here. We
+                * do not need the pin reference any more, so drop it now so
+                * that we only have one reference to drop once item completion
+                * processing is complete.
+                */
+               xfs_buf_rele(bp);
+
+               /*
                 * If we get called here because of an IO error, we may or may
                 * not have the item on the AIL. xfs_trans_ail_delete() will
                 * take care of that situation. xfs_trans_ail_delete() drops
@@ -538,16 +566,30 @@ xfs_buf_item_unpin(
                        ASSERT(bp->b_log_item == NULL);
                }
                xfs_buf_relse(bp);
-       } else if (remove) {
+               return;
+       }
+
+       if (remove) {
                /*
-                * The buffer must be locked and held by the caller to simulate
-                * an async I/O failure. We acquired the hold for this case
-                * before the buffer was unpinned.
+                * We need to simulate an async IO failure here to ensure that
+                * the correct error completion is run on this buffer. This
+                * requires a reference to the buffer and for the buffer to be
+                * locked. We can safely pass ownership of the pin reference to
+                * the IO to ensure that nothing can free the buffer while we
+                * wait for the lock and then run the IO failure completion.
                 */
                xfs_buf_lock(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioend_fail(bp);
+               return;
        }
+
+       /*
+        * BLI has no more active references - it will be moved to the AIL to
+        * manage the remaining BLI/buffer life cycle. There is nothing left for
+        * us to do here so drop the pin reference to the buffer.
+        */
+       xfs_buf_rele(bp);
 }
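
The invariant the hunks above establish is one buffer reference per pin: xfs_buf_item_pin() takes xfs_buf_hold(), and every exit path of xfs_buf_item_unpin() either releases that reference or hands it to IO failure handling. A userspace analogue of the pattern, with hypothetical types, showing why the unpinner can still touch the object after dropping the last item reference:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct buf {
            atomic_int refcount;
            atomic_int pin_count;
    };

    static void buf_pin(struct buf *bp)
    {
            atomic_fetch_add(&bp->refcount, 1);     /* one ref per pin */
            atomic_fetch_add(&bp->pin_count, 1);
    }

    static void buf_unpin(struct buf *bp)
    {
            atomic_fetch_sub(&bp->pin_count, 1);
            /* bp is still safe to use: we hold our own pin reference */
            if (atomic_fetch_sub(&bp->refcount, 1) == 1)
                    free(bp);                       /* last reference frees */
    }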
 
 STATIC uint
index 22c1393..2fc98d3 100644 (file)
@@ -78,7 +78,6 @@ restart:
                *longest = 0;
                err = xfs_bmap_longest_free_extent(pag, NULL, longest);
                if (err) {
-                       xfs_perag_rele(pag);
                        if (err != -EAGAIN)
                                break;
                        /* Couldn't lock the AGF, skip this AG. */
index 0f60e30..4538909 100644 (file)
@@ -454,6 +454,27 @@ xfs_inodegc_queue_all(
        return ret;
 }
 
+/* Wait for all queued work and collect errors */
+static int
+xfs_inodegc_wait_all(
+       struct xfs_mount        *mp)
+{
+       int                     cpu;
+       int                     error = 0;
+
+       flush_workqueue(mp->m_inodegc_wq);
+       for_each_online_cpu(cpu) {
+               struct xfs_inodegc      *gc;
+
+               gc = per_cpu_ptr(mp->m_inodegc, cpu);
+               if (gc->error && !error)
+                       error = gc->error;
+               gc->error = 0;
+       }
+
+       return error;
+}
+
 /*
 * Check the validity of the inode we just found in the cache
  */
@@ -1491,15 +1512,14 @@ xfs_blockgc_free_space(
        if (error)
                return error;
 
-       xfs_inodegc_flush(mp);
-       return 0;
+       return xfs_inodegc_flush(mp);
 }
 
 /*
  * Reclaim all the free space that we can by scheduling the background blockgc
  * and inodegc workers immediately and waiting for them all to clear.
  */
-void
+int
 xfs_blockgc_flush_all(
        struct xfs_mount        *mp)
 {
@@ -1520,7 +1540,7 @@ xfs_blockgc_flush_all(
        for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
                flush_delayed_work(&pag->pag_blockgc_work);
 
-       xfs_inodegc_flush(mp);
+       return xfs_inodegc_flush(mp);
 }
 
 /*
@@ -1842,13 +1862,17 @@ xfs_inodegc_set_reclaimable(
  * This is the last chance to make changes to an otherwise unreferenced file
  * before incore reclamation happens.
  */
-static void
+static int
 xfs_inodegc_inactivate(
        struct xfs_inode        *ip)
 {
+       int                     error;
+
        trace_xfs_inode_inactivating(ip);
-       xfs_inactive(ip);
+       error = xfs_inactive(ip);
        xfs_inodegc_set_reclaimable(ip);
+       return error;
 }
 
 void
@@ -1880,8 +1904,12 @@ xfs_inodegc_worker(
 
        WRITE_ONCE(gc->shrinker_hits, 0);
        llist_for_each_entry_safe(ip, n, node, i_gclist) {
+               int     error;
+
                xfs_iflags_set(ip, XFS_INACTIVATING);
-               xfs_inodegc_inactivate(ip);
+               error = xfs_inodegc_inactivate(ip);
+               if (error && !gc->error)
+                       gc->error = error;
        }
 
        memalloc_nofs_restore(nofs_flag);
@@ -1905,13 +1933,13 @@ xfs_inodegc_push(
  * Force all currently queued inode inactivation work to run immediately and
  * wait for the work to finish.
  */
-void
+int
 xfs_inodegc_flush(
        struct xfs_mount        *mp)
 {
        xfs_inodegc_push(mp);
        trace_xfs_inodegc_flush(mp, __return_address);
-       flush_workqueue(mp->m_inodegc_wq);
+       return xfs_inodegc_wait_all(mp);
 }
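
xfs_inodegc_flush() now reports the first failure seen by any per-cpu worker: the workers stash errors in gc->error, and xfs_inodegc_wait_all() sweeps them after the workqueue drains. A userspace analogue of that first-error-wins sweep, with hypothetical stand-ins for the per-cpu machinery:

    #include <stddef.h>

    struct gc_state {
            int error;      /* first error seen by this worker, 0 if none */
    };

    /* In the kernel, flush_workqueue() runs before this sweep. */
    static int gc_wait_all(struct gc_state *states, size_t nr)
    {
            int error = 0;

            for (size_t i = 0; i < nr; i++) {
                    if (states[i].error && !error)
                            error = states[i].error;
                    states[i].error = 0;    /* reset for the next flush */
            }
            return error;
    }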
 
 /*
index 8791019..1dcdcb2 100644 (file)
@@ -62,7 +62,7 @@ int xfs_blockgc_free_dquots(struct xfs_mount *mp, struct xfs_dquot *udqp,
                unsigned int iwalk_flags);
 int xfs_blockgc_free_quota(struct xfs_inode *ip, unsigned int iwalk_flags);
 int xfs_blockgc_free_space(struct xfs_mount *mp, struct xfs_icwalk *icm);
-void xfs_blockgc_flush_all(struct xfs_mount *mp);
+int xfs_blockgc_flush_all(struct xfs_mount *mp);
 
 void xfs_inode_set_eofblocks_tag(struct xfs_inode *ip);
 void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
@@ -80,7 +80,7 @@ void xfs_blockgc_start(struct xfs_mount *mp);
 
 void xfs_inodegc_worker(struct work_struct *work);
 void xfs_inodegc_push(struct xfs_mount *mp);
-void xfs_inodegc_flush(struct xfs_mount *mp);
+int xfs_inodegc_flush(struct xfs_mount *mp);
 void xfs_inodegc_stop(struct xfs_mount *mp);
 void xfs_inodegc_start(struct xfs_mount *mp);
 void xfs_inodegc_cpu_dead(struct xfs_mount *mp, unsigned int cpu);
index 5808aba..9e62cc5 100644 (file)
@@ -1620,16 +1620,7 @@ xfs_inactive_ifree(
         */
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
 
-       /*
-        * Just ignore errors at this point.  There is nothing we can do except
-        * to try to keep going. Make sure it's not a silent error.
-        */
-       error = xfs_trans_commit(tp);
-       if (error)
-               xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
-                       __func__, error);
-
-       return 0;
+       return xfs_trans_commit(tp);
 }
 
 /*
@@ -1693,12 +1684,12 @@ xfs_inode_needs_inactive(
  * now be truncated.  Also, we clear all of the read-ahead state
  * kept for the inode here since the file is now closed.
  */
-void
+int
 xfs_inactive(
        xfs_inode_t     *ip)
 {
        struct xfs_mount        *mp;
-       int                     error;
+       int                     error = 0;
        int                     truncate = 0;
 
        /*
@@ -1736,7 +1727,7 @@ xfs_inactive(
                 * reference to the inode at this point anyways.
                 */
                if (xfs_can_free_eofblocks(ip, true))
-                       xfs_free_eofblocks(ip);
+                       error = xfs_free_eofblocks(ip);
 
                goto out;
        }
@@ -1773,7 +1764,7 @@ xfs_inactive(
        /*
         * Free the inode.
         */
-       xfs_inactive_ifree(ip);
+       error = xfs_inactive_ifree(ip);
 
 out:
        /*
@@ -1781,6 +1772,7 @@ out:
         * the attached dquots.
         */
        xfs_qm_dqdetach(ip);
+       return error;
 }
 
 /*
index 69d21e4..7547caf 100644 (file)
@@ -470,7 +470,7 @@ enum layout_break_reason {
        (xfs_has_grpid((pip)->i_mount) || (VFS_I(pip)->i_mode & S_ISGID))
 
 int            xfs_release(struct xfs_inode *ip);
-void           xfs_inactive(struct xfs_inode *ip);
+int            xfs_inactive(struct xfs_inode *ip);
 int            xfs_lookup(struct xfs_inode *dp, const struct xfs_name *name,
                           struct xfs_inode **ipp, struct xfs_name *ci_name);
 int            xfs_create(struct mnt_idmap *idmap,
index ca2941a..91c847a 100644 (file)
@@ -29,6 +29,153 @@ static inline struct xfs_inode_log_item *INODE_ITEM(struct xfs_log_item *lip)
        return container_of(lip, struct xfs_inode_log_item, ili_item);
 }
 
+static uint64_t
+xfs_inode_item_sort(
+       struct xfs_log_item     *lip)
+{
+       return INODE_ITEM(lip)->ili_inode->i_ino;
+}
+
+/*
+ * Prior to finally logging the inode, we have to ensure that all the
+ * per-modification inode state changes are applied. This includes VFS inode
+ * state updates, format conversions, verifier state synchronisation and
+ * ensuring the inode buffer remains in memory whilst the inode is dirty.
+ *
+ * We have to be careful when we grab the inode cluster buffer due to lock
+ * ordering constraints. The unlinked inode modifications (xfs_iunlink_item)
+ * require AGI -> inode cluster buffer lock order. The inode cluster buffer is
+ * not locked until ->precommit, so it happens after everything else has been
+ * modified.
+ *
+ * Further, we have AGI -> AGF lock ordering, and with O_TMPFILE handling we
+ * have AGI -> AGF -> iunlink item -> inode cluster buffer lock order. Hence we
+ * cannot safely lock the inode cluster buffer in xfs_trans_log_inode() because
+ * it can be called on an inode (e.g. via bumplink/droplink) before we take the
+ * AGF lock modifying directory blocks.
+ *
+ * Rather than force a complete rework of all the transactions to call
+ * xfs_trans_log_inode() once and once only at the end of every transaction, we
+ * move the pinning of the inode cluster buffer to a ->precommit operation. This
+ * matches how the xfs_iunlink_item locks the inode cluster buffer, and it
+ * ensures that the inode cluster buffer locking is always done last in a
+ * transaction. i.e. we ensure the lock order is always AGI -> AGF -> inode
+ * cluster buffer.
+ *
+ * If we return the inode number as the precommit sort key then we'll also
+ * guarantee that the order of inode cluster buffer locking is the same for
+ * all the inodes and unlink items in the transaction.
+ */
+static int
+xfs_inode_item_precommit(
+       struct xfs_trans        *tp,
+       struct xfs_log_item     *lip)
+{
+       struct xfs_inode_log_item *iip = INODE_ITEM(lip);
+       struct xfs_inode        *ip = iip->ili_inode;
+       struct inode            *inode = VFS_I(ip);
+       unsigned int            flags = iip->ili_dirty_flags;
+
+       /*
+        * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
+        * don't matter - we either will need an extra transaction in 24 hours
+        * to log the timestamps, or will clear already cleared fields in the
+        * worst case.
+        */
+       if (inode->i_state & I_DIRTY_TIME) {
+               spin_lock(&inode->i_lock);
+               inode->i_state &= ~I_DIRTY_TIME;
+               spin_unlock(&inode->i_lock);
+       }
+
+       /*
+        * If we're updating the inode core or the timestamps and it's possible
+        * to upgrade this inode to bigtime format, do so now.
+        */
+       if ((flags & (XFS_ILOG_CORE | XFS_ILOG_TIMESTAMP)) &&
+           xfs_has_bigtime(ip->i_mount) &&
+           !xfs_inode_has_bigtime(ip)) {
+               ip->i_diflags2 |= XFS_DIFLAG2_BIGTIME;
+               flags |= XFS_ILOG_CORE;
+       }
+
+       /*
+        * Inode verifiers do not check that the extent size hint is an integer
+        * multiple of the rt extent size on a directory with both rtinherit
+        * and extszinherit flags set.  If we're logging a directory that is
+        * misconfigured in this way, clear the hint.
+        */
+       if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
+           (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
+           (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+               ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
+                                  XFS_DIFLAG_EXTSZINHERIT);
+               ip->i_extsize = 0;
+               flags |= XFS_ILOG_CORE;
+       }
+
+       /*
+        * Record the specific change for fdatasync optimisation. This allows
+        * fdatasync to skip log forces for inodes that are only timestamp
+        * dirty. Once we've processed the XFS_ILOG_IVERSION flag, convert it
+        * to XFS_ILOG_CORE so that the actual on-disk dirty tracking
+        * (ili_fields) correctly tracks that the version has changed.
+        */
+       spin_lock(&iip->ili_lock);
+       iip->ili_fsync_fields |= (flags & ~XFS_ILOG_IVERSION);
+       if (flags & XFS_ILOG_IVERSION)
+               flags = ((flags & ~XFS_ILOG_IVERSION) | XFS_ILOG_CORE);
+
+       if (!iip->ili_item.li_buf) {
+               struct xfs_buf  *bp;
+               int             error;
+
+               /*
+                * We hold the ILOCK here, so this inode is not going to be
+                * flushed while we are here. Further, because there is no
+                * buffer attached to the item, we know that there is no IO in
+                * progress, so nothing will clear the ili_fields while we read
+                * in the buffer. Hence we can safely drop the spin lock and
+                * read the buffer knowing that the state will not change from
+                * here.
+                */
+               spin_unlock(&iip->ili_lock);
+               error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, &bp);
+               if (error)
+                       return error;
+
+               /*
+                * We need an explicit buffer reference for the log item but
+                * don't want the buffer to remain attached to the transaction.
+                * Hold the buffer but release the transaction reference once
+                * we've attached the inode log item to the buffer log item
+                * list.
+                */
+               xfs_buf_hold(bp);
+               spin_lock(&iip->ili_lock);
+               iip->ili_item.li_buf = bp;
+               bp->b_flags |= _XBF_INODES;
+               list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list);
+               xfs_trans_brelse(tp, bp);
+       }
+
+       /*
+        * Always OR in the bits from the ili_last_fields field.  This is to
+        * coordinate with the xfs_iflush() and xfs_buf_inode_iodone() routines
+        * in the eventual clearing of the ili_fields bits.  See the big comment
+        * in xfs_iflush() for an explanation of this coordination mechanism.
+        */
+       iip->ili_fields |= (flags | iip->ili_last_fields);
+       spin_unlock(&iip->ili_lock);
+
+       /*
+        * We are done with the log item transaction dirty state, so clear it so
+        * that it doesn't pollute future transactions.
+        */
+       iip->ili_dirty_flags = 0;
+       return 0;
+}
+
 /*
  * The logged size of an inode fork is always the current size of the inode
  * fork. This means that when an inode fork is relogged, the size of the logged
@@ -662,6 +809,8 @@ xfs_inode_item_committing(
 }
 
 static const struct xfs_item_ops xfs_inode_item_ops = {
+       .iop_sort       = xfs_inode_item_sort,
+       .iop_precommit  = xfs_inode_item_precommit,
        .iop_size       = xfs_inode_item_size,
        .iop_format     = xfs_inode_item_format,
        .iop_pin        = xfs_inode_item_pin,
index bbd836a..377e060 100644 (file)
@@ -17,6 +17,7 @@ struct xfs_inode_log_item {
        struct xfs_log_item     ili_item;          /* common portion */
        struct xfs_inode        *ili_inode;        /* inode ptr */
        unsigned short          ili_lock_flags;    /* inode lock flags */
+       unsigned int            ili_dirty_flags;   /* dirty in current tx */
        /*
         * The ili_lock protects the interactions between the dirty state and
         * the flush state of the inode log item. This allows us to do atomic
index 322eb2e..82c81d2 100644 (file)
@@ -2711,7 +2711,9 @@ xlog_recover_iunlink_bucket(
                         * just to flush the inodegc queue and wait for it to
                         * complete.
                         */
-                       xfs_inodegc_flush(mp);
+                       error = xfs_inodegc_flush(mp);
+                       if (error)
+                               break;
                }
 
                prev_agino = agino;
@@ -2719,10 +2721,15 @@ xlog_recover_iunlink_bucket(
        }
 
        if (prev_ip) {
+               int     error2;
+
                ip->i_prev_unlinked = prev_agino;
                xfs_irele(prev_ip);
+
+               error2 = xfs_inodegc_flush(mp);
+               if (error2 && !error)
+                       return error2;
        }
-       xfs_inodegc_flush(mp);
        return error;
 }
 
@@ -2789,7 +2796,6 @@ xlog_recover_iunlink_ag(
                         * bucket and remaining inodes on it unreferenced and
                         * unfreeable.
                         */
-                       xfs_inodegc_flush(pag->pag_mount);
                        xlog_recover_clear_agi_bucket(pag, bucket);
                }
        }
@@ -2806,13 +2812,6 @@ xlog_recover_process_iunlinks(
 
        for_each_perag(log->l_mp, agno, pag)
                xlog_recover_iunlink_ag(pag);
-
-       /*
-        * Flush the pending unlinked inodes to ensure that the inactivations
-        * are fully completed on disk and the incore inodes can be reclaimed
-        * before we signal that recovery is complete.
-        */
-       xfs_inodegc_flush(log->l_mp);
 }
 
 STATIC void
index aaaf5ec..6c09f89 100644 (file)
@@ -62,6 +62,7 @@ struct xfs_error_cfg {
 struct xfs_inodegc {
        struct llist_head       list;
        struct delayed_work     work;
+       int                     error;
 
        /* approximate count of inodes in the list */
        unsigned int            items;
index f5dc46c..abcc559 100644 (file)
@@ -616,8 +616,10 @@ xfs_reflink_cancel_cow_blocks(
                        xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
                                        del.br_blockcount);
 
-                       xfs_free_extent_later(*tpp, del.br_startblock,
+                       error = xfs_free_extent_later(*tpp, del.br_startblock,
                                          del.br_blockcount, NULL);
+                       if (error)
+                               break;
 
                        /* Roll the transaction */
                        error = xfs_defer_finish(tpp);
index 7e70625..4120bd1 100644 (file)
@@ -1100,6 +1100,7 @@ xfs_inodegc_init_percpu(
 #endif
                init_llist_head(&gc->list);
                gc->items = 0;
+               gc->error = 0;
                INIT_DELAYED_WORK(&gc->work, xfs_inodegc_worker);
        }
        return 0;
index 8afc0c0..8c0bfc9 100644 (file)
@@ -290,7 +290,9 @@ retry:
                 * Do not perform a synchronous scan because callers can hold
                 * other locks.
                 */
-               xfs_blockgc_flush_all(mp);
+               error = xfs_blockgc_flush_all(mp);
+               if (error)
+                       return error;
                want_retry = false;
                goto retry;
        }
@@ -970,6 +972,11 @@ __xfs_trans_commit(
                error = xfs_defer_finish_noroll(&tp);
                if (error)
                        goto out_unreserve;
+
+               /* Run precommits from final tx in defer chain. */
+               error = xfs_trans_run_precommits(tp);
+               if (error)
+                       goto out_unreserve;
        }
 
        /*
index d1f57e4..cebdf1c 100644 (file)
 /*
  * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
  * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
+ *
+ * Also, discard .note.gnu.property, otherwise it forces the notes section to
+ * be 8-byte aligned which causes alignment mismatches with the kernel's custom
+ * 4-byte aligned notes.
  */
 #define NOTES                                                          \
-       /DISCARD/ : { *(.note.GNU-stack) }                              \
+       /DISCARD/ : {                                                   \
+               *(.note.GNU-stack)                                      \
+               *(.note.gnu.property)                                   \
+       }                                                               \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) {                       \
                BOUNDED_SECTION_BY(.note.*, _notes)                     \
        } NOTES_HEADERS                                                 \
index 3598839..ad08f83 100644 (file)
@@ -105,6 +105,22 @@ char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
 
 void drmm_kfree(struct drm_device *dev, void *data);
 
-int drmm_mutex_init(struct drm_device *dev, struct mutex *lock);
+void __drmm_mutex_release(struct drm_device *dev, void *res);
+
+/**
+ * drmm_mutex_init - &drm_device-managed mutex_init()
+ * @dev: DRM device
+ * @lock: lock to be initialized
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ *
+ * This is a &drm_device-managed version of mutex_init(). The initialized
+ * lock is automatically destroyed on the final drm_dev_put().
+ */
+#define drmm_mutex_init(dev, lock) ({                                       \
+       mutex_init(lock);                                                    \
+       drmm_add_action_or_reset(dev, __drmm_mutex_release, lock);           \
+})
 
 #endif
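
With the macro form, the registered action destroys the mutex on the final drm_dev_put(), so drivers no longer pair mutex_init() with an explicit cleanup. A hedged driver-side sketch, assuming a live struct drm_device and a hypothetical driver lock:

    static struct mutex my_lock;    /* hypothetical driver lock */

    static int my_driver_init(struct drm_device *drm)
    {
            int ret = drmm_mutex_init(drm, &my_lock);

            if (ret)
                    return ret;     /* managed-action registration failed */

            mutex_lock(&my_lock);
            /* ... guarded one-time setup ... */
            mutex_unlock(&my_lock);
            return 0;
    }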
index c87aeec..583fe3b 100644 (file)
@@ -96,6 +96,7 @@
 
 /* FFA Bus/Device/Driver related */
 struct ffa_device {
+       u32 id;
        int vm_id;
        bool mode_32bit;
        uuid_t uuid;
index b441e63..c0ffe20 100644 (file)
@@ -1376,8 +1376,6 @@ enum blk_unique_id {
        BLK_UID_NAA     = 3,
 };
 
-#define NFL4_UFLG_MASK                 0x0000003F
-
 struct block_device_operations {
        void (*submit_bio)(struct bio *bio);
        int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
index 947a60b..d7779a1 100644 (file)
  * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
  * to disable branch tracing on a per file basis.
  */
-#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
-    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);
-
+#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
+    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
 #define likely_notrace(x)      __builtin_expect(!!(x), 1)
 #define unlikely_notrace(x)    __builtin_expect(!!(x), 0)
 
index eacb7dd..c1a7dc3 100644 (file)
@@ -572,4 +572,10 @@ void cper_print_proc_ia(const char *pfx,
 int cper_mem_err_location(struct cper_mem_err_compact *mem, char *msg);
 int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg);
 
+struct acpi_hest_generic_status;
+void cper_estatus_print(const char *pfx,
+                       const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
+int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
+
 #endif
index 9deeaeb..abf3d3b 100644 (file)
@@ -74,6 +74,7 @@ struct class {
 struct class_dev_iter {
        struct klist_iter               ki;
        const struct device_type        *type;
+       struct subsys_private           *sp;
 };
 
 int __must_check class_register(const struct class *class);
index 7aa62c9..571d1a6 100644 (file)
@@ -1338,4 +1338,6 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
        return xen_efi_config_table_is_usable(guid, table);
 }
 
+umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+
 #endif /* _LINUX_EFI_H */
index 1716c01..efb6e2c 100644 (file)
@@ -391,7 +391,7 @@ struct fw_iso_packet {
        u32 tag:2;              /* tx: Tag in packet header             */
        u32 sy:4;               /* tx: Sy in packet header              */
        u32 header_length:8;    /* Length of immediate header           */
-       u32 header[0];          /* tx: Top of 1394 isoch. data_block    */
+       u32 header[];           /* tx: Top of 1394 isoch. data_block    */
 };
 
 #define FW_ISO_CONTEXT_TRANSMIT                        0
index 21a9816..133f064 100644 (file)
@@ -1076,29 +1076,29 @@ extern int send_sigurg(struct fown_struct *fown);
  * sb->s_flags.  Note that these mirror the equivalent MS_* flags where
  * represented in both.
  */
-#define SB_RDONLY       1      /* Mount read-only */
-#define SB_NOSUID       2      /* Ignore suid and sgid bits */
-#define SB_NODEV        4      /* Disallow access to device special files */
-#define SB_NOEXEC       8      /* Disallow program execution */
-#define SB_SYNCHRONOUS 16      /* Writes are synced at once */
-#define SB_MANDLOCK    64      /* Allow mandatory locks on an FS */
-#define SB_DIRSYNC     128     /* Directory modifications are synchronous */
-#define SB_NOATIME     1024    /* Do not update access times. */
-#define SB_NODIRATIME  2048    /* Do not update directory access times */
-#define SB_SILENT      32768
-#define SB_POSIXACL    (1<<16) /* VFS does not apply the umask */
-#define SB_INLINECRYPT (1<<17) /* Use blk-crypto for encrypted files */
-#define SB_KERNMOUNT   (1<<22) /* this is a kern_mount call */
-#define SB_I_VERSION   (1<<23) /* Update inode I_version field */
-#define SB_LAZYTIME    (1<<25) /* Update the on-disk [acm]times lazily */
+#define SB_RDONLY       BIT(0) /* Mount read-only */
+#define SB_NOSUID       BIT(1) /* Ignore suid and sgid bits */
+#define SB_NODEV        BIT(2) /* Disallow access to device special files */
+#define SB_NOEXEC       BIT(3) /* Disallow program execution */
+#define SB_SYNCHRONOUS  BIT(4) /* Writes are synced at once */
+#define SB_MANDLOCK     BIT(6) /* Allow mandatory locks on an FS */
+#define SB_DIRSYNC      BIT(7) /* Directory modifications are synchronous */
+#define SB_NOATIME      BIT(10)        /* Do not update access times. */
+#define SB_NODIRATIME   BIT(11)        /* Do not update directory access times */
+#define SB_SILENT       BIT(15)
+#define SB_POSIXACL     BIT(16)        /* VFS does not apply the umask */
+#define SB_INLINECRYPT  BIT(17)        /* Use blk-crypto for encrypted files */
+#define SB_KERNMOUNT    BIT(22)        /* this is a kern_mount call */
+#define SB_I_VERSION    BIT(23)        /* Update inode I_version field */
+#define SB_LAZYTIME     BIT(25)        /* Update the on-disk [acm]times lazily */
 
 /* These sb flags are internal to the kernel */
-#define SB_SUBMOUNT     (1<<26)
-#define SB_FORCE       (1<<27)
-#define SB_NOSEC       (1<<28)
-#define SB_BORN                (1<<29)
-#define SB_ACTIVE      (1<<30)
-#define SB_NOUSER      (1<<31)
+#define SB_SUBMOUNT     BIT(26)
+#define SB_FORCE        BIT(27)
+#define SB_NOSEC        BIT(28)
+#define SB_BORN         BIT(29)
+#define SB_ACTIVE       BIT(30)
+#define SB_NOUSER       BIT(31)
 
 /* These flags relate to encoding and casefolding */
 #define SB_ENC_STRICT_MODE_FL  (1 << 0)
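
BIT(n) expands to an unsigned long shift, so SB_NOUSER and friends stop shifting into the sign bit of a signed int while keeping their values. A one-line check of the equivalence, using a local definition equivalent to the kernel's:

    #define BIT(nr) (1UL << (nr))   /* equivalent to include/vdso/bits.h */

    _Static_assert(BIT(31) == 0x80000000UL, "SB_NOUSER value unchanged");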
index fc985e5..8de6b6e 100644 (file)
@@ -208,6 +208,7 @@ struct team {
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
        bool port_mtu_change_allowed;
+       bool notifier_ctx;
        struct {
                unsigned int count;
                unsigned int interval; /* in ms */
index dd64e54..9cb6c80 100644 (file)
@@ -135,7 +135,7 @@ static inline int iio_gts_find_int_time_by_sel(struct iio_gts *gts, int sel)
 /**
  * iio_gts_find_sel_by_int_time - find selector matching integration time
  * @gts:       Gain time scale descriptor
- * @gain:      HW-gain for which matching selector is searched for
+ * @time:      Integration time for which matching selector is searched for
  *
  * Return:     a selector matching given integration time or -EINVAL if
  *             selector was not found.
index b32256e..74bd269 100644 (file)
@@ -344,6 +344,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_repin_lock(l,c)        lock_repin_lock(&(l)->dep_map, (c))
 #define lockdep_unpin_lock(l,c)        lock_unpin_lock(&(l)->dep_map, (c))
 
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)    \
+       struct lockdep_map _name = {                    \
+               .name = #_name "-wait-type-override",   \
+               .wait_type_inner = _wait_type,          \
+               .lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
@@ -432,6 +442,9 @@ extern int lockdep_is_held(const void *);
 #define lockdep_repin_lock(l, c)               do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)               do { (void)(l); (void)(c); } while (0)
 
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)    \
+       struct lockdep_map __maybe_unused _name = {}
+
 #endif /* !LOCKDEP */
 
 enum xhlock_context_t {
@@ -556,6 +569,7 @@ do {                                                                        \
 #define rwsem_release(l, i)                    lock_release(l, i)
 
 #define lock_map_acquire(l)                    lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l)                        lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)               lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)            lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)                    lock_release(l, _THIS_IP_)
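
A wait-type override map annotates a code region rather than a real lock, and per the comment above it must be entered with lock_map_acquire_try() so lockdep validates wait types without adding the map to the dependency chain. A hedged usage sketch, modelled on how such maps are used elsewhere in the kernel:

    static DEFINE_WAIT_OVERRIDE_MAP(my_flush_map, LD_WAIT_SLEEP);

    static void my_flush(void)      /* hypothetical might-sleep section */
    {
            lock_map_acquire_try(&my_flush_map);
            /* ... work checked against LD_WAIT_SLEEP ... */
            lock_map_release(&my_flush_map);
    }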
index d224308..59f4fb1 100644 (file)
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
 enum lockdep_lock_type {
        LD_LOCK_NORMAL = 0,     /* normal, catch all */
        LD_LOCK_PERCPU,         /* percpu */
+       LD_LOCK_WAIT_OVERRIDE,  /* annotation */
        LD_LOCK_MAX,
 };
 
index a4c4f73..94d2be5 100644 (file)
@@ -1093,6 +1093,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
 int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
 int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
index dc5e2cb..b89778d 100644 (file)
@@ -1705,7 +1705,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         rc[0x1];
 
        u8         uar_4k[0x1];
-       u8         reserved_at_241[0x9];
+       u8         reserved_at_241[0x7];
+       u8         fl_rc_qp_when_roce_disabled[0x1];
+       u8         regexp_params[0x1];
        u8         uar_sz[0x6];
        u8         port_selection_cap[0x1];
        u8         reserved_at_248[0x1];
index cdb14a1..a50ea79 100644 (file)
@@ -383,6 +383,13 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
 void arch_teardown_msi_irq(unsigned int irq);
 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void arch_teardown_msi_irqs(struct pci_dev *dev);
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
+
+/*
+ * Xen uses non-default msi_domain_ops and hence needs a way to populate sysfs
+ * entries of MSI IRQs.
+ */
+#if defined(CONFIG_PCI_XEN) || defined(CONFIG_PCI_MSI_ARCH_FALLBACKS)
 #ifdef CONFIG_SYSFS
 int msi_device_populate_sysfs(struct device *dev);
 void msi_device_destroy_sysfs(struct device *dev);
@@ -390,7 +397,7 @@ void msi_device_destroy_sysfs(struct device *dev);
 static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
 static inline void msi_device_destroy_sysfs(struct device *dev) { }
 #endif /* !CONFIG_SYSFS */
-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
+#endif /* CONFIG_PCI_XEN || CONFIG_PCI_MSI_ARCH_FALLBACKS */
 
 /*
  * The restore hook is still available even for fully irq domain based
index 08fbd46..c2f0c60 100644 (file)
@@ -620,7 +620,7 @@ struct netdev_queue {
        netdevice_tracker       dev_tracker;
 
        struct Qdisc __rcu      *qdisc;
-       struct Qdisc            *qdisc_sleeping;
+       struct Qdisc __rcu      *qdisc_sleeping;
 #ifdef CONFIG_SYSFS
        struct kobject          kobj;
 #endif
@@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
                /* We only give a hint, preemption can change CPU under us */
                val |= raw_smp_processor_id();
 
-               if (table->ents[index] != val)
-                       table->ents[index] = val;
+               /* The following WRITE_ONCE() is paired with the READ_ONCE()
+                * here, and another one in get_rps_cpu().
+                */
+               if (READ_ONCE(table->ents[index]) != val)
+                       WRITE_ONCE(table->ents[index], val);
        }
 }
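
The READ_ONCE()/WRITE_ONCE() pair marks table->ents[] as shared data accessed without a lock, preventing the compiler from tearing, fusing, or re-reading the plain accesses. The equivalent pattern in portable C11 uses relaxed atomics:

    #include <stdatomic.h>

    static _Atomic unsigned int ents[256];  /* hypothetical flow table */

    static void record_flow(unsigned int index, unsigned int val)
    {
            /* skip the store (and cacheline dirtying) when unchanged */
            if (atomic_load_explicit(&ents[index], memory_order_relaxed) != val)
                    atomic_store_explicit(&ents[index], val,
                                          memory_order_relaxed);
    }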
 
index 1c68d67..92a2063 100644 (file)
@@ -617,6 +617,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
  * Please note that, confusingly, "page_mapping" refers to the inode
  * address_space which maps the page from disk; whereas "page_mapped"
  * refers to user virtual address space into which the page is mapped.
+ *
+ * For slab pages, since slab reuses the bits in struct page to store its
+ * internal states, the page->mapping does not exist as such, nor do these
+ * flags below.  So in order to avoid testing non-existent bits, please
+ * make sure that PageSlab(page) actually evaluates to false before calling
+ * the following functions (e.g., PageAnon).  See mm/slab.h.
  */
 #define PAGE_MAPPING_ANON      0x1
 #define PAGE_MAPPING_MOVABLE   0x2
index 5e1e115..fdf9c95 100644 (file)
 #include <linux/types.h>
 
 /*
- * Linux EFI stub v1.0 adds the following functionality:
- * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
- * - Loading/starting the kernel from firmware that targets a different
- *   machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ * Starting from version v3.0, the major version field should be interpreted as
+ * a bit mask of features supported by the kernel's EFI stub:
+ * - 0x1: initrd loading from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - 0x2: initrd loading using the initrd= command line option, where the file
+ *        may be specified using device path notation, and is not required to
+ *        reside on the same volume as the loaded kernel image.
  *
  * The recommended way of loading and starting v1.0 or later kernels is to use
  * the LoadImage() and StartImage() EFI boot services, and expose the initrd
  * via the LINUX_EFI_INITRD_MEDIA_GUID device path.
  *
- * Versions older than v1.0 support initrd loading via the image load options
- * (using initrd=, limited to the volume from which the kernel itself was
- * loaded), or via arch specific means (bootparams, DT, etc).
+ * Versions older than v1.0 may support initrd loading via the image load
+ * options (using initrd=, limited to the volume from which the kernel itself
+ * was loaded), or only via arch specific means (bootparams, DT, etc).
  *
- * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
- * protocol is implemented, which can be inferred from the version,
- * handover_offset and xloadflags fields in the bootparams structure.
+ * The minor version field must remain 0x0.
+ * (https://lore.kernel.org/all/efd6f2d4-547c-1378-1faa-53c044dbd297@gmail.com/)
  */
-#define LINUX_EFISTUB_MAJOR_VERSION            0x1
-#define LINUX_EFISTUB_MINOR_VERSION            0x1
+#define LINUX_EFISTUB_MAJOR_VERSION            0x3
+#define LINUX_EFISTUB_MINOR_VERSION            0x0
 
 /*
  * LINUX_PE_MAGIC appears at offset 0x38 into the MS-DOS header of EFI bootable
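
Treating the major version as a feature mask means a boot loader tests individual capability bits instead of comparing whole versions. A minimal sketch, assuming the loader has already extracted the major version field from the image:

    #include <stdbool.h>
    #include <stdint.h>

    #define EFISTUB_FEAT_INITRD_MEDIA_GUID  0x1     /* device-path initrd */
    #define EFISTUB_FEAT_INITRD_CMDLINE     0x2     /* initrd= device path */

    static bool stub_has_feature(uint16_t major_version, uint16_t feature)
    {
            /* from v3.0 onwards, the major version is a capability mask */
            return major_version & feature;
    }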
index c5a0dc8..6478838 100644 (file)
@@ -1900,10 +1900,8 @@ void phy_package_leave(struct phy_device *phydev);
 int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
                          int addr, size_t priv_size);
 
-#if IS_ENABLED(CONFIG_PHYLIB)
 int __init mdio_bus_init(void);
 void mdio_bus_exit(void);
-#endif
 
 int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data);
 int phy_ethtool_get_sset_count(struct phy_device *phydev);
index a1aa681..7c8d654 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef __LINUX_BQ27X00_BATTERY_H__
 #define __LINUX_BQ27X00_BATTERY_H__
 
+#include <linux/power_supply.h>
+
 enum bq27xxx_chip {
        BQ27000 = 1, /* bq27000, bq27200 */
        BQ27010, /* bq27010, bq27210 */
@@ -68,7 +70,9 @@ struct bq27xxx_device_info {
        struct bq27xxx_access_methods bus;
        struct bq27xxx_reg_cache cache;
        int charge_design_full;
+       bool removed;
        unsigned long last_update;
+       union power_supply_propval last_status;
        struct delayed_work work;
        struct power_supply *bat;
        struct list_head list;
index 537cbf9..e0f5ac9 100644 (file)
@@ -29,7 +29,6 @@ struct kernel_clone_args {
        u32 io_thread:1;
        u32 user_worker:1;
        u32 no_files:1;
-       u32 ignore_signals:1;
        unsigned long stack;
        unsigned long stack_size;
        unsigned long tls;
index 6123c10..837a236 100644 (file)
@@ -2,22 +2,13 @@
 #ifndef _LINUX_VHOST_TASK_H
 #define _LINUX_VHOST_TASK_H
 
-#include <linux/completion.h>
 
-struct task_struct;
+struct vhost_task;
 
-struct vhost_task {
-       int (*fn)(void *data);
-       void *data;
-       struct completion exited;
-       unsigned long flags;
-       struct task_struct *task;
-};
-
-struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg,
+struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
                                     const char *name);
 void vhost_task_start(struct vhost_task *vtsk);
 void vhost_task_stop(struct vhost_task *vtsk);
-bool vhost_task_should_stop(struct vhost_task *vtsk);
+void vhost_task_wake(struct vhost_task *vtsk);
 
 #endif
index 7bde8e1..224293b 100644 (file)
@@ -107,7 +107,10 @@ extern void synchronize_shrinkers(void);
 
 #ifdef CONFIG_SHRINKER_DEBUG
 extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker);
+extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                             int *debugfs_id);
+extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+                                   int debugfs_id);
 extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
                                                  const char *fmt, ...);
 #else /* CONFIG_SHRINKER_DEBUG */
@@ -115,10 +118,16 @@ static inline int shrinker_debugfs_add(struct shrinker *shrinker)
 {
        return 0;
 }
-static inline struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                                    int *debugfs_id)
 {
+       *debugfs_id = -1;
        return NULL;
 }
+static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
+                                          int debugfs_id)
+{
+}
 static inline __printf(2, 3)
 int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 {
index 738776a..0b40417 100644 (file)
@@ -1587,6 +1587,16 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
        to->l4_hash = from->l4_hash;
 };
 
+static inline int skb_cmp_decrypted(const struct sk_buff *skb1,
+                                   const struct sk_buff *skb2)
+{
+#ifdef CONFIG_TLS_DEVICE
+       return skb2->decrypted - skb1->decrypted;
+#else
+       return 0;
+#endif
+}
+
 static inline void skb_copy_decrypted(struct sk_buff *to,
                                      const struct sk_buff *from)
 {
index 84f7874..054d791 100644 (file)
@@ -71,7 +71,6 @@ struct sk_psock_link {
 };
 
 struct sk_psock_work_state {
-       struct sk_buff                  *skb;
        u32                             len;
        u32                             off;
 };
@@ -105,7 +104,7 @@ struct sk_psock {
        struct proto                    *sk_proto;
        struct mutex                    work_mutex;
        struct sk_psock_work_state      work_state;
-       struct work_struct              work;
+       struct delayed_work             work;
        struct rcu_work                 rwork;
 };
 
index 24aa159..fbc4bd4 100644 (file)
@@ -176,7 +176,7 @@ extern struct svc_rdma_recv_ctxt *
 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                                   struct svc_rdma_recv_ctxt *ctxt);
 extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
 extern int svc_rdma_recvfrom(struct svc_rqst *);
 
 /* svc_rdma_rw.c */
index 8674792..a6b1263 100644 (file)
@@ -23,7 +23,7 @@ struct svc_xprt_ops {
        int             (*xpo_sendto)(struct svc_rqst *);
        int             (*xpo_result_payload)(struct svc_rqst *, unsigned int,
                                              unsigned int);
-       void            (*xpo_release_rqst)(struct svc_rqst *);
+       void            (*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
        void            (*xpo_detach)(struct svc_xprt *);
        void            (*xpo_free)(struct svc_xprt *);
        void            (*xpo_kill_temp_xprt)(struct svc_xprt *);
index d16ae62..a711604 100644 (file)
@@ -61,10 +61,9 @@ int          svc_recv(struct svc_rqst *, long);
 void           svc_send(struct svc_rqst *rqstp);
 void           svc_drop(struct svc_rqst *);
 void           svc_sock_update_bufs(struct svc_serv *serv);
-bool           svc_alien_sock(struct net *net, int fd);
-int            svc_addsock(struct svc_serv *serv, const int fd,
-                                       char *name_return, const size_t len,
-                                       const struct cred *cred);
+int            svc_addsock(struct svc_serv *serv, struct net *net,
+                           const int fd, char *name_return, const size_t len,
+                           const struct cred *cred);
 void           svc_init_xprt_sock(void);
 void           svc_cleanup_xprt_sock(void);
 struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
index df81043..42b249b 100644 (file)
@@ -243,11 +243,7 @@ static inline bool is_ssam_device(struct device *d)
  * Return: Returns the pointer to the &struct ssam_device_driver wrapping the
  * given device driver @d.
  */
-static inline
-struct ssam_device_driver *to_ssam_device_driver(struct device_driver *d)
-{
-       return container_of(d, struct ssam_device_driver, driver);
-}
+#define to_ssam_device_driver(d)       container_of_const(d, struct ssam_device_driver, driver)
 
 const struct ssam_device_id *ssam_device_id_match(const struct ssam_device_id *table,
                                                  const struct ssam_device_uid uid);
index 7769338..6a1e8f1 100644 (file)
@@ -282,6 +282,7 @@ enum tpm_chip_flags {
        TPM_CHIP_FLAG_ALWAYS_POWERED            = BIT(5),
        TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED    = BIT(6),
        TPM_CHIP_FLAG_FIRMWARE_UPGRADE          = BIT(7),
+       TPM_CHIP_FLAG_SUSPENDED                 = BIT(8),
 };
 
 #define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
index 0e37322..7c4a0b7 100644 (file)
@@ -806,6 +806,7 @@ enum {
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
+       FILTER_STACKTRACE,
 };
 
 extern int trace_event_raw_init(struct trace_event_call *call);
index a2448e9..07531c4 100644 (file)
@@ -443,7 +443,7 @@ static inline struct usb_composite_driver *to_cdriver(
  * @bcd_webusb_version: 0x0100 by default, WebUSB specification version
  * @b_webusb_vendor_code: 0x0 by default, vendor code for WebUSB
  * @landing_page: empty by default, landing page to announce in WebUSB
- * @use_webusb:: false by default, interested gadgets set it
+ * @use_webusb: false by default, interested gadgets set it
  * @os_desc_config: the configuration to be used with OS descriptors
  * @setup_pending: true when setup request is queued but not completed
  * @os_desc_pending: true when os_desc request is queued but not completed
index 094c77e..0c7eff9 100644 (file)
@@ -501,6 +501,11 @@ void *hcd_buffer_alloc(struct usb_bus *bus, size_t size,
 void hcd_buffer_free(struct usb_bus *bus, size_t size,
        void *addr, dma_addr_t dma);
 
+void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
+               size_t size, gfp_t mem_flags, dma_addr_t *dma);
+void hcd_buffer_free_pages(struct usb_hcd *hcd,
+               size_t size, void *addr, dma_addr_t dma);
+
 /* generic bus glue, needed for host controllers that don't use PCI */
 extern irqreturn_t usb_hcd_irq(int irq, void *__hcd);
 
index 2847f5a..8afa8c3 100644 (file)
 
 #ifdef CONFIG_USER_EVENTS
 struct user_event_mm {
-       struct list_head        link;
+       struct list_head        mms_link;
        struct list_head        enablers;
        struct mm_struct        *mm;
+       /* Used for one-shot lists, protected by event_mutex */
        struct user_event_mm    *next;
        refcount_t              refcnt;
        refcount_t              tasks;
index e7c4487..367d538 100644 (file)
@@ -686,7 +686,10 @@ struct dtv_frontend_properties {
  * @id:                        Frontend ID
  * @exit:              Used to inform the DVB core that the frontend
  *                     thread should exit (usually, means that the hardware
- *                     got disconnected.
+ *                     got disconnected).
+ * @remove_mutex:      mutex that avoids a race condition between a callback
+ *                     called when the hardware is disconnected and the
+ *                     file_operations of dvb_frontend.
  */
 
 struct dvb_frontend {
@@ -704,6 +707,7 @@ struct dvb_frontend {
        int (*callback)(void *adapter_priv, int component, int cmd, int arg);
        int id;
        unsigned int exit;
+       struct mutex remove_mutex;
 };
 
 /**
index 9980b1d..4a921ea 100644 (file)
@@ -39,6 +39,9 @@ struct net_device;
  * @exit:              flag to indicate when the device is being removed.
  * @demux:             pointer to &struct dmx_demux.
  * @ioctl_mutex:       protect access to this struct.
+ * @remove_mutex:      mutex that avoids a race condition between a callback
+ *                     called when the hardware is disconnected and the
+ *                     file_operations of dvb_net.
  *
  * Currently, the core supports up to %DVB_NET_DEVICES_MAX (10) network
  * devices.
@@ -51,6 +54,7 @@ struct dvb_net {
        unsigned int exit:1;
        struct dmx_demux *demux;
        struct mutex ioctl_mutex;
+       struct mutex remove_mutex;
 };
 
 /**
index 29d25c8..8958e5e 100644 (file)
@@ -194,6 +194,21 @@ struct dvb_device {
 };
 
 /**
+ * struct dvbdevfops_node - fops nodes registered in dvbdevfops_list
+ *
+ * @fops:              Dynamically allocated fops for ->owner registration
+ * @type:              type of dvb_device
+ * @template:          dvb_device used for registration
+ * @list_head:         list_head for dvbdevfops_list
+ */
+struct dvbdevfops_node {
+       struct file_operations *fops;
+       enum dvb_device_type type;
+       const struct dvb_device *template;
+       struct list_head list_head;
+};
+
+/**
  * dvb_device_get - Increase dvb_device reference
  *
  * @dvbdev:    pointer to struct dvb_device
index cfd19e7..b325df0 100644 (file)
@@ -1119,6 +1119,7 @@ struct v4l2_subdev {
  * @vfh: pointer to &struct v4l2_fh
  * @state: pointer to &struct v4l2_subdev_state
  * @owner: module pointer to the owner of this file handle
+ * @client_caps: bitmask of ``V4L2_SUBDEV_CLIENT_CAP_*``
  */
 struct v4l2_subdev_fh {
        struct v4l2_fh vfh;
index 07df96c..872dcb9 100644 (file)
@@ -350,6 +350,7 @@ enum {
 enum {
        HCI_SETUP,
        HCI_CONFIG,
+       HCI_DEBUGFS_CREATED,
        HCI_AUTO_OFF,
        HCI_RFKILLED,
        HCI_MGMT,
index a6c8aee..9654567 100644 (file)
@@ -515,6 +515,7 @@ struct hci_dev {
        struct work_struct      cmd_sync_work;
        struct list_head        cmd_sync_work_list;
        struct mutex            cmd_sync_work_lock;
+       struct mutex            unregister_lock;
        struct work_struct      cmd_sync_cancel_work;
        struct work_struct      reenable_adv_work;
 
@@ -1201,7 +1202,8 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev,
                if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis)
                        continue;
 
-               if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
+               /* Match destination address if set */
+               if (!ba || (ba_type == c->dst_type && !bacmp(&c->dst, ba))) {
                        rcu_read_unlock();
                        return c;
                }
@@ -1327,7 +1329,7 @@ int hci_le_create_cis(struct hci_conn *conn);
 
 struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
                              u8 role);
-int hci_conn_del(struct hci_conn *conn);
+void hci_conn_del(struct hci_conn *conn);
 void hci_conn_hash_flush(struct hci_dev *hdev);
 void hci_conn_check_pending(struct hci_dev *hdev);
 
index 0efef2a..59955ac 100644 (file)
@@ -221,6 +221,7 @@ struct bonding {
        struct   bond_up_slave __rcu *usable_slaves;
        struct   bond_up_slave __rcu *all_slaves;
        bool     force_primary;
+       bool     notifier_ctx;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
index 3352b1a..2e26e43 100644 (file)
@@ -24,6 +24,7 @@ struct tls_handshake_args {
        struct socket           *ta_sock;
        tls_done_func_t         ta_done;
        void                    *ta_data;
+       const char              *ta_peername;
        unsigned int            ta_timeout_ms;
        key_serial_t            ta_keyring;
        key_serial_t            ta_my_cert;
index c3fffaa..acec504 100644 (file)
@@ -76,6 +76,7 @@ struct ipcm_cookie {
        __be32                  addr;
        int                     oif;
        struct ip_options_rcu   *opt;
+       __u8                    protocol;
        __u8                    ttl;
        __s16                   tos;
        char                    priority;
@@ -96,6 +97,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
        ipcm->sockc.tsflags = inet->sk.sk_tsflags;
        ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
        ipcm->addr = inet->inet_saddr;
+       ipcm->protocol = inet->inet_num;
 }
 
 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
index cd386aa..9eef199 100644 (file)
@@ -347,10 +347,8 @@ struct mana_tx_qp {
 struct mana_ethtool_stats {
        u64 stop_queue;
        u64 wake_queue;
-       u64 tx_cqes;
        u64 tx_cqe_err;
        u64 tx_cqe_unknown_type;
-       u64 rx_cqes;
        u64 rx_coalesced_err;
        u64 rx_cqe_unknown_type;
 };
index 3fa5774..f6a8ecc 100644 (file)
@@ -180,7 +180,7 @@ struct pneigh_entry {
        netdevice_tracker       dev_tracker;
        u32                     flags;
        u8                      protocol;
-       u8                      key[];
+       u32                     key[];
 };
 
 /*
index 3cceb3e..5f2cfd8 100644 (file)
@@ -53,7 +53,7 @@ struct netns_sysctl_ipv6 {
        int seg6_flowlabel;
        u32 ioam6_id;
        u64 ioam6_id_wide;
-       bool skip_notify_on_dev_down;
+       u8 skip_notify_on_dev_down;
        u8 fib_notify_on_flag_change;
        u8 icmpv6_error_anycast_as_unicast;
 };
index 9fa291a..2b12725 100644 (file)
@@ -497,29 +497,6 @@ static inline struct fib6_nh *nexthop_fib6_nh(struct nexthop *nh)
        return NULL;
 }
 
-/* Variant of nexthop_fib6_nh().
- * Caller should either hold rcu_read_lock(), or RTNL.
- */
-static inline struct fib6_nh *nexthop_fib6_nh_bh(struct nexthop *nh)
-{
-       struct nh_info *nhi;
-
-       if (nh->is_group) {
-               struct nh_group *nh_grp;
-
-               nh_grp = rcu_dereference_rtnl(nh->nh_grp);
-               nh = nexthop_mpath_select(nh_grp, 0);
-               if (!nh)
-                       return NULL;
-       }
-
-       nhi = rcu_dereference_rtnl(nh->nh_info);
-       if (nhi->family == AF_INET6)
-               return &nhi->fib6_nh;
-
-       return NULL;
-}
-
 static inline struct net_device *fib6_info_nh_dev(struct fib6_info *f6i)
 {
        struct fib6_nh *fib6_nh;
index c8ec2f3..126f9e2 100644 (file)
@@ -399,22 +399,4 @@ static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
                page_pool_update_nid(pool, new_nid);
 }
 
-static inline void page_pool_ring_lock(struct page_pool *pool)
-       __acquires(&pool->ring.producer_lock)
-{
-       if (in_softirq())
-               spin_lock(&pool->ring.producer_lock);
-       else
-               spin_lock_bh(&pool->ring.producer_lock);
-}
-
-static inline void page_pool_ring_unlock(struct page_pool *pool)
-       __releases(&pool->ring.producer_lock)
-{
-       if (in_softirq())
-               spin_unlock(&pool->ring.producer_lock);
-       else
-               spin_unlock_bh(&pool->ring.producer_lock);
-}
-
 #endif /* _NET_PAGE_POOL_H */
index 9233ad3..bc77792 100644 (file)
 #define PING_HTABLE_SIZE       64
 #define PING_HTABLE_MASK       (PING_HTABLE_SIZE-1)
 
-/*
- * gid_t is either uint or ushort.  We want to pass it to
- * proc_dointvec_minmax(), so it must not be larger than MAX_INT
- */
-#define GID_T_MAX (((gid_t)~0U) >> 1)
+#define GID_T_MAX (((gid_t)~0U) - 1)
 
 /* Compatibility glue so we can support IPv6 when it's compiled as a module */
 struct pingv6_ops {
index f436688..5722931 100644 (file)
@@ -127,6 +127,8 @@ static inline void qdisc_run(struct Qdisc *q)
        }
 }
 
+extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
+
 /* Calculate maximal size of packet seen by hard_start_xmit
    routine of this device.
  */
index 308ef0a..30fe780 100644 (file)
@@ -23,9 +23,6 @@ static inline int rpl_init(void)
 static inline void rpl_exit(void) {}
 #endif
 
-/* Worst decompression memory usage ipv6 address (16) + pad 7 */
-#define IPV6_RPL_SRH_WORST_SWAP_SIZE (sizeof(struct in6_addr) + 7)
-
 size_t ipv6_rpl_srh_size(unsigned char n, unsigned char cmpri,
                         unsigned char cmpre);
 
index fab5ba3..27271f2 100644 (file)
@@ -545,7 +545,7 @@ static inline struct Qdisc *qdisc_root_bh(const struct Qdisc *qdisc)
 
 static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
 {
-       return qdisc->dev_queue->qdisc_sleeping;
+       return rcu_dereference_rtnl(qdisc->dev_queue->qdisc_sleeping);
 }
 
 static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
@@ -754,7 +754,9 @@ static inline bool qdisc_tx_changing(const struct net_device *dev)
 
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-               if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
+
+               if (rcu_access_pointer(txq->qdisc) !=
+                   rcu_access_pointer(txq->qdisc_sleeping))
                        return true;
        }
        return false;
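
qdisc_sleeping is now an RCU-annotated pointer, so even a comparison-only
read goes through a marked accessor. A C11 userspace analogue of the minimum
that rcu_access_pointer()/rcu_dereference_rtnl() provide (no torn or
refetched loads); the kernel accessors additionally carry sparse annotations
and lockdep checks:

#include <stdatomic.h>

struct Qdisc;

static _Atomic(struct Qdisc *) qdisc_sleeping;

static struct Qdisc *peek_sleeping(void)
{
	/* a marked load the compiler cannot tear, fuse, or reload */
	return atomic_load_explicit(&qdisc_sleeping, memory_order_relaxed);
}
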
index 656ea89..6f428a7 100644 (file)
@@ -336,6 +336,7 @@ struct sk_filter;
   *    @sk_cgrp_data: cgroup data for this cgroup
   *    @sk_memcg: this socket's memory cgroup association
   *    @sk_write_pending: a write to stream socket waits to start
+  *    @sk_wait_pending: number of threads blocked on this socket
   *    @sk_state_change: callback to indicate change in the state of the sock
   *    @sk_data_ready: callback to indicate there is data to be processed
  *    @sk_write_space: callback to indicate there is buffer sending space available
@@ -428,6 +429,7 @@ struct sock {
        unsigned int            sk_napi_id;
 #endif
        int                     sk_rcvbuf;
+       int                     sk_wait_pending;
 
        struct sk_filter __rcu  *sk_filter;
        union {
@@ -1150,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
                 * OR   an additional socket flag
                 * [1] : sk_state and sk_prot are in the same cache line.
                 */
-               if (sk->sk_state == TCP_ESTABLISHED)
-                       sock_rps_record_flow_hash(sk->sk_rxhash);
+               if (sk->sk_state == TCP_ESTABLISHED) {
+                       /* This READ_ONCE() is paired with the WRITE_ONCE()
+                        * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+                        */
+                       sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+               }
        }
 #endif
 }
@@ -1160,20 +1166,25 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-       if (unlikely(sk->sk_rxhash != skb->hash))
-               sk->sk_rxhash = skb->hash;
+       /* The following WRITE_ONCE() is paired with the READ_ONCE()
+        * here, and another one in sock_rps_record_flow().
+        */
+       if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+               WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-       sk->sk_rxhash = 0;
+       /* Paired with READ_ONCE() in sock_rps_record_flow() */
+       WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }
 
 #define sk_wait_event(__sk, __timeo, __condition, __wait)              \
        ({      int __rc;                                               \
+               __sk->sk_wait_pending++;                                \
                release_sock(__sk);                                     \
                __rc = __condition;                                     \
                if (!__rc) {                                            \
@@ -1183,6 +1194,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
                }                                                       \
                sched_annotate_sleep();                                 \
                lock_sock(__sk);                                        \
+               __sk->sk_wait_pending--;                                \
                __rc = __condition;                                     \
                __rc;                                                   \
        })
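
The sk_rxhash fix pairs WRITE_ONCE() with READ_ONCE() because the field is
touched without the socket lock. A C11 analogue of the pairing (function
names illustrative):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t rxhash;	/* stands in for sk->sk_rxhash */

/* writer side, as in sock_rps_save_rxhash() */
static void save_rxhash(uint32_t hash)
{
	if (atomic_load_explicit(&rxhash, memory_order_relaxed) != hash)
		atomic_store_explicit(&rxhash, hash, memory_order_relaxed);
}

/* reader side, as in sock_rps_record_flow() */
static uint32_t record_flow(void)
{
	return atomic_load_explicit(&rxhash, memory_order_relaxed);
}
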
index 04a3164..5066e45 100644 (file)
@@ -632,6 +632,7 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb);
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
 void tcp_fin(struct sock *sk);
 void tcp_check_space(struct sock *sk);
+void tcp_sack_compress_send_ack(struct sock *sk);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -1470,6 +1471,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
 }
 
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
+void __tcp_cleanup_rbuf(struct sock *sk, int copied);
+
 
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
  * If 87.5 % (7/8) of the space has been consumed, we want to override
@@ -2326,6 +2329,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
 void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
 #endif /* CONFIG_BPF_SYSCALL */
 
+#ifdef CONFIG_INET
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
+#else
+static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+}
+#endif
+
 int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
                          struct sk_msg *msg, u32 bytes, int flags);
 #endif /* CONFIG_NET_SOCK_MSG */
index 6056ce5..596595c 100644 (file)
@@ -126,6 +126,7 @@ struct tls_strparser {
        u32 mark : 8;
        u32 stopped : 1;
        u32 copy_mode : 1;
+       u32 mixed_decrypted : 1;
        u32 msg_ready : 1;
 
        struct strp_msg stm;
index dbc47af..4f44f0b 100644 (file)
@@ -44,6 +44,9 @@ int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink);
 
 int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num);
 
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                                  int channel_mask, int stream_id, int dir);
+
 void hda_bus_ml_put_all(struct hdac_bus *bus);
 void hda_bus_ml_reset_losidv(struct hdac_bus *bus);
 int hda_bus_ml_resume(struct hdac_bus *bus);
@@ -51,6 +54,7 @@ int hda_bus_ml_suspend(struct hdac_bus *bus);
 
 struct hdac_ext_link *hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus);
 struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus);
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus);
 
 struct mutex *hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid);
 
@@ -144,6 +148,13 @@ hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink) { return
 static inline int
 hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num) { return 0; }
 
+static inline int
+hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                              int channel_mask, int stream_id, int dir)
+{
+       return 0;
+}
+
 static inline void hda_bus_ml_put_all(struct hdac_bus *bus) { }
 static inline void hda_bus_ml_reset_losidv(struct hdac_bus *bus) { }
 static inline int hda_bus_ml_resume(struct hdac_bus *bus) { return 0; }
@@ -155,6 +166,9 @@ hdac_bus_eml_ssp_get_hlink(struct hdac_bus *bus) { return NULL; }
 static inline struct hdac_ext_link *
 hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus) { return NULL; }
 
+static inline struct hdac_ext_link *
+hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus) { return NULL; }
+
 static inline struct mutex *
 hdac_bus_eml_get_mutex(struct hdac_bus *bus, bool alt, int elid) { return NULL; }
 
index b38fd25..5282790 100644 (file)
@@ -170,6 +170,7 @@ struct snd_soc_acpi_link_adr {
 /* Descriptor for SST ASoC machine driver */
 struct snd_soc_acpi_mach {
        u8 id[ACPI_ID_LEN];
+       const char *uid;
        const struct snd_soc_acpi_codecs *comp_ids;
        const u32 link_mask;
        const struct snd_soc_acpi_link_adr *links;
index 4d6ac76..ebd2475 100644 (file)
@@ -122,6 +122,10 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
 int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
                struct snd_soc_pcm_runtime *be, int stream);
 
+/* can this BE perform prepare */
+int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
+                                struct snd_soc_pcm_runtime *be, int stream);
+
 /* is the current PCM operation for this FE ? */
 int snd_soc_dpcm_fe_can_update(struct snd_soc_pcm_runtime *fe, int stream);
 
index 2291181..4c15420 100644 (file)
@@ -562,12 +562,13 @@ struct iscsit_conn {
 #define LOGIN_FLAGS_READ_ACTIVE                2
 #define LOGIN_FLAGS_WRITE_ACTIVE       3
 #define LOGIN_FLAGS_CLOSED             4
+#define LOGIN_FLAGS_WORKER_RUNNING     5
        unsigned long           login_flags;
        struct delayed_work     login_work;
        struct iscsi_login      *login;
        struct timer_list       nopin_timer;
        struct timer_list       nopin_response_timer;
-       struct timer_list       transport_timer;
+       struct timer_list       login_timer;
        struct task_struct      *login_kworker;
        /* Spinlock used for add/deleting cmd's from conn_cmd_list */
        spinlock_t              cmd_lock;
@@ -576,6 +577,8 @@ struct iscsit_conn {
        spinlock_t              nopin_timer_lock;
        spinlock_t              response_queue_lock;
        spinlock_t              state_lock;
+       spinlock_t              login_timer_lock;
+       spinlock_t              login_worker_lock;
        /* libcrypto RX and TX contexts for crc32c */
        struct ahash_request    *conn_rx_hash;
        struct ahash_request    *conn_tx_hash;
@@ -792,7 +795,6 @@ struct iscsi_np {
        enum np_thread_state_table np_thread_state;
        bool                    enabled;
        atomic_t                np_reset_count;
-       enum iscsi_timer_flags_table np_login_timer_flags;
        u32                     np_exports;
        enum np_flags_table     np_flags;
        spinlock_t              np_thread_lock;
@@ -800,7 +802,6 @@ struct iscsi_np {
        struct socket           *np_socket;
        struct sockaddr_storage np_sockaddr;
        struct task_struct      *np_thread;
-       struct timer_list       np_login_timer;
        void                    *np_context;
        struct iscsit_transport *np_transport;
        struct list_head        np_list;
index 1bb11a6..c994ff5 100644 (file)
@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
        BPF_TRACE_KPROBE_MULTI,
        BPF_LSM_CGROUP,
        BPF_STRUCT_OPS,
+       BPF_NETFILTER,
        __MAX_BPF_ATTACH_TYPE
 };
 
index 1de4d0b..3d7ea58 100644 (file)
@@ -44,6 +44,7 @@ enum {
        HANDSHAKE_A_ACCEPT_AUTH_MODE,
        HANDSHAKE_A_ACCEPT_PEER_IDENTITY,
        HANDSHAKE_A_ACCEPT_CERTIFICATE,
+       HANDSHAKE_A_ACCEPT_PEERNAME,
 
        __HANDSHAKE_A_ACCEPT_MAX,
        HANDSHAKE_A_ACCEPT_MAX = (__HANDSHAKE_A_ACCEPT_MAX - 1)
index 4b7f2df..e682ab6 100644 (file)
@@ -163,6 +163,7 @@ struct in_addr {
 #define IP_MULTICAST_ALL               49
 #define IP_UNICAST_IF                  50
 #define IP_LOCAL_PORT_RANGE            51
+#define IP_PROTOCOL                    52
 
 #define MCAST_EXCLUDE  0
 #define MCAST_INCLUDE  1
index f29899b..4bf9c4f 100644 (file)
@@ -66,7 +66,8 @@ enum skl_ch_cfg {
        SKL_CH_CFG_DUAL_MONO = 9,
        SKL_CH_CFG_I2S_DUAL_STEREO_0 = 10,
        SKL_CH_CFG_I2S_DUAL_STEREO_1 = 11,
-       SKL_CH_CFG_4_CHANNEL = 12,
+       SKL_CH_CFG_7_1 = 12,
+       SKL_CH_CFG_4_CHANNEL = SKL_CH_CFG_7_1,
        SKL_CH_CFG_INVALID
 };
 
index bbc3787..e9ec7e4 100644 (file)
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_INTERLEAVING_STYLE        1906
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG   1907
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IN_SAMPLE_TYPE       1908
-#define SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX            1909
+#define SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX      1909
 /* intentional token numbering discontinuity, reserved for future use */
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_RATE     1930
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_BIT_DEPTH        1931
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_INTERLEAVING_STYLE       1936
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG  1937
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OUT_SAMPLE_TYPE      1938
+#define SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX     1939
 /* intentional token numbering discontinuity, reserved for future use */
 #define SOF_TKN_CAVS_AUDIO_FORMAT_IBS          1970
 #define SOF_TKN_CAVS_AUDIO_FORMAT_OBS          1971
index f755329..df1d04f 100644 (file)
@@ -1133,7 +1133,7 @@ static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba)
        ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); })
 #endif
 
-static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba)
+static inline size_t ufshcd_get_ucd_size(const struct ufs_hba *hba)
 {
        return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba);
 }
index 9aa74d2..89bff20 100644 (file)
@@ -25,10 +25,6 @@ int io_epoll_ctl_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
        struct io_epoll *epoll = io_kiocb_to_cmd(req, struct io_epoll);
 
-       pr_warn_once("%s: epoll_ctl support in io_uring is deprecated and will "
-                    "be removed in a future Linux kernel version.\n",
-                    current->comm);
-
        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
 
index 9db4bc1..5e329e3 100644 (file)
@@ -255,9 +255,13 @@ static int io_sq_thread(void *data)
                        sqt_spin = true;
 
                if (sqt_spin || !time_after(jiffies, timeout)) {
-                       cond_resched();
                        if (sqt_spin)
                                timeout = jiffies + sqd->sq_thread_idle;
+                       if (unlikely(need_resched())) {
+                               mutex_unlock(&sqd->lock);
+                               cond_resched();
+                               mutex_lock(&sqd->lock);
+                       }
                        continue;
                }
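
The io_sq_thread change drops sqd->lock around cond_resched() so the thread
never sleeps while holding a mutex other tasks need. The same unlock, yield,
relock shape in a pthread sketch:

#include <pthread.h>
#include <sched.h>

static pthread_mutex_t sqd_lock = PTHREAD_MUTEX_INITIALIZER;

/* called with sqd_lock held, as inside the loop above */
static void resched_point(void)
{
	pthread_mutex_unlock(&sqd_lock);
	sched_yield();			/* cond_resched() analogue */
	pthread_mutex_lock(&sqd_lock);
}
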
 
index 00c253b..9901efe 100644 (file)
@@ -1215,7 +1215,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 
        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
-               return ret;
+               goto err_lock_bucket;
 
        l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1236,6 +1236,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 err:
        htab_unlock_bucket(htab, b, hash, flags);
 
+err_lock_bucket:
        if (ret)
                htab_lru_push_free(htab, l_new);
        else if (l_old)
@@ -1338,7 +1339,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 
        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
-               return ret;
+               goto err_lock_bucket;
 
        l_old = lookup_elem_raw(head, hash, key, key_size);
 
@@ -1361,6 +1362,7 @@ static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
        ret = 0;
 err:
        htab_unlock_bucket(htab, b, hash, flags);
+err_lock_bucket:
        if (l_new)
                bpf_lru_push_free(&htab->lru, &l_new->lru_node);
        return ret;
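
Both hashtab fixes replace a bare 'return ret' with 'goto err_lock_bucket'
because an element preallocated before the failed lock must still be freed.
The canonical goto-unwind shape, self-contained (lock stubs are placeholders):

#include <errno.h>
#include <stdlib.h>

static int lock_bucket(void) { return 0; }	/* may fail, e.g. -EBUSY */
static void unlock_bucket(void) { }

static int update_elem(void)
{
	void *l_new = malloc(64);	/* preallocated element */
	int ret;

	if (!l_new)
		return -ENOMEM;

	ret = lock_bucket();
	if (ret)
		goto err_lock_bucket;	/* a bare return would leak l_new */

	/* ... insert l_new under the bucket lock; it is consumed here ... */
	unlock_bucket();
err_lock_bucket:
	if (ret)
		free(l_new);		/* mirrors htab_lru_push_free() */
	return ret;
}
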
index 2c5c64c..cd5eafa 100644 (file)
@@ -69,9 +69,13 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
        /* Misc members not needed in bpf_map_meta_equal() check. */
        inner_map_meta->ops = inner_map->ops;
        if (inner_map->ops == &array_map_ops) {
+               struct bpf_array *inner_array_meta =
+                       container_of(inner_map_meta, struct bpf_array, map);
+               struct bpf_array *inner_array = container_of(inner_map, struct bpf_array, map);
+
+               inner_array_meta->index_mask = inner_array->index_mask;
+               inner_array_meta->elem_size = inner_array->elem_size;
                inner_map_meta->bypass_spec_v1 = inner_map->bypass_spec_v1;
-               container_of(inner_map_meta, struct bpf_array, map)->index_mask =
-                    container_of(inner_map, struct bpf_array, map)->index_mask;
        }
 
        fdput(f);
index d9c9f45..8a26cd8 100644 (file)
@@ -859,4 +859,4 @@ static int __init bpf_offload_init(void)
        return rhashtable_init(&offdevs, &offdevs_params);
 }
 
-late_initcall(bpf_offload_init);
+core_initcall(bpf_offload_init);
index 14f39c1..0c21d0d 100644 (file)
@@ -2433,6 +2433,10 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
                default:
                        return -EINVAL;
                }
+       case BPF_PROG_TYPE_NETFILTER:
+               if (expected_attach_type == BPF_NETFILTER)
+                       return 0;
+               return -EINVAL;
        case BPF_PROG_TYPE_SYSCALL:
        case BPF_PROG_TYPE_EXT:
                if (expected_attach_type)
@@ -4590,7 +4594,12 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
 
        switch (prog->type) {
        case BPF_PROG_TYPE_EXT:
+               break;
        case BPF_PROG_TYPE_NETFILTER:
+               if (attr->link_create.attach_type != BPF_NETFILTER) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                break;
        case BPF_PROG_TYPE_PERF_EVENT:
        case BPF_PROG_TYPE_TRACEPOINT:
index fbcf5a4..5871aa7 100644 (file)
@@ -17033,7 +17033,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
                                        insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH,
                                                                        insn->dst_reg,
                                                                        shift);
-                               insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg,
+                               insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg,
                                                                (1ULL << size * 8) - 1);
                        }
                }
index aeef06c..5407241 100644 (file)
@@ -108,7 +108,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 
        cgroup_lock();
 
-       percpu_down_write(&cgroup_threadgroup_rwsem);
+       cgroup_attach_lock(true);
 
        /* all tasks in @from are being moved, all csets are source */
        spin_lock_irq(&css_set_lock);
@@ -144,7 +144,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
        } while (task && !ret);
 out_err:
        cgroup_migrate_finish(&mgctx);
-       percpu_up_write(&cgroup_threadgroup_rwsem);
+       cgroup_attach_unlock(true);
        cgroup_unlock();
        return ret;
 }
index 625d748..245cf62 100644 (file)
@@ -6486,19 +6486,18 @@ err:
 static void cgroup_css_set_put_fork(struct kernel_clone_args *kargs)
        __releases(&cgroup_threadgroup_rwsem) __releases(&cgroup_mutex)
 {
+       struct cgroup *cgrp = kargs->cgrp;
+       struct css_set *cset = kargs->cset;
+
        cgroup_threadgroup_change_end(current);
 
-       if (kargs->flags & CLONE_INTO_CGROUP) {
-               struct cgroup *cgrp = kargs->cgrp;
-               struct css_set *cset = kargs->cset;
+       if (cset) {
+               put_css_set(cset);
+               kargs->cset = NULL;
+       }
 
+       if (kargs->flags & CLONE_INTO_CGROUP) {
                cgroup_unlock();
-
-               if (cset) {
-                       put_css_set(cset);
-                       kargs->cset = NULL;
-               }
-
                if (cgrp) {
                        cgroup_put(cgrp);
                        kargs->cgrp = NULL;
index 34b90e2..edb50b4 100644 (file)
@@ -411,7 +411,10 @@ static void coredump_task_exit(struct task_struct *tsk)
        tsk->flags |= PF_POSTCOREDUMP;
        core_state = tsk->signal->core_state;
        spin_unlock_irq(&tsk->sighand->siglock);
-       if (core_state) {
+
+       /* The vhost_worker does not participate in coredumps */
+       if (core_state &&
+           ((tsk->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)) {
                struct core_thread self;
 
                self.task = current;
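
The mask test above reads: skip coredump participation exactly for tasks
that are user workers but not io workers (the vhost worker case). A tiny
self-checking sketch; the flag values are illustrative, not the kernel's:

#include <assert.h>

#define PF_IO_WORKER	0x00000010	/* illustrative values */
#define PF_USER_WORKER	0x00004000

static int participates_in_coredump(unsigned int flags)
{
	return (flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER;
}

int main(void)
{
	assert(participates_in_coredump(0));			/* normal task */
	assert(participates_in_coredump(PF_IO_WORKER));		/* io worker */
	assert(!participates_in_coredump(PF_USER_WORKER));	/* vhost worker */
	return 0;
}
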
index ed4e01d..41c9641 100644 (file)
@@ -627,6 +627,7 @@ void free_task(struct task_struct *tsk)
        arch_release_task_struct(tsk);
        if (tsk->flags & PF_KTHREAD)
                free_kthread_struct(tsk);
+       bpf_task_storage_free(tsk);
        free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -979,7 +980,6 @@ void __put_task_struct(struct task_struct *tsk)
        cgroup_free(tsk);
        task_numa_free(tsk, true);
        security_task_free(tsk);
-       bpf_task_storage_free(tsk);
        exit_creds(tsk);
        delayacct_tsk_free(tsk);
        put_signal_struct(tsk->signal);
@@ -2336,16 +2336,16 @@ __latent_entropy struct task_struct *copy_process(
        p->flags &= ~PF_KTHREAD;
        if (args->kthread)
                p->flags |= PF_KTHREAD;
-       if (args->user_worker)
-               p->flags |= PF_USER_WORKER;
-       if (args->io_thread) {
+       if (args->user_worker) {
                /*
-                * Mark us an IO worker, and block any signal that isn't
+                * Mark us a user worker, and block any signal that isn't
                 * fatal or STOP
                 */
-               p->flags |= PF_IO_WORKER;
+               p->flags |= PF_USER_WORKER;
                siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
        }
+       if (args->io_thread)
+               p->flags |= PF_IO_WORKER;
 
        if (args->name)
                strscpy_pad(p->comm, args->name, sizeof(p->comm));
@@ -2517,9 +2517,6 @@ __latent_entropy struct task_struct *copy_process(
        if (retval)
                goto bad_fork_cleanup_io;
 
-       if (args->ignore_signals)
-               ignore_signals(p);
-
        stackleak_task_init(p);
 
        if (pid != &init_struct_pid) {
index 7a97bcb..b4c31a5 100644 (file)
@@ -542,7 +542,7 @@ fail:
        return ret;
 }
 
-#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
+#if defined(CONFIG_PCI_MSI_ARCH_FALLBACKS) || defined(CONFIG_PCI_XEN)
 /**
  * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
  * @dev:       The device (PCI, platform etc) which will get sysfs entries
@@ -574,7 +574,7 @@ void msi_device_destroy_sysfs(struct device *dev)
        msi_for_each_desc(desc, dev, MSI_DESC_ALL)
                msi_sysfs_remove_desc(dev, desc);
 }
-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK */
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACK || CONFIG_PCI_XEN */
 #else /* CONFIG_SYSFS */
 static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
 static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
index dcd1d5b..4dfd2f3 100644 (file)
@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+       if (entry->class->lock_type == LD_LOCK_NORMAL)
+               return false;
+
        /*
         * Skip local_lock() for irq inversion detection.
         *
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
         * As a result, we will skip local_lock(), when we search for irq
         * inversion bugs.
         */
-       if (entry->class->lock_type == LD_LOCK_PERCPU) {
-               if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-                       return false;
+       if (entry->class->lock_type == LD_LOCK_PERCPU &&
+           DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+               return false;
 
-               return true;
-       }
+       /*
+        * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+        * a lock and only used to override the wait_type.
+        */
 
-       return false;
+       return true;
 }
 
 /*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 
        for (; depth < curr->lockdep_depth; depth++) {
                struct held_lock *prev = curr->held_locks + depth;
-               u8 prev_inner = hlock_class(prev)->wait_type_inner;
+               struct lock_class *class = hlock_class(prev);
+               u8 prev_inner = class->wait_type_inner;
 
                if (prev_inner) {
                        /*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
                         * Also due to trylocks.
                         */
                        curr_inner = min(curr_inner, prev_inner);
+
+                       /*
+                        * Allow override for annotations -- this is typically
+                        * only valid/needed for code that only exists when
+                        * CONFIG_PREEMPT_RT=n.
+                        */
+                       if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+                               curr_inner = prev_inner;
                }
        }
 
index e97232b..8a5d6d6 100644 (file)
@@ -257,7 +257,7 @@ static ssize_t module_zstd_decompress(struct load_info *info,
        do {
                struct page *page = module_get_next_page(info);
 
-               if (!IS_ERR(page)) {
+               if (IS_ERR(page)) {
                        retval = PTR_ERR(page);
                        goto out;
                }
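
The one-character fix above (IS_ERR instead of !IS_ERR) meant the error
branch had been running on success. A minimal analogue of the kernel's
error-pointer idiom, with the macros re-derived locally for a standalone
build:

#include <stdint.h>

#define MAX_ERRNO	4095
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(p)	((long)(intptr_t)(p))

static long consume_page(void *page)
{
	if (IS_ERR(page))	/* the inverted check broke this branch */
		return PTR_ERR(page);
	/* ... page is a real pointer, safe to decompress into ... */
	return 0;
}
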
index 044aa2c..4e2cf78 100644 (file)
@@ -1521,14 +1521,14 @@ static void __layout_sections(struct module *mod, struct load_info *info, bool i
                MOD_RODATA,
                MOD_RO_AFTER_INIT,
                MOD_DATA,
-               MOD_INVALID,    /* This is needed to match the masks array */
+               MOD_DATA,
        };
        static const int init_m_to_mem_type[] = {
                MOD_INIT_TEXT,
                MOD_INIT_RODATA,
                MOD_INVALID,
                MOD_INIT_DATA,
-               MOD_INVALID,    /* This is needed to match the masks array */
+               MOD_INIT_DATA,
        };
 
        for (m = 0; m < ARRAY_SIZE(masks); ++m) {
index ad7b6ad..6ab2c94 100644 (file)
@@ -276,6 +276,7 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
        struct mod_fail_load *mod_fail;
        unsigned int len, size, count_failed = 0;
        char *buf;
+       int ret;
        u32 live_mod_count, fkreads, fdecompress, fbecoming, floads;
        unsigned long total_size, text_size, ikread_bytes, ibecoming_bytes,
                idecompress_bytes, imod_bytes, total_virtual_lost;
@@ -390,8 +391,9 @@ static ssize_t read_file_mod_stats(struct file *file, char __user *user_buf,
 out_unlock:
        mutex_unlock(&module_mutex);
 out:
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
        kfree(buf);
-        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+       return ret;
 }
 #undef MAX_PREAMBLE
 #undef MAX_FAILED_MOD_PRINT
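
The read_file_mod_stats() fix is a use-after-free repair: the buffer was
kfree()d and then handed to simple_read_from_buffer(). The safe ordering,
in a self-contained sketch (copy_out() stands in for the copy helper):

#include <stdlib.h>
#include <string.h>

static long copy_out(char *dst, const char *buf, size_t len)
{
	memcpy(dst, buf, len);
	return (long)len;
}

static long read_stats(char *dst, size_t len)
{
	char *buf = malloc(len);
	long ret;

	if (!buf)
		return -1;
	memset(buf, 0, len);		/* ... format stats into buf ... */
	ret = copy_out(dst, buf, len);	/* consume the buffer first */
	free(buf);			/* only then release it */
	return ret;
}
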
index 8f6330f..2547fa7 100644 (file)
@@ -1368,7 +1368,9 @@ int zap_other_threads(struct task_struct *p)
 
        while_each_thread(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-               count++;
+               /* Don't require de_thread to wait for the vhost_worker */
+               if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
+                       count++;
 
                /* Don't bother with already dead threads */
                if (t->exit_state)
@@ -2861,11 +2863,11 @@ relock:
                }
 
                /*
-                * PF_IO_WORKER threads will catch and exit on fatal signals
+                * PF_USER_WORKER threads will catch and exit on fatal signals
                 * themselves. They have cleanup that must be performed, so
                 * we cannot call do_exit() on their behalf.
                 */
-               if (current->flags & PF_IO_WORKER)
+               if (current->flags & PF_USER_WORKER)
                        goto out;
 
                /*
index 9a050e3..1f4b07d 100644 (file)
@@ -900,13 +900,23 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
 
 BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
 {
+       struct path copy;
        long len;
        char *p;
 
        if (!sz)
                return 0;
 
-       p = d_path(path, buf, sz);
+       /*
+        * The path pointer is verified as trusted and safe to use,
+        * but let's double check it's valid anyway to workaround
+        * potentially broken verifier.
+        */
+       len = copy_from_kernel_nofault(&copy, path, sizeof(*path));
+       if (len < 0)
+               return len;
+
+       p = d_path(&copy, buf, sz);
        if (IS_ERR(p)) {
                len = PTR_ERR(p);
        } else {
index 9abb390..18d3684 100644 (file)
 struct fprobe_rethook_node {
        struct rethook_node node;
        unsigned long entry_ip;
+       unsigned long entry_parent_ip;
        char data[];
 };
 
-static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *ops, struct ftrace_regs *fregs)
+static inline void __fprobe_handler(unsigned long ip, unsigned long parent_ip,
+                       struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
        struct fprobe_rethook_node *fpr;
        struct rethook_node *rh = NULL;
        struct fprobe *fp;
        void *entry_data = NULL;
-       int bit, ret;
+       int ret = 0;
 
        fp = container_of(ops, struct fprobe, ops);
-       if (fprobe_disabled(fp))
-               return;
-
-       bit = ftrace_test_recursion_trylock(ip, parent_ip);
-       if (bit < 0) {
-               fp->nmissed++;
-               return;
-       }
 
        if (fp->exit_handler) {
                rh = rethook_try_get(fp->rethook);
                if (!rh) {
                        fp->nmissed++;
-                       goto out;
+                       return;
                }
                fpr = container_of(rh, struct fprobe_rethook_node, node);
                fpr->entry_ip = ip;
+               fpr->entry_parent_ip = parent_ip;
                if (fp->entry_data_size)
                        entry_data = fpr->data;
        }
@@ -61,23 +55,60 @@ static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
                else
                        rethook_hook(rh, ftrace_get_regs(fregs), true);
        }
-out:
+}
+
+static void fprobe_handler(unsigned long ip, unsigned long parent_ip,
+               struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+       struct fprobe *fp;
+       int bit;
+
+       fp = container_of(ops, struct fprobe, ops);
+       if (fprobe_disabled(fp))
+               return;
+
+       /* recursion detection has to go before any traceable function and
+        * all functions called before this point should be marked as notrace
+        */
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
+       __fprobe_handler(ip, parent_ip, ops, fregs);
        ftrace_test_recursion_unlock(bit);
+
 }
 NOKPROBE_SYMBOL(fprobe_handler);
 
 static void fprobe_kprobe_handler(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
-       struct fprobe *fp = container_of(ops, struct fprobe, ops);
+       struct fprobe *fp;
+       int bit;
+
+       fp = container_of(ops, struct fprobe, ops);
+       if (fprobe_disabled(fp))
+               return;
+
+       /* recursion detection has to go before any traceable function and
+        * all functions called before this point should be marked as notrace
+        */
+       bit = ftrace_test_recursion_trylock(ip, parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
 
        if (unlikely(kprobe_running())) {
                fp->nmissed++;
                return;
        }
+
        kprobe_busy_begin();
-       fprobe_handler(ip, parent_ip, ops, fregs);
+       __fprobe_handler(ip, parent_ip, ops, fregs);
        kprobe_busy_end();
+       ftrace_test_recursion_unlock(bit);
 }
 
 static void fprobe_exit_handler(struct rethook_node *rh, void *data,
@@ -85,14 +116,26 @@ static void fprobe_exit_handler(struct rethook_node *rh, void *data,
 {
        struct fprobe *fp = (struct fprobe *)data;
        struct fprobe_rethook_node *fpr;
+       int bit;
 
        if (!fp || fprobe_disabled(fp))
                return;
 
        fpr = container_of(rh, struct fprobe_rethook_node, node);
 
+       /*
+        * we need to ensure no traceable functions are called between the
+        * end of fprobe_handler and the beginning of fprobe_exit_handler.
+        */
+       bit = ftrace_test_recursion_trylock(fpr->entry_ip, fpr->entry_parent_ip);
+       if (bit < 0) {
+               fp->nmissed++;
+               return;
+       }
+
        fp->exit_handler(fp, fpr->entry_ip, regs,
                         fp->entry_data_size ? (void *)fpr->data : NULL);
+       ftrace_test_recursion_unlock(bit);
 }
 NOKPROBE_SYMBOL(fprobe_exit_handler);
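
The fprobe rework brackets everything the handlers do, entry and exit alike,
with the ftrace recursion guard. A thread-local sketch of the trylock shape
(the real guard also encodes the trace context in the returned bit):

static _Thread_local int in_fprobe;

static void handler_body(void);	/* may call traceable functions */

static void guarded_handler(void)
{
	if (in_fprobe)		/* trylock failed: count a miss and bail */
		return;
	in_fprobe = 1;
	handler_body();
	in_fprobe = 0;		/* ftrace_test_recursion_unlock() analogue */
}

static void handler_body(void) { }
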
 
index 32c3dfd..60f6cb2 100644 (file)
@@ -288,7 +288,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
         * These loops must be protected from rethook_free_rcu() because those
         * are accessing 'rhn->rethook'.
         */
-       preempt_disable();
+       preempt_disable_notrace();
 
        /*
         * Run the handler on the shadow stack. Do not unlink the list here because
@@ -321,7 +321,7 @@ unsigned long rethook_trampoline_handler(struct pt_regs *regs,
                first = first->next;
                rethook_recycle(rhn);
        }
-       preempt_enable();
+       preempt_enable_notrace();
 
        return correct_ret_addr;
 }
index ebc5978..64a4dde 100644 (file)
@@ -60,6 +60,7 @@
  */
 bool ring_buffer_expanded;
 
+#ifdef CONFIG_FTRACE_STARTUP_TEST
 /*
  * We need to change this state when a selftest is running.
  * A selftest will lurk into the ring-buffer to count the
@@ -75,7 +76,6 @@ static bool __read_mostly tracing_selftest_running;
  */
 bool __read_mostly tracing_selftest_disabled;
 
-#ifdef CONFIG_FTRACE_STARTUP_TEST
 void __init disable_tracing_selftest(const char *reason)
 {
        if (!tracing_selftest_disabled) {
@@ -83,6 +83,9 @@ void __init disable_tracing_selftest(const char *reason)
                pr_info("Ftrace startup test is disabled due to %s\n", reason);
        }
 }
+#else
+#define tracing_selftest_running       0
+#define tracing_selftest_disabled      0
 #endif
 
 /* Pipe tracepoints to printk */
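
The #else branch above is the standard compile-out idiom: when the config
option is off, the flags become the integer constant 0 and the compiler
folds away every branch that tests them, with no #ifdef at the use sites.
The same shape under an assumed CONFIG_SELFTEST option:

#ifdef CONFIG_SELFTEST
static int selftest_running;
#else
#define selftest_running 0
#endif

static int maybe_log(void)
{
	if (selftest_running)	/* dead code when CONFIG_SELFTEST is unset */
		return 0;
	return 1;
}
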
@@ -1051,7 +1054,10 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
        if (!(tr->trace_flags & TRACE_ITER_PRINTK))
                return 0;
 
-       if (unlikely(tracing_selftest_running || tracing_disabled))
+       if (unlikely(tracing_selftest_running && tr == &global_trace))
+               return 0;
+
+       if (unlikely(tracing_disabled))
                return 0;
 
        alloc = sizeof(*entry) + size + 2; /* possible \n added */
@@ -2041,6 +2047,24 @@ static int run_tracer_selftest(struct tracer *type)
        return 0;
 }
 
+static int do_run_tracer_selftest(struct tracer *type)
+{
+       int ret;
+
+       /*
+        * Tests can take a long time, especially if they are run one after the
+        * other, as does happen during bootup when all the tracers are
+        * registered. This could cause the soft lockup watchdog to trigger.
+        */
+       cond_resched();
+
+       tracing_selftest_running = true;
+       ret = run_tracer_selftest(type);
+       tracing_selftest_running = false;
+
+       return ret;
+}
+
 static __init int init_trace_selftests(void)
 {
        struct trace_selftests *p, *n;
@@ -2092,6 +2116,10 @@ static inline int run_tracer_selftest(struct tracer *type)
 {
        return 0;
 }
+static inline int do_run_tracer_selftest(struct tracer *type)
+{
+       return 0;
+}
 #endif /* CONFIG_FTRACE_STARTUP_TEST */
 
 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
@@ -2127,8 +2155,6 @@ int __init register_tracer(struct tracer *type)
 
        mutex_lock(&trace_types_lock);
 
-       tracing_selftest_running = true;
-
        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
@@ -2157,7 +2183,7 @@ int __init register_tracer(struct tracer *type)
        /* store the tracer for __set_tracer_option */
        type->flags->trace = type;
 
-       ret = run_tracer_selftest(type);
+       ret = do_run_tracer_selftest(type);
        if (ret < 0)
                goto out;
 
@@ -2166,7 +2192,6 @@ int __init register_tracer(struct tracer *type)
        add_tracer_options(&global_trace, type);
 
  out:
-       tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);
 
        if (ret || !default_bootup_tracer)
@@ -3490,7 +3515,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
        unsigned int trace_ctx;
        char *tbuffer;
 
-       if (tracing_disabled || tracing_selftest_running)
+       if (tracing_disabled)
                return 0;
 
        /* Don't pollute graph traces with trace_vprintk internals */
@@ -3538,6 +3563,9 @@ __printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
                        unsigned long ip, const char *fmt, va_list args)
 {
+       if (tracing_selftest_running && tr == &global_trace)
+               return 0;
+
        return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
 }
 
@@ -5752,7 +5780,7 @@ static const char readme_msg[] =
        "\t    table using the key(s) and value(s) named, and the value of a\n"
        "\t    sum called 'hitcount' is incremented.  Keys and values\n"
        "\t    correspond to fields in the event's format description.  Keys\n"
-       "\t    can be any field, or the special string 'stacktrace'.\n"
+       "\t    can be any field, or the special string 'common_stacktrace'.\n"
        "\t    Compound keys consisting of up to two fields can be specified\n"
        "\t    by the 'keys' keyword.  Values must correspond to numeric\n"
        "\t    fields.  Sort keys consisting of up to two fields can be\n"
index 654ffa4..57e539d 100644 (file)
@@ -194,6 +194,8 @@ static int trace_define_generic_fields(void)
        __generic_field(int, common_cpu, FILTER_CPU);
        __generic_field(char *, COMM, FILTER_COMM);
        __generic_field(char *, comm, FILTER_COMM);
+       __generic_field(char *, stacktrace, FILTER_STACKTRACE);
+       __generic_field(char *, STACKTRACE, FILTER_STACKTRACE);
 
        return ret;
 }
index 486cca3..b97d3ad 100644 (file)
@@ -1364,7 +1364,7 @@ static const char *hist_field_name(struct hist_field *field,
                if (field->field)
                        field_name = field->field->name;
                else
-                       field_name = "stacktrace";
+                       field_name = "common_stacktrace";
        } else if (field->flags & HIST_FIELD_FL_HITCOUNT)
                field_name = "hitcount";
 
@@ -2367,7 +2367,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                hist_data->enable_timestamps = true;
                if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
                        hist_data->attrs->ts_in_usecs = true;
-       } else if (strcmp(field_name, "stacktrace") == 0) {
+       } else if (strcmp(field_name, "common_stacktrace") == 0) {
                *flags |= HIST_FIELD_FL_STACKTRACE;
        } else if (strcmp(field_name, "common_cpu") == 0)
                *flags |= HIST_FIELD_FL_CPU;
@@ -2378,11 +2378,15 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                if (!field || !field->size) {
                        /*
                         * For backward compatibility, if field_name
-                        * was "cpu", then we treat this the same as
-                        * common_cpu. This also works for "CPU".
+                        * was "cpu" or "stacktrace", then we treat this
+                        * the same as common_cpu and common_stacktrace
+                        * respectively. This also works for "CPU", and
+                        * "STACKTRACE".
                         */
                        if (field && field->filter_type == FILTER_CPU) {
                                *flags |= HIST_FIELD_FL_CPU;
+                       } else if (field && field->filter_type == FILTER_STACKTRACE) {
+                               *flags |= HIST_FIELD_FL_STACKTRACE;
                        } else {
                                hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
                                         errpos(field_name));
@@ -4238,13 +4242,19 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
                goto out;
        }
 
-       /* Some types cannot be a value */
-       if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
-                                HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
-                                HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
-                                HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
-               hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
-               ret = -EINVAL;
+       /* values and variables should not have some modifiers */
+       if (hist_field->flags & HIST_FIELD_FL_VAR) {
+               /* Variable */
+               if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+                                        HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
+                       goto err;
+       } else {
+               /* Value */
+               if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
+                                        HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
+                                        HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
+                                        HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
+                       goto err;
        }
 
        hist_data->fields[val_idx] = hist_field;
@@ -4256,6 +4266,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
                ret = -EINVAL;
  out:
        return ret;
+ err:
+       hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
+       return -EINVAL;
 }
 
 static int create_val_field(struct hist_trigger_data *hist_data,
@@ -5385,7 +5398,7 @@ static void hist_trigger_print_key(struct seq_file *m,
                        if (key_field->field)
                                seq_printf(m, "%s.stacktrace", key_field->field->name);
                        else
-                               seq_puts(m, "stacktrace:\n");
+                               seq_puts(m, "common_stacktrace:\n");
                        hist_trigger_stacktrace_print(m,
                                                      key + key_field->offset,
                                                      HIST_STACKTRACE_DEPTH);
@@ -5968,7 +5981,7 @@ static int event_hist_trigger_print(struct seq_file *m,
                        if (field->field)
                                seq_printf(m, "%s.stacktrace", field->field->name);
                        else
-                               seq_puts(m, "stacktrace");
+                               seq_puts(m, "common_stacktrace");
                } else
                        hist_field_print(m, field);
        }
index b1ecd76..dbb1470 100644 (file)
@@ -96,12 +96,12 @@ struct user_event {
  * these to track enablement sites that are tied to an event.
  */
 struct user_event_enabler {
-       struct list_head        link;
+       struct list_head        mm_enablers_link;
        struct user_event       *event;
        unsigned long           addr;
 
        /* Track enable bit, flags, etc. Aligned for bitops. */
-       unsigned int            values;
+       unsigned long           values;
 };
 
 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
@@ -116,7 +116,9 @@ struct user_event_enabler {
 /* Only duplicate the bit value */
 #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
 
-#define ENABLE_BITOPS(e) ((unsigned long *)&(e)->values)
+#define ENABLE_BITOPS(e) (&(e)->values)
+
+#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
 
 /* Used for asynchronous faulting in of pages */
 struct user_event_enabler_fault {
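
Widening 'values' to unsigned long matters because set_bit()/clear_bit()
address whole longs; handing them an unsigned int risks touching bytes past
the field on 64-bit. ENABLE_BIT() then extracts bits 0-5 as a plain int, per
the comment above (mask value assumed to match):

#define ENABLE_VAL_BIT_MASK 0x3f	/* bits 0-5: target bit 0-63 */

static unsigned long values;		/* a full word, never unsigned int */

static int enable_bit(void)
{
	return (int)(values & ENABLE_VAL_BIT_MASK);
}
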
@@ -153,7 +155,7 @@ struct user_event_file_info {
 #define VALIDATOR_REL (1 << 1)
 
 struct user_event_validator {
-       struct list_head        link;
+       struct list_head        user_event_link;
        int                     offset;
        int                     flags;
 };
@@ -259,7 +261,7 @@ error:
 
 static void user_event_enabler_destroy(struct user_event_enabler *enabler)
 {
-       list_del_rcu(&enabler->link);
+       list_del_rcu(&enabler->mm_enablers_link);
 
        /* No longer tracking the event via the enabler */
        refcount_dec(&enabler->event->refcnt);
@@ -423,9 +425,9 @@ static int user_event_enabler_write(struct user_event_mm *mm,
 
        /* Update bit atomically, user tracers must be atomic as well */
        if (enabler->event && enabler->event->status)
-               set_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
+               set_bit(ENABLE_BIT(enabler), ptr);
        else
-               clear_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
+               clear_bit(ENABLE_BIT(enabler), ptr);
 
        kunmap_local(kaddr);
        unpin_user_pages_dirty_lock(&page, 1, true);
@@ -437,11 +439,9 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
                                      unsigned long uaddr, unsigned char bit)
 {
        struct user_event_enabler *enabler;
-       struct user_event_enabler *next;
 
-       list_for_each_entry_safe(enabler, next, &mm->enablers, link) {
-               if (enabler->addr == uaddr &&
-                   (enabler->values & ENABLE_VAL_BIT_MASK) == bit)
+       list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
+               if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
                        return true;
        }
 
@@ -451,23 +451,36 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
 static void user_event_enabler_update(struct user_event *user)
 {
        struct user_event_enabler *enabler;
-       struct user_event_mm *mm = user_event_mm_get_all(user);
        struct user_event_mm *next;
+       struct user_event_mm *mm;
        int attempt;
 
+       lockdep_assert_held(&event_mutex);
+
+       /*
+        * We need to build a one-shot list of all the mms that have an
+        * enabler for the user_event passed in. This list is only valid
+        * while holding the event_mutex. This is needed only because the
+        * global mm list is RCU protected and we use methods which can
+        * wait (mmap_read_lock and pin_user_pages_remote).
+        *
+        * NOTE: user_event_mm_get_all() increments the ref count of each
+        * mm that is added to the list to prevent removal timing windows.
+        * We must always put each mm after they are used, which may wait.
+        */
+       mm = user_event_mm_get_all(user);
+
        while (mm) {
                next = mm->next;
                mmap_read_lock(mm->mm);
-               rcu_read_lock();
 
-               list_for_each_entry_rcu(enabler, &mm->enablers, link) {
+               list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                attempt = 0;
                                user_event_enabler_write(mm, enabler, true, &attempt);
                        }
                }
 
-               rcu_read_unlock();
                mmap_read_unlock(mm->mm);
                user_event_mm_put(mm);
                mm = next;
@@ -495,7 +508,9 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
        enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
 
        refcount_inc(&enabler->event->refcnt);
-       list_add_rcu(&enabler->link, &mm->enablers);
+
+       /* Enablers not exposed yet, RCU not required */
+       list_add(&enabler->mm_enablers_link, &mm->enablers);
 
        return true;
 }
@@ -514,6 +529,14 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
        struct user_event_mm *mm;
 
        /*
+        * We use the mm->next field to build a one-shot list from the global
+        * RCU protected list. To build this list the event_mutex must be held.
+        * This lets us build a list without requiring allocs that could fail
+        * when user based events are most wanted for diagnostics.
+        */
+       lockdep_assert_held(&event_mutex);
+
+       /*
         * We do not want to block fork/exec while enablements are being
         * updated, so we use RCU to walk the current tasks that have used
         * user_events ABI for 1 or more events. Each enabler found in each
@@ -525,23 +548,24 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
         */
        rcu_read_lock();
 
-       list_for_each_entry_rcu(mm, &user_event_mms, link)
-               list_for_each_entry_rcu(enabler, &mm->enablers, link)
+       list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
+               list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                mm->next = found;
                                found = user_event_mm_get(mm);
                                break;
                        }
+               }
+       }
 
        rcu_read_unlock();
 
        return found;
 }
 
-static struct user_event_mm *user_event_mm_create(struct task_struct *t)
+static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
 {
        struct user_event_mm *user_mm;
-       unsigned long flags;
 
        user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
 
@@ -553,12 +577,6 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
        refcount_set(&user_mm->refcnt, 1);
        refcount_set(&user_mm->tasks, 1);
 
-       spin_lock_irqsave(&user_event_mms_lock, flags);
-       list_add_rcu(&user_mm->link, &user_event_mms);
-       spin_unlock_irqrestore(&user_event_mms_lock, flags);
-
-       t->user_event_mm = user_mm;
-
        /*
         * The lifetime of the memory descriptor can slightly outlast
         * the task lifetime if a ref to the user_event_mm is taken
@@ -572,6 +590,17 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
        return user_mm;
 }
 
+static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&user_event_mms_lock, flags);
+       list_add_rcu(&user_mm->mms_link, &user_event_mms);
+       spin_unlock_irqrestore(&user_event_mms_lock, flags);
+
+       t->user_event_mm = user_mm;
+}
+
 static struct user_event_mm *current_user_event_mm(void)
 {
        struct user_event_mm *user_mm = current->user_event_mm;
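
Splitting user_event_mm_create() into alloc and attach is the usual
two-phase publication pattern: finish initialising the object, then add it
to the shared list, so concurrent walkers never see it half-built. A
single-threaded sketch of the ordering (the kernel version publishes with
list_add_rcu() under a spinlock):

#include <stdlib.h>

struct obj {
	int ready;
	struct obj *next;
};

static struct obj *obj_list;	/* walked concurrently in the kernel */

static struct obj *obj_alloc(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		o->ready = 1;	/* complete all init before attach */
	return o;
}

static void obj_attach(struct obj *o)
{
	o->next = obj_list;	/* publish only the finished object */
	obj_list = o;
}
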
@@ -579,10 +608,12 @@ static struct user_event_mm *current_user_event_mm(void)
        if (user_mm)
                goto inc;
 
-       user_mm = user_event_mm_create(current);
+       user_mm = user_event_mm_alloc(current);
 
        if (!user_mm)
                goto error;
+
+       user_event_mm_attach(user_mm, current);
 inc:
        refcount_inc(&user_mm->refcnt);
 error:
@@ -593,7 +624,7 @@ static void user_event_mm_destroy(struct user_event_mm *mm)
 {
        struct user_event_enabler *enabler, *next;
 
-       list_for_each_entry_safe(enabler, next, &mm->enablers, link)
+       list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
                user_event_enabler_destroy(enabler);
 
        mmdrop(mm->mm);
@@ -630,7 +661,7 @@ void user_event_mm_remove(struct task_struct *t)
 
        /* Remove the mm from the list, so it can no longer be enabled */
        spin_lock_irqsave(&user_event_mms_lock, flags);
-       list_del_rcu(&mm->link);
+       list_del_rcu(&mm->mms_link);
        spin_unlock_irqrestore(&user_event_mms_lock, flags);
 
        /*
@@ -670,7 +701,7 @@ void user_event_mm_remove(struct task_struct *t)
 
 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
 {
-       struct user_event_mm *mm = user_event_mm_create(t);
+       struct user_event_mm *mm = user_event_mm_alloc(t);
        struct user_event_enabler *enabler;
 
        if (!mm)
@@ -678,16 +709,18 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
 
        rcu_read_lock();
 
-       list_for_each_entry_rcu(enabler, &old_mm->enablers, link)
+       list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
                if (!user_event_enabler_dup(enabler, mm))
                        goto error;
+       }
 
        rcu_read_unlock();
 
+       user_event_mm_attach(mm, t);
        return;
 error:
        rcu_read_unlock();
-       user_event_mm_remove(t);
+       user_event_mm_destroy(mm);
 }
 
 static bool current_user_event_enabler_exists(unsigned long uaddr,
@@ -748,7 +781,7 @@ retry:
         */
        if (!*write_result) {
                refcount_inc(&enabler->event->refcnt);
-               list_add_rcu(&enabler->link, &user_mm->enablers);
+               list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
        }
 
        mutex_unlock(&event_mutex);
@@ -904,8 +937,8 @@ static void user_event_destroy_validators(struct user_event *user)
        struct user_event_validator *validator, *next;
        struct list_head *head = &user->validators;
 
-       list_for_each_entry_safe(validator, next, head, link) {
-               list_del(&validator->link);
+       list_for_each_entry_safe(validator, next, head, user_event_link) {
+               list_del(&validator->user_event_link);
                kfree(validator);
        }
 }
@@ -959,7 +992,7 @@ add_validator:
        validator->offset = offset;
 
        /* Want sequential access when validating */
-       list_add_tail(&validator->link, &user->validators);
+       list_add_tail(&validator->user_event_link, &user->validators);
 
 add_field:
        field->type = type;
@@ -1349,7 +1382,7 @@ static int user_event_validate(struct user_event *user, void *data, int len)
        void *pos, *end = data + len;
        u32 loc, offset, size;
 
-       list_for_each_entry(validator, head, link) {
+       list_for_each_entry(validator, head, user_event_link) {
                pos = data + validator->offset;
 
                /* Already done min_size check, no bounds check here */
@@ -2270,9 +2303,9 @@ static long user_events_ioctl_unreg(unsigned long uarg)
         */
        mutex_lock(&event_mutex);
 
-       list_for_each_entry_safe(enabler, next, &mm->enablers, link)
+       list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
                if (enabler->addr == reg.disable_addr &&
-                   (enabler->values & ENABLE_VAL_BIT_MASK) == reg.disable_bit) {
+                   ENABLE_BIT(enabler) == reg.disable_bit) {
                        set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
 
                        if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
@@ -2281,6 +2314,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
                        /* Removed at least one */
                        ret = 0;
                }
+       }
 
        mutex_unlock(&event_mutex);
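
The user_events changes above split mm setup into user_event_mm_alloc() and user_event_mm_attach(): the structure is only published on the RCU-protected user_event_mms list once its enabler list is fully built, the renamed members (mms_link, mm_enablers_link, user_event_link) spell out which list each node lives on, and the fork error path can now simply destroy an mm that was never attached. The following is a minimal sketch of that publish-after-init ordering; the demo_* names are illustrative stand-ins, not kernel symbols.

    struct demo_mm {
            struct list_head mms_link;   /* entry on the global RCU list */
            struct list_head enablers;   /* private until attach */
    };

    static LIST_HEAD(demo_mms);
    static DEFINE_SPINLOCK(demo_mms_lock);

    static struct demo_mm *demo_mm_alloc(void)
    {
            struct demo_mm *mm = kzalloc(sizeof(*mm), GFP_KERNEL);

            if (!mm)
                    return NULL;

            INIT_LIST_HEAD(&mm->enablers);
            /* Not visible to RCU readers yet, so plain list ops suffice. */
            return mm;
    }

    static void demo_mm_attach(struct demo_mm *mm)
    {
            unsigned long flags;

            /* Publish only after every field a reader may touch is set up. */
            spin_lock_irqsave(&demo_mms_lock, flags);
            list_add_rcu(&mm->mms_link, &demo_mms);
            spin_unlock_irqrestore(&demo_mms_lock, flags);
    }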
 
index efbbec2..e97e3fa 100644 (file)
@@ -1652,6 +1652,8 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
                        osnoise_stop_tracing();
                        notify_new_max_latency(diff);
 
+                       wake_up_process(tlat->kthread);
+
                        return HRTIMER_NORESTART;
                }
        }
index ef8ed3b..6a4ecfb 100644 (file)
@@ -308,7 +308,7 @@ trace_probe_primary_from_call(struct trace_event_call *call)
 {
        struct trace_probe_event *tpe = trace_probe_event_from_call(call);
 
-       return list_first_entry(&tpe->probes, struct trace_probe, list);
+       return list_first_entry_or_null(&tpe->probes, struct trace_probe, list);
 }
 
 static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
index a931d9a..5295904 100644 (file)
@@ -848,6 +848,12 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        }
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+       /*
+        * These tests can take some time to run. Make sure on non PREEMPT
+        * kernels, we do not trigger the softlockup detector.
+        */
+       cond_resched();
+
        tracing_reset_online_cpus(&tr->array_buffer);
        set_graph_array(tr);
 
@@ -869,6 +875,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        if (ret)
                goto out;
 
+       cond_resched();
+
        ret = register_ftrace_graph(&fgraph_ops);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
@@ -891,6 +899,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
        if (ret)
                goto out;
 
+       cond_resched();
+
        tracing_start();
 
        if (!ret && !count) {
index b7cbd66..f80d5c5 100644 (file)
@@ -12,58 +12,88 @@ enum vhost_task_flags {
        VHOST_TASK_FLAGS_STOP,
 };
 
+struct vhost_task {
+       bool (*fn)(void *data);
+       void *data;
+       struct completion exited;
+       unsigned long flags;
+       struct task_struct *task;
+};
+
 static int vhost_task_fn(void *data)
 {
        struct vhost_task *vtsk = data;
-       int ret;
+       bool dead = false;
+
+       for (;;) {
+               bool did_work;
+
+               /* mb paired w/ vhost_task_stop */
+               if (test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags))
+                       break;
+
+               if (!dead && signal_pending(current)) {
+                       struct ksignal ksig;
+                       /*
+                        * Calling get_signal will block in SIGSTOP,
+                        * or clear fatal_signal_pending, but remember
+                        * what was set.
+                        *
+                        * This thread won't actually exit until all
+                        * of the file descriptors are closed, and
+                        * the release function is called.
+                        */
+                       dead = get_signal(&ksig);
+                       if (dead)
+                               clear_thread_flag(TIF_SIGPENDING);
+               }
+
+               did_work = vtsk->fn(vtsk->data);
+               if (!did_work) {
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       schedule();
+               }
+       }
 
-       ret = vtsk->fn(vtsk->data);
        complete(&vtsk->exited);
-       do_exit(ret);
+       do_exit(0);
+}
+
+/**
+ * vhost_task_wake - wakeup the vhost_task
+ * @vtsk: vhost_task to wake
+ *
+ * wake up the vhost_task worker thread
+ */
+void vhost_task_wake(struct vhost_task *vtsk)
+{
+       wake_up_process(vtsk->task);
 }
+EXPORT_SYMBOL_GPL(vhost_task_wake);
 
 /**
  * vhost_task_stop - stop a vhost_task
  * @vtsk: vhost_task to stop
  *
- * Callers must call vhost_task_should_stop and return from their worker
- * function when it returns true;
+ * vhost_task_fn ensures the worker thread exits after
+ * VHOST_TASK_FLAGS_STOP becomes true.
  */
 void vhost_task_stop(struct vhost_task *vtsk)
 {
-       pid_t pid = vtsk->task->pid;
-
        set_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
-       wake_up_process(vtsk->task);
+       vhost_task_wake(vtsk);
        /*
         * Make sure vhost_task_fn is no longer accessing the vhost_task before
-        * freeing it below. If userspace crashed or exited without closing,
-        * then the vhost_task->task could already be marked dead so
-        * kernel_wait will return early.
+        * freeing it below.
         */
        wait_for_completion(&vtsk->exited);
-       /*
-        * If we are just closing/removing a device and the parent process is
-        * not exiting then reap the task.
-        */
-       kernel_wait4(pid, NULL, __WCLONE, NULL);
        kfree(vtsk);
 }
 EXPORT_SYMBOL_GPL(vhost_task_stop);
 
 /**
- * vhost_task_should_stop - should the vhost task return from the work function
- * @vtsk: vhost_task to stop
- */
-bool vhost_task_should_stop(struct vhost_task *vtsk)
-{
-       return test_bit(VHOST_TASK_FLAGS_STOP, &vtsk->flags);
-}
-EXPORT_SYMBOL_GPL(vhost_task_should_stop);
-
-/**
- * vhost_task_create - create a copy of a process to be used by the kernel
- * @fn: thread stack
+ * vhost_task_create - create a copy of a task to be used by the kernel
+ * @fn: vhost worker function
  * @arg: data to be passed to fn
  * @name: the thread's name
  *
@@ -71,17 +101,17 @@ EXPORT_SYMBOL_GPL(vhost_task_should_stop);
  * failure. The returned task is inactive, and the caller must fire it up
  * through vhost_task_start().
  */
-struct vhost_task *vhost_task_create(int (*fn)(void *), void *arg,
+struct vhost_task *vhost_task_create(bool (*fn)(void *), void *arg,
                                     const char *name)
 {
        struct kernel_clone_args args = {
-               .flags          = CLONE_FS | CLONE_UNTRACED | CLONE_VM,
+               .flags          = CLONE_FS | CLONE_UNTRACED | CLONE_VM |
+                                 CLONE_THREAD | CLONE_SIGHAND,
                .exit_signal    = 0,
                .fn             = vhost_task_fn,
                .name           = name,
                .user_worker    = 1,
                .no_files       = 1,
-               .ignore_signals = 1,
        };
        struct vhost_task *vtsk;
        struct task_struct *tsk;
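
With the vhost_task rework, the worker callback returns bool ("did I make progress?") instead of int, vhost_task_fn itself loops on the STOP flag and handles signals, and users wake the worker with vhost_task_wake() rather than poking the task directly. Below is a hedged sketch of what a caller-side callback could look like under the new contract; demo_dev, demo_item and the locking scheme are assumptions for illustration, not taken from the vhost driver.

    struct demo_item {
            struct list_head node;
    };

    struct demo_dev {
            spinlock_t lock;
            struct list_head queue;      /* filled by producers */
    };

    /* Hypothetical callback: handle one queued item per invocation. */
    static bool demo_vhost_work(void *data)
    {
            struct demo_dev *dev = data;
            struct demo_item *item;

            spin_lock(&dev->lock);
            item = list_first_entry_or_null(&dev->queue, struct demo_item, node);
            if (item)
                    list_del(&item->node);
            spin_unlock(&dev->lock);

            if (!item)
                    return false;   /* no progress: vhost_task_fn will sleep */

            /* ... process the item ... */
            kfree(item);
            return true;            /* made progress: loop again before sleeping */
    }

Under this assumption a device would create the worker with vhost_task_create(demo_vhost_work, dev, "demo-worker"), start it with vhost_task_start(), have producers call vhost_task_wake() after queueing, and tear it down with vhost_task_stop(), which sets VHOST_TASK_FLAGS_STOP, wakes the thread and waits for its completion.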
index 73c1636..4c34867 100644 (file)
@@ -280,8 +280,8 @@ static void irq_cpu_rmap_release(struct kref *ref)
        struct irq_glue *glue =
                container_of(ref, struct irq_glue, notify.kref);
 
-       cpu_rmap_put(glue->rmap);
        glue->rmap->obj[glue->index] = NULL;
+       cpu_rmap_put(glue->rmap);
        kfree(glue);
 }
 
index 003edc5..984985c 100644 (file)
@@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 
 static void fill_pool(void)
 {
-       gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+       gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;
 
@@ -591,10 +591,21 @@ static void debug_objects_fill_pool(void)
 {
        /*
         * On RT enabled kernels the pool refill must happen in preemptible
-        * context:
+        * context -- for !RT kernels we rely on the fact that spinlock_t and
+        * raw_spinlock_t are basically the same type and this lock-type
+        * inversion works just fine.
         */
-       if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+       if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+               /*
+                * Annotate away the spinlock_t inside raw_spinlock_t warning
+                * by temporarily raising the wait-type to WAIT_SLEEP, matching
+                * the preemptible() condition above.
+                */
+               static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+               lock_map_acquire_try(&fill_pool_map);
                fill_pool();
+               lock_map_release(&fill_pool_map);
+       }
 }
 
 static void
index 110a364..8ebc43d 100644 (file)
@@ -5317,15 +5317,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
 
        mt = mte_node_type(mas->node);
        pivots = ma_pivots(mas_mn(mas), mt);
-       if (offset)
-               mas->min = pivots[offset - 1] + 1;
-
-       if (offset < mt_pivots[mt])
-               mas->max = pivots[offset];
-
-       if (mas->index < mas->min)
-               mas->index = mas->min;
-
+       min = mas_safe_min(mas, pivots, offset);
+       if (mas->index < min)
+               mas->index = min;
        mas->last = mas->index + size - 1;
        return 0;
 }
index 05ed84c..1d7d480 100644 (file)
@@ -45,6 +45,7 @@ struct test_batched_req {
        bool sent;
        const struct firmware *fw;
        const char *name;
+       const char *fw_buf;
        struct completion completion;
        struct task_struct *task;
        struct device *dev;
@@ -175,8 +176,14 @@ static void __test_release_all_firmware(void)
 
        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
-               if (req->fw)
+               if (req->fw) {
+                       if (req->fw_buf) {
+                               kfree_const(req->fw_buf);
+                               req->fw_buf = NULL;
+                       }
                        release_firmware(req->fw);
+                       req->fw = NULL;
+               }
        }
 
        vfree(test_fw_config->reqs);
@@ -353,16 +360,26 @@ static ssize_t config_test_show_str(char *dst,
        return len;
 }
 
-static int test_dev_config_update_bool(const char *buf, size_t size,
+static inline int __test_dev_config_update_bool(const char *buf, size_t size,
                                       bool *cfg)
 {
        int ret;
 
-       mutex_lock(&test_fw_mutex);
        if (kstrtobool(buf, cfg) < 0)
                ret = -EINVAL;
        else
                ret = size;
+
+       return ret;
+}
+
+static int test_dev_config_update_bool(const char *buf, size_t size,
+                                      bool *cfg)
+{
+       int ret;
+
+       mutex_lock(&test_fw_mutex);
+       ret = __test_dev_config_update_bool(buf, size, cfg);
        mutex_unlock(&test_fw_mutex);
 
        return ret;
@@ -373,7 +390,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val)
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
-static int test_dev_config_update_size_t(const char *buf,
+static int __test_dev_config_update_size_t(
+                                        const char *buf,
                                         size_t size,
                                         size_t *cfg)
 {
@@ -384,9 +402,7 @@ static int test_dev_config_update_size_t(const char *buf,
        if (ret)
                return ret;
 
-       mutex_lock(&test_fw_mutex);
        *(size_t *)cfg = new;
-       mutex_unlock(&test_fw_mutex);
 
        /* Always return full write size even if we didn't consume all */
        return size;
@@ -402,7 +418,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val)
        return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
-static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
 {
        u8 val;
        int ret;
@@ -411,14 +427,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
        if (ret)
                return ret;
 
-       mutex_lock(&test_fw_mutex);
        *(u8 *)cfg = val;
-       mutex_unlock(&test_fw_mutex);
 
        /* Always return full write size even if we didn't consume all */
        return size;
 }
 
+static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
+{
+       int ret;
+
+       mutex_lock(&test_fw_mutex);
+       ret = __test_dev_config_update_u8(buf, size, cfg);
+       mutex_unlock(&test_fw_mutex);
+
+       return ret;
+}
+
 static ssize_t test_dev_config_show_u8(char *buf, u8 val)
 {
        return snprintf(buf, PAGE_SIZE, "%u\n", val);
@@ -471,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev,
                mutex_unlock(&test_fw_mutex);
                goto out;
        }
-       mutex_unlock(&test_fw_mutex);
 
-       rc = test_dev_config_update_u8(buf, count,
-                                      &test_fw_config->num_requests);
+       rc = __test_dev_config_update_u8(buf, count,
+                                        &test_fw_config->num_requests);
+       mutex_unlock(&test_fw_mutex);
 
 out:
        return rc;
@@ -518,10 +543,10 @@ static ssize_t config_buf_size_store(struct device *dev,
                mutex_unlock(&test_fw_mutex);
                goto out;
        }
-       mutex_unlock(&test_fw_mutex);
 
-       rc = test_dev_config_update_size_t(buf, count,
-                                          &test_fw_config->buf_size);
+       rc = __test_dev_config_update_size_t(buf, count,
+                                            &test_fw_config->buf_size);
+       mutex_unlock(&test_fw_mutex);
 
 out:
        return rc;
@@ -548,10 +573,10 @@ static ssize_t config_file_offset_store(struct device *dev,
                mutex_unlock(&test_fw_mutex);
                goto out;
        }
-       mutex_unlock(&test_fw_mutex);
 
-       rc = test_dev_config_update_size_t(buf, count,
-                                          &test_fw_config->file_offset);
+       rc = __test_dev_config_update_size_t(buf, count,
+                                            &test_fw_config->file_offset);
+       mutex_unlock(&test_fw_mutex);
 
 out:
        return rc;
@@ -652,6 +677,8 @@ static ssize_t trigger_request_store(struct device *dev,
 
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
+       if (test_fw_config->reqs)
+               __test_release_all_firmware();
        test_firmware = NULL;
        rc = request_firmware(&test_firmware, name, dev);
        if (rc) {
@@ -752,6 +779,8 @@ static ssize_t trigger_async_request_store(struct device *dev,
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
        test_firmware = NULL;
+       if (test_fw_config->reqs)
+               __test_release_all_firmware();
        rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL,
                                     NULL, trigger_async_request_cb);
        if (rc) {
@@ -794,6 +823,8 @@ static ssize_t trigger_custom_fallback_store(struct device *dev,
 
        mutex_lock(&test_fw_mutex);
        release_firmware(test_firmware);
+       if (test_fw_config->reqs)
+               __test_release_all_firmware();
        test_firmware = NULL;
        rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name,
                                     dev, GFP_KERNEL, NULL,
@@ -856,6 +887,8 @@ static int test_fw_run_batch_request(void *data)
                                                 test_fw_config->buf_size);
                if (!req->fw)
                        kfree(test_buf);
+               else
+                       req->fw_buf = test_buf;
        } else {
                req->rc = test_fw_config->req_firmware(&req->fw,
                                                       req->name,
@@ -895,6 +928,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
 
        mutex_lock(&test_fw_mutex);
 
+       if (test_fw_config->reqs) {
+               rc = -EBUSY;
+               goto out_bail;
+       }
+
        test_fw_config->reqs =
                vzalloc(array3_size(sizeof(struct test_batched_req),
                                    test_fw_config->num_requests, 2));
@@ -911,6 +949,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev,
                req->fw = NULL;
                req->idx = i;
                req->name = test_fw_config->name;
+               req->fw_buf = NULL;
                req->dev = dev;
                init_completion(&req->completion);
                req->task = kthread_run(test_fw_run_batch_request, req,
@@ -993,6 +1032,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
 
        mutex_lock(&test_fw_mutex);
 
+       if (test_fw_config->reqs) {
+               rc = -EBUSY;
+               goto out_bail;
+       }
+
        test_fw_config->reqs =
                vzalloc(array3_size(sizeof(struct test_batched_req),
                                    test_fw_config->num_requests, 2));
@@ -1010,6 +1054,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev,
        for (i = 0; i < test_fw_config->num_requests; i++) {
                req = &test_fw_config->reqs[i];
                req->name = test_fw_config->name;
+               req->fw_buf = NULL;
                req->fw = NULL;
                req->idx = i;
                init_completion(&req->completion);
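
The test_firmware changes follow a common locked/unlocked helper split: each __test_dev_config_update_*() variant assumes test_fw_mutex is already held, while the plain wrapper takes and drops the lock itself, so store handlers that must hold the mutex across a check of test_fw_config->reqs can update the value without an unlock/relock window. A generic sketch of the pattern (demo_* names are illustrative) is:

    static DEFINE_MUTEX(demo_mutex);

    /* Caller must hold demo_mutex. */
    static int __demo_update_u8(const char *buf, size_t size, u8 *cfg)
    {
            u8 val;
            int ret;

            ret = kstrtou8(buf, 10, &val);
            if (ret)
                    return ret;

            *cfg = val;
            return size;    /* sysfs stores report the full write size */
    }

    static int demo_update_u8(const char *buf, size_t size, u8 *cfg)
    {
            int ret;

            mutex_lock(&demo_mutex);
            ret = __demo_update_u8(buf, size, cfg);
            mutex_unlock(&demo_mutex);

            return ret;
    }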
index a925415..018a5bd 100644 (file)
@@ -98,6 +98,7 @@ config PAGE_OWNER
 config PAGE_TABLE_CHECK
        bool "Check for invalid mappings in user page tables"
        depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK
+       depends on EXCLUSIVE_SYSTEM_RAM
        select PAGE_EXTENSION
        help
          Check that anonymous page is not being mapped twice with read write
index 2aafc46..392fb27 100644 (file)
@@ -29,7 +29,7 @@
  * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
  * at a time instead of byte by byte to improve performance.
  */
-#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
+#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
 
 /* Maximum stack depth for reports. */
 #define KFENCE_STACK_DEPTH 64
index 25d8610..f2baf97 100644 (file)
@@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
 
        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);
+
+       BUG_ON(PageSlab(page));
        anon = PageAnon(page);
 
        for (i = 0; i < pgcnt; i++) {
@@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
 
        page = pfn_to_page(pfn);
        page_ext = page_ext_get(page);
+
+       BUG_ON(PageSlab(page));
        anon = PageAnon(page);
 
        for (i = 0; i < pgcnt; i++) {
@@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order)
        struct page_ext *page_ext;
        unsigned long i;
 
+       BUG_ON(PageSlab(page));
+
        page_ext = page_ext_get(page);
        BUG_ON(!page_ext);
        for (i = 0; i < (1ul << order); i++) {
index 3f83b10..fe10436 100644 (file)
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                      int *debugfs_id)
 {
        struct dentry *entry = shrinker->debugfs_entry;
 
@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
        kfree_const(shrinker->name);
        shrinker->name = NULL;
 
-       if (entry) {
-               ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
-               shrinker->debugfs_entry = NULL;
-       }
+       *debugfs_id = entry ? shrinker->debugfs_id : -1;
+       shrinker->debugfs_entry = NULL;
 
        return entry;
 }
 
+void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
+{
+       debugfs_remove_recursive(debugfs_entry);
+       ida_free(&shrinker_debugfs_ida, debugfs_id);
+}
+
 static int __init shrinker_debugfs_init(void)
 {
        struct shrinker *shrinker;
index d257916..6d0cd28 100644 (file)
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
 void unregister_shrinker(struct shrinker *shrinker)
 {
        struct dentry *debugfs_entry;
+       int debugfs_id;
 
        if (!(shrinker->flags & SHRINKER_REGISTERED))
                return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
        shrinker->flags &= ~SHRINKER_REGISTERED;
        if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                unregister_memcg_shrinker(shrinker);
-       debugfs_entry = shrinker_debugfs_remove(shrinker);
+       debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
        mutex_unlock(&shrinker_mutex);
 
        atomic_inc(&shrinker_srcu_generation);
        synchronize_srcu(&shrinker_srcu);
 
-       debugfs_remove_recursive(debugfs_entry);
+       shrinker_debugfs_remove(debugfs_entry, debugfs_id);
 
        kfree(shrinker->nr_deferred);
        shrinker->nr_deferred = NULL;
index 44ddaf5..02f7f41 100644 (file)
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
        obj_to_location(obj, &page, &obj_idx);
        zspage = get_zspage(page);
 
-#ifdef CONFIG_ZPOOL
-       /*
-        * Move the zspage to front of pool's LRU.
-        *
-        * Note that this is swap-specific, so by definition there are no ongoing
-        * accesses to the memory while the page is swapped out that would make
-        * it "hot". A new entry is hot, then ages to the tail until it gets either
-        * written back or swaps back in.
-        *
-        * Furthermore, map is also called during writeback. We must not put an
-        * isolated page on the LRU mid-reclaim.
-        *
-        * As a result, only update the LRU when the page is mapped for write
-        * when it's first instantiated.
-        *
-        * This is a deviation from the other backends, which perform this update
-        * in the allocation function (zbud_alloc, z3fold_alloc).
-        */
-       if (mm == ZS_MM_WO) {
-               if (!list_empty(&zspage->lru))
-                       list_del(&zspage->lru);
-               list_add(&zspage->lru, &pool->lru);
-       }
-#endif
-
        /*
         * migration cannot move any zpages in this zspage. Here, pool->lock
         * is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
                fix_fullness_group(class, zspage);
                record_obj(handle, obj);
                class_stat_inc(class, ZS_OBJS_INUSE, 1);
-               spin_unlock(&pool->lock);
 
-               return handle;
+               goto out;
        }
 
        spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
        /* We completely set up zspage so mark them as movable */
        SetZsPageMovable(pool, zspage);
+out:
+#ifdef CONFIG_ZPOOL
+       /* Add/move zspage to beginning of LRU */
+       if (!list_empty(&zspage->lru))
+               list_del(&zspage->lru);
+       list_add(&zspage->lru, &pool->lru);
+#endif
+
        spin_unlock(&pool->lock);
 
        return handle;
index e1e621d..59da2a4 100644 (file)
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
                goto fail;
 
        case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+               /*
+                * Having a local reference to the zswap entry doesn't exclude
+                * swapping from invalidating and recycling the swap slot. Once
+                * the swapcache is secured against concurrent swapping to and
+                * from the slot, recheck that the entry is still current before
+                * writing.
+                */
+               spin_lock(&tree->lock);
+               if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
+                       spin_unlock(&tree->lock);
+                       delete_from_swap_cache(page_folio(page));
+                       ret = -ENOMEM;
+                       goto fail;
+               }
+               spin_unlock(&tree->lock);
+
                /* decompress */
                acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
                dlen = PAGE_SIZE;
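
The zswap hunk guards writeback with a revalidation step: after the swapcache page is secured, the entry is looked up again under tree->lock and writeback is abandoned if the swap slot has been invalidated or recycled in the meantime. Reduced to its bare shape, with hypothetical demo_* names standing in for the zswap structures, the check looks like this:

    struct demo_entry;

    struct demo_tree {
            spinlock_t lock;
            /* ... entries indexed by swap offset ... */
    };

    /* Hypothetical lookup; stands in for the tree search under the lock. */
    struct demo_entry *demo_lookup(struct demo_tree *tree, pgoff_t offset);

    /* Before committing, recheck that the slot still maps to this entry. */
    static bool demo_entry_still_current(struct demo_tree *tree,
                                         struct demo_entry *entry,
                                         pgoff_t offset)
    {
            bool still_current;

            spin_lock(&tree->lock);
            still_current = (demo_lookup(tree, offset) == entry);
            spin_unlock(&tree->lock);

            return still_current;
    }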
index 870e493..b90781b 100644 (file)
@@ -109,8 +109,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
         * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
         * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
         */
-       if (veth->h_vlan_proto != vlan->vlan_proto ||
-           vlan->flags & VLAN_FLAG_REORDER_HDR) {
+       if (vlan->flags & VLAN_FLAG_REORDER_HDR ||
+           veth->h_vlan_proto != vlan->vlan_proto) {
                u16 vlan_tci;
                vlan_tci = vlan->vlan_id;
                vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb->priority);
index 2b2d33e..995d29e 100644 (file)
@@ -400,6 +400,7 @@ done:
        return error;
 }
 
+#ifdef CONFIG_PROC_FS
 void *atm_dev_seq_start(struct seq_file *seq, loff_t *pos)
 {
        mutex_lock(&atm_dev_mutex);
@@ -415,3 +416,4 @@ void *atm_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        return seq_list_next(v, &atm_devs, pos);
 }
+#endif
index 6968e55..28a939d 100644 (file)
@@ -101,7 +101,6 @@ static void batadv_dat_purge(struct work_struct *work);
  */
 static void batadv_dat_start_timer(struct batadv_priv *bat_priv)
 {
-       INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
        queue_delayed_work(batadv_event_workqueue, &bat_priv->dat.work,
                           msecs_to_jiffies(10000));
 }
@@ -819,6 +818,7 @@ int batadv_dat_init(struct batadv_priv *bat_priv)
        if (!bat_priv->dat.hash)
                return -ENOMEM;
 
+       INIT_DELAYED_WORK(&bat_priv->dat.work, batadv_dat_purge);
        batadv_dat_start_timer(bat_priv);
 
        batadv_tvlv_handler_register(bat_priv, batadv_dat_tvlv_ogm_handler_v1,
index 640b951..1ef952b 100644 (file)
@@ -947,8 +947,8 @@ static void find_cis(struct hci_conn *conn, void *data)
 {
        struct iso_list_data *d = data;
 
-       /* Ignore broadcast */
-       if (!bacmp(&conn->dst, BDADDR_ANY))
+       /* Ignore broadcast or if CIG don't match */
+       if (!bacmp(&conn->dst, BDADDR_ANY) || d->cig != conn->iso_qos.ucast.cig)
                return;
 
        d->count++;
@@ -963,12 +963,17 @@ static void cis_cleanup(struct hci_conn *conn)
        struct hci_dev *hdev = conn->hdev;
        struct iso_list_data d;
 
+       if (conn->iso_qos.ucast.cig == BT_ISO_QOS_CIG_UNSET)
+               return;
+
        memset(&d, 0, sizeof(d));
        d.cig = conn->iso_qos.ucast.cig;
 
        /* Check if ISO connection is a CIS and remove CIG if there are
         * no other connections using it.
         */
+       hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d);
+       hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d);
        hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d);
        if (d.count)
                return;
@@ -1083,8 +1088,28 @@ static void hci_conn_unlink(struct hci_conn *conn)
        if (!conn->parent) {
                struct hci_link *link, *t;
 
-               list_for_each_entry_safe(link, t, &conn->link_list, list)
-                       hci_conn_unlink(link->conn);
+               list_for_each_entry_safe(link, t, &conn->link_list, list) {
+                       struct hci_conn *child = link->conn;
+
+                       hci_conn_unlink(child);
+
+                       /* If hdev is down it means
+                        * hci_dev_close_sync/hci_conn_hash_flush is in progress
+                        * and links don't need to be cleanup as all connections
+                        * would be cleanup.
+                        */
+                       if (!test_bit(HCI_UP, &hdev->flags))
+                               continue;
+
+                       /* Due to race, SCO connection might be not established
+                        * yet at this point. Delete it now, otherwise it is
+                        * possible for it to be stuck and can't be deleted.
+                        */
+                       if ((child->type == SCO_LINK ||
+                            child->type == ESCO_LINK) &&
+                           child->handle == HCI_CONN_HANDLE_UNSET)
+                               hci_conn_del(child);
+               }
 
                return;
        }
@@ -1092,35 +1117,30 @@ static void hci_conn_unlink(struct hci_conn *conn)
        if (!conn->link)
                return;
 
-       hci_conn_put(conn->parent);
-       conn->parent = NULL;
-
        list_del_rcu(&conn->link->list);
        synchronize_rcu();
 
+       hci_conn_drop(conn->parent);
+       hci_conn_put(conn->parent);
+       conn->parent = NULL;
+
        kfree(conn->link);
        conn->link = NULL;
-
-       /* Due to race, SCO connection might be not established
-        * yet at this point. Delete it now, otherwise it is
-        * possible for it to be stuck and can't be deleted.
-        */
-       if (conn->handle == HCI_CONN_HANDLE_UNSET)
-               hci_conn_del(conn);
 }
 
-int hci_conn_del(struct hci_conn *conn)
+void hci_conn_del(struct hci_conn *conn)
 {
        struct hci_dev *hdev = conn->hdev;
 
        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);
 
+       hci_conn_unlink(conn);
+
        cancel_delayed_work_sync(&conn->disc_work);
        cancel_delayed_work_sync(&conn->auto_accept_work);
        cancel_delayed_work_sync(&conn->idle_work);
 
        if (conn->type == ACL_LINK) {
-               hci_conn_unlink(conn);
                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
@@ -1131,13 +1151,6 @@ int hci_conn_del(struct hci_conn *conn)
                else
                        hdev->acl_cnt += conn->sent;
        } else {
-               struct hci_conn *acl = conn->parent;
-
-               if (acl) {
-                       hci_conn_unlink(conn);
-                       hci_conn_drop(acl);
-               }
-
                /* Unacked ISO frames */
                if (conn->type == ISO_LINK) {
                        if (hdev->iso_pkts)
@@ -1160,8 +1173,6 @@ int hci_conn_del(struct hci_conn *conn)
         * rest of hci_conn_del.
         */
        hci_conn_cleanup(conn);
-
-       return 0;
 }
 
 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type)
@@ -1760,24 +1771,23 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos)
 
        memset(&data, 0, sizeof(data));
 
-       /* Allocate a CIG if not set */
+       /* Allocate first still reconfigurable CIG if not set */
        if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) {
-               for (data.cig = 0x00; data.cig < 0xff; data.cig++) {
+               for (data.cig = 0x00; data.cig < 0xf0; data.cig++) {
                        data.count = 0;
-                       data.cis = 0xff;
 
-                       hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
-                                                BT_BOUND, &data);
+                       hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
+                                                BT_CONNECT, &data);
                        if (data.count)
                                continue;
 
-                       hci_conn_hash_list_state(hdev, cis_list, ISO_LINK,
+                       hci_conn_hash_list_state(hdev, find_cis, ISO_LINK,
                                                 BT_CONNECTED, &data);
                        if (!data.count)
                                break;
                }
 
-               if (data.cig == 0xff)
+               if (data.cig == 0xf0)
                        return false;
 
                /* Update CIG */
@@ -2462,22 +2472,21 @@ timer:
 /* Drop all connection on the device */
 void hci_conn_hash_flush(struct hci_dev *hdev)
 {
-       struct hci_conn_hash *h = &hdev->conn_hash;
-       struct hci_conn *c, *n;
+       struct list_head *head = &hdev->conn_hash.list;
+       struct hci_conn *conn;
 
        BT_DBG("hdev %s", hdev->name);
 
-       list_for_each_entry_safe(c, n, &h->list, list) {
-               c->state = BT_CLOSED;
-
-               hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
-
-               /* Unlink before deleting otherwise it is possible that
-                * hci_conn_del removes the link which may cause the list to
-                * contain items already freed.
-                */
-               hci_conn_unlink(c);
-               hci_conn_del(c);
+       /* We should not traverse the list here, because hci_conn_del
+        * can remove extra links, which may cause the list traversal
+        * to hit items that have already been released.
+        */
+       while ((conn = list_first_entry_or_null(head,
+                                               struct hci_conn,
+                                               list)) != NULL) {
+               conn->state = BT_CLOSED;
+               hci_disconn_cfm(conn, HCI_ERROR_LOCAL_HOST_TERM);
+               hci_conn_del(conn);
        }
 }
 
index a856b10..48917c6 100644 (file)
@@ -1416,10 +1416,10 @@ int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
 
 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
 {
-       struct smp_ltk *k;
+       struct smp_ltk *k, *tmp;
        int removed = 0;
 
-       list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
+       list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;
 
@@ -1435,9 +1435,9 @@ int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
 
 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
 {
-       struct smp_irk *k;
+       struct smp_irk *k, *tmp;
 
-       list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
+       list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;
 
@@ -2686,7 +2686,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
 {
        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
 
+       mutex_lock(&hdev->unregister_lock);
        hci_dev_set_flag(hdev, HCI_UNREGISTER);
+       mutex_unlock(&hdev->unregister_lock);
 
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
index d00ef6e..09ba6d8 100644 (file)
@@ -3804,48 +3804,56 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
                                   struct sk_buff *skb)
 {
        struct hci_rp_le_set_cig_params *rp = data;
+       struct hci_cp_le_set_cig_params *cp;
        struct hci_conn *conn;
-       int i = 0;
+       u8 status = rp->status;
+       int i;
 
        bt_dev_dbg(hdev, "status 0x%2.2x", rp->status);
 
+       cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_CIG_PARAMS);
+       if (!cp || rp->num_handles != cp->num_cis || rp->cig_id != cp->cig_id) {
+               bt_dev_err(hdev, "unexpected Set CIG Parameters response data");
+               status = HCI_ERROR_UNSPECIFIED;
+       }
+
        hci_dev_lock(hdev);
 
-       if (rp->status) {
+       if (status) {
                while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) {
                        conn->state = BT_CLOSED;
-                       hci_connect_cfm(conn, rp->status);
+                       hci_connect_cfm(conn, status);
                        hci_conn_del(conn);
                }
                goto unlock;
        }
 
-       rcu_read_lock();
+       /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2553
+        *
+        * If the Status return parameter is zero, then the Controller shall
+        * set the Connection_Handle arrayed return parameter to the connection
+        * handle(s) corresponding to the CIS configurations specified in
+        * the CIS_IDs command parameter, in the same order.
+        */
+       for (i = 0; i < rp->num_handles; ++i) {
+               conn = hci_conn_hash_lookup_cis(hdev, NULL, 0, rp->cig_id,
+                                               cp->cis[i].cis_id);
+               if (!conn || !bacmp(&conn->dst, BDADDR_ANY))
+                       continue;
 
-       list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
-               if (conn->type != ISO_LINK ||
-                   conn->iso_qos.ucast.cig != rp->cig_id ||
-                   conn->state == BT_CONNECTED)
+               if (conn->state != BT_BOUND && conn->state != BT_CONNECT)
                        continue;
 
-               conn->handle = __le16_to_cpu(rp->handle[i++]);
+               conn->handle = __le16_to_cpu(rp->handle[i]);
 
                bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn,
                           conn->handle, conn->parent);
 
                /* Create CIS if LE is already connected */
-               if (conn->parent && conn->parent->state == BT_CONNECTED) {
-                       rcu_read_unlock();
+               if (conn->parent && conn->parent->state == BT_CONNECTED)
                        hci_le_create_cis(conn);
-                       rcu_read_lock();
-               }
-
-               if (i == rp->num_handles)
-                       break;
        }
 
-       rcu_read_unlock();
-
 unlock:
        hci_dev_unlock(hdev);
 
index 647a8ce..804cde4 100644 (file)
@@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
        INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
        INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
        mutex_init(&hdev->cmd_sync_work_lock);
+       mutex_init(&hdev->unregister_lock);
 
        INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
        INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
@@ -692,14 +693,19 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
                        void *data, hci_cmd_sync_work_destroy_t destroy)
 {
        struct hci_cmd_sync_work_entry *entry;
+       int err = 0;
 
-       if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
-               return -ENODEV;
+       mutex_lock(&hdev->unregister_lock);
+       if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+               err = -ENODEV;
+               goto unlock;
+       }
 
        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-       if (!entry)
-               return -ENOMEM;
-
+       if (!entry) {
+               err = -ENOMEM;
+               goto unlock;
+       }
        entry->func = func;
        entry->data = data;
        entry->destroy = destroy;
@@ -710,7 +716,9 @@ int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
 
        queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
 
-       return 0;
+unlock:
+       mutex_unlock(&hdev->unregister_lock);
+       return err;
 }
 EXPORT_SYMBOL(hci_cmd_sync_submit);
 
@@ -4543,6 +4551,9 @@ static int hci_init_sync(struct hci_dev *hdev)
            !hci_dev_test_flag(hdev, HCI_CONFIG))
                return 0;
 
+       if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
+               return 0;
+
        hci_debugfs_create_common(hdev);
 
        if (lmp_bredr_capable(hdev))
index 376b523..c5e8798 100644 (file)
@@ -4306,6 +4306,10 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
        result = __le16_to_cpu(rsp->result);
        status = __le16_to_cpu(rsp->status);
 
+       if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
+                                          dcid > L2CAP_CID_DYN_END))
+               return -EPROTO;
+
        BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
               dcid, scid, result, status);
 
@@ -4337,6 +4341,11 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
 
        switch (result) {
        case L2CAP_CR_SUCCESS:
+               if (__l2cap_get_chan_by_dcid(conn, dcid)) {
+                       err = -EBADSLT;
+                       break;
+               }
+
                l2cap_state_change(chan, BT_CONFIG);
                chan->ident = 0;
                chan->dcid = dcid;
@@ -4663,7 +4672,9 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
 
        chan->ops->set_shutdown(chan);
 
+       l2cap_chan_unlock(chan);
        mutex_lock(&conn->chan_lock);
+       l2cap_chan_lock(chan);
        l2cap_chan_del(chan, ECONNRESET);
        mutex_unlock(&conn->chan_lock);
 
@@ -4702,7 +4713,9 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
                return 0;
        }
 
+       l2cap_chan_unlock(chan);
        mutex_lock(&conn->chan_lock);
+       l2cap_chan_lock(chan);
        l2cap_chan_del(chan, 0);
        mutex_unlock(&conn->chan_lock);
 
index 2b05328..efb0960 100644 (file)
@@ -27,6 +27,10 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br,
 int br_get_vlan_tunnel_info_size(struct net_bridge_vlan_group *vg);
 int br_fill_vlan_tunnel_info(struct sk_buff *skb,
                             struct net_bridge_vlan_group *vg);
+bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
+                       const struct net_bridge_vlan *v_last);
+int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
+                       u16 vid, u32 tun_id, bool *changed);
 
 #ifdef CONFIG_BRIDGE_VLAN_FILTERING
 /* br_vlan_tunnel.c */
@@ -43,10 +47,6 @@ void br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
                                   struct net_bridge_vlan_group *vg);
 int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
                                 struct net_bridge_vlan *vlan);
-bool vlan_tunid_inrange(const struct net_bridge_vlan *v_curr,
-                       const struct net_bridge_vlan *v_last);
-int br_vlan_tunnel_info(const struct net_bridge_port *p, int cmd,
-                       u16 vid, u32 tun_id, bool *changed);
 #else
 static inline int vlan_tunnel_init(struct net_bridge_vlan_group *vg)
 {
index a750259..84f9aba 100644 (file)
@@ -1139,7 +1139,7 @@ static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
        struct isotp_sock *so = isotp_sk(sk);
        int ret = 0;
 
-       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
+       if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK | MSG_CMSG_COMPAT))
                return -EINVAL;
 
        if (!so->bound)
index 821d4ff..ecff1c9 100644 (file)
@@ -126,7 +126,7 @@ static void j1939_can_recv(struct sk_buff *iskb, void *data)
 #define J1939_CAN_ID CAN_EFF_FLAG
 #define J1939_CAN_MASK (CAN_EFF_FLAG | CAN_RTR_FLAG)
 
-static DEFINE_SPINLOCK(j1939_netdev_lock);
+static DEFINE_MUTEX(j1939_netdev_lock);
 
 static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
 {
@@ -220,7 +220,7 @@ static void __j1939_rx_release(struct kref *kref)
        j1939_can_rx_unregister(priv);
        j1939_ecu_unmap_all(priv);
        j1939_priv_set(priv->ndev, NULL);
-       spin_unlock(&j1939_netdev_lock);
+       mutex_unlock(&j1939_netdev_lock);
 }
 
 /* get pointer to priv without increasing ref counter */
@@ -248,9 +248,9 @@ static struct j1939_priv *j1939_priv_get_by_ndev(struct net_device *ndev)
 {
        struct j1939_priv *priv;
 
-       spin_lock(&j1939_netdev_lock);
+       mutex_lock(&j1939_netdev_lock);
        priv = j1939_priv_get_by_ndev_locked(ndev);
-       spin_unlock(&j1939_netdev_lock);
+       mutex_unlock(&j1939_netdev_lock);
 
        return priv;
 }
@@ -260,14 +260,14 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
        struct j1939_priv *priv, *priv_new;
        int ret;
 
-       spin_lock(&j1939_netdev_lock);
+       mutex_lock(&j1939_netdev_lock);
        priv = j1939_priv_get_by_ndev_locked(ndev);
        if (priv) {
                kref_get(&priv->rx_kref);
-               spin_unlock(&j1939_netdev_lock);
+               mutex_unlock(&j1939_netdev_lock);
                return priv;
        }
-       spin_unlock(&j1939_netdev_lock);
+       mutex_unlock(&j1939_netdev_lock);
 
        priv = j1939_priv_create(ndev);
        if (!priv)
@@ -277,29 +277,31 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
        spin_lock_init(&priv->j1939_socks_lock);
        INIT_LIST_HEAD(&priv->j1939_socks);
 
-       spin_lock(&j1939_netdev_lock);
+       mutex_lock(&j1939_netdev_lock);
        priv_new = j1939_priv_get_by_ndev_locked(ndev);
        if (priv_new) {
                /* Someone was faster than us, use their priv and roll
                 * back our's.
                 */
                kref_get(&priv_new->rx_kref);
-               spin_unlock(&j1939_netdev_lock);
+               mutex_unlock(&j1939_netdev_lock);
                dev_put(ndev);
                kfree(priv);
                return priv_new;
        }
        j1939_priv_set(ndev, priv);
-       spin_unlock(&j1939_netdev_lock);
 
        ret = j1939_can_rx_register(priv);
        if (ret < 0)
                goto out_priv_put;
 
+       mutex_unlock(&j1939_netdev_lock);
        return priv;
 
  out_priv_put:
        j1939_priv_set(ndev, NULL);
+       mutex_unlock(&j1939_netdev_lock);
+
        dev_put(ndev);
        kfree(priv);
 
@@ -308,7 +310,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev)
 
 void j1939_netdev_stop(struct j1939_priv *priv)
 {
-       kref_put_lock(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
+       kref_put_mutex(&priv->rx_kref, __j1939_rx_release, &j1939_netdev_lock);
        j1939_priv_put(priv);
 }
 
index 7e90f9e..35970c2 100644 (file)
@@ -798,7 +798,7 @@ static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
        struct j1939_sk_buff_cb *skcb;
        int ret = 0;
 
-       if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE))
+       if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
                return -EINVAL;
 
        if (flags & MSG_ERRQUEUE)
@@ -1088,6 +1088,11 @@ void j1939_sk_errqueue(struct j1939_session *session,
 
 void j1939_sk_send_loop_abort(struct sock *sk, int err)
 {
+       struct j1939_sock *jsk = j1939_sk(sk);
+
+       if (jsk->state & J1939_SOCK_ERRQUEUE)
+               return;
+
        sk->sk_err = err;
 
        sk_error_report(sk);
index b3c13e0..c29f3e1 100644 (file)
@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                u32 next_cpu;
                u32 ident;
 
-               /* First check into global flow table if there is a match */
-               ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+               /* First check into global flow table if there is a match.
+                * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+                */
+               ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
                if ((ident ^ hash) & ~rps_cpu_mask)
                        goto try_rps;
 
@@ -10541,7 +10543,7 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
                return NULL;
        netdev_init_one_queue(dev, queue, NULL);
        RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
-       queue->qdisc_sleeping = &noop_qdisc;
+       RCU_INIT_POINTER(queue->qdisc_sleeping, &noop_qdisc);
        rcu_assign_pointer(dev->ingress_queue, queue);
 #endif
        return queue;
index e212e9d..a3e12a6 100644 (file)
@@ -134,6 +134,29 @@ EXPORT_SYMBOL(page_pool_ethtool_stats_get);
 #define recycle_stat_add(pool, __stat, val)
 #endif
 
+static bool page_pool_producer_lock(struct page_pool *pool)
+       __acquires(&pool->ring.producer_lock)
+{
+       bool in_softirq = in_softirq();
+
+       if (in_softirq)
+               spin_lock(&pool->ring.producer_lock);
+       else
+               spin_lock_bh(&pool->ring.producer_lock);
+
+       return in_softirq;
+}
+
+static void page_pool_producer_unlock(struct page_pool *pool,
+                                     bool in_softirq)
+       __releases(&pool->ring.producer_lock)
+{
+       if (in_softirq)
+               spin_unlock(&pool->ring.producer_lock);
+       else
+               spin_unlock_bh(&pool->ring.producer_lock);
+}
+
 static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
 {
@@ -617,6 +640,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                             int count)
 {
        int i, bulk_len = 0;
+       bool in_softirq;
 
        for (i = 0; i < count; i++) {
                struct page *page = virt_to_head_page(data[i]);
@@ -635,7 +659,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                return;
 
        /* Bulk producer into ptr_ring page_pool cache */
-       page_pool_ring_lock(pool);
+       in_softirq = page_pool_producer_lock(pool);
        for (i = 0; i < bulk_len; i++) {
                if (__ptr_ring_produce(&pool->ring, data[i])) {
                        /* ring full */
@@ -644,7 +668,7 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data,
                }
        }
        recycle_stat_add(pool, ring, i);
-       page_pool_ring_unlock(pool);
+       page_pool_producer_unlock(pool, in_softirq);
 
        /* Hopefully all pages was return into ptr_ring */
        if (likely(i == bulk_len))
index 653901a..41de3a2 100644 (file)
@@ -2385,6 +2385,37 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[],
                if (tb[IFLA_BROADCAST] &&
                    nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
                        return -EINVAL;
+
+               if (tb[IFLA_GSO_MAX_SIZE] &&
+                   nla_get_u32(tb[IFLA_GSO_MAX_SIZE]) > dev->tso_max_size) {
+                       NL_SET_ERR_MSG(extack, "too big gso_max_size");
+                       return -EINVAL;
+               }
+
+               if (tb[IFLA_GSO_MAX_SEGS] &&
+                   (nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > GSO_MAX_SEGS ||
+                    nla_get_u32(tb[IFLA_GSO_MAX_SEGS]) > dev->tso_max_segs)) {
+                       NL_SET_ERR_MSG(extack, "too big gso_max_segs");
+                       return -EINVAL;
+               }
+
+               if (tb[IFLA_GRO_MAX_SIZE] &&
+                   nla_get_u32(tb[IFLA_GRO_MAX_SIZE]) > GRO_MAX_SIZE) {
+                       NL_SET_ERR_MSG(extack, "too big gro_max_size");
+                       return -EINVAL;
+               }
+
+               if (tb[IFLA_GSO_IPV4_MAX_SIZE] &&
+                   nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]) > dev->tso_max_size) {
+                       NL_SET_ERR_MSG(extack, "too big gso_ipv4_max_size");
+                       return -EINVAL;
+               }
+
+               if (tb[IFLA_GRO_IPV4_MAX_SIZE] &&
+                   nla_get_u32(tb[IFLA_GRO_IPV4_MAX_SIZE]) > GRO_MAX_SIZE) {
+                       NL_SET_ERR_MSG(extack, "too big gro_ipv4_max_size");
+                       return -EINVAL;
+               }
        }
 
        if (tb[IFLA_AF_SPEC]) {
@@ -2858,11 +2889,6 @@ static int do_setlink(const struct sk_buff *skb,
        if (tb[IFLA_GSO_MAX_SIZE]) {
                u32 max_size = nla_get_u32(tb[IFLA_GSO_MAX_SIZE]);
 
-               if (max_size > dev->tso_max_size) {
-                       err = -EINVAL;
-                       goto errout;
-               }
-
                if (dev->gso_max_size ^ max_size) {
                        netif_set_gso_max_size(dev, max_size);
                        status |= DO_SETLINK_MODIFIED;
@@ -2872,11 +2898,6 @@ static int do_setlink(const struct sk_buff *skb,
        if (tb[IFLA_GSO_MAX_SEGS]) {
                u32 max_segs = nla_get_u32(tb[IFLA_GSO_MAX_SEGS]);
 
-               if (max_segs > GSO_MAX_SEGS || max_segs > dev->tso_max_segs) {
-                       err = -EINVAL;
-                       goto errout;
-               }
-
                if (dev->gso_max_segs ^ max_segs) {
                        netif_set_gso_max_segs(dev, max_segs);
                        status |= DO_SETLINK_MODIFIED;
@@ -2895,11 +2916,6 @@ static int do_setlink(const struct sk_buff *skb,
        if (tb[IFLA_GSO_IPV4_MAX_SIZE]) {
                u32 max_size = nla_get_u32(tb[IFLA_GSO_IPV4_MAX_SIZE]);
 
-               if (max_size > dev->tso_max_size) {
-                       err = -EINVAL;
-                       goto errout;
-               }
-
                if (dev->gso_ipv4_max_size ^ max_size) {
                        netif_set_gso_ipv4_max_size(dev, max_size);
                        status |= DO_SETLINK_MODIFIED;
@@ -3285,6 +3301,7 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
        struct net_device *dev;
        unsigned int num_tx_queues = 1;
        unsigned int num_rx_queues = 1;
+       int err;
 
        if (tb[IFLA_NUM_TX_QUEUES])
                num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
@@ -3320,13 +3337,18 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
+       err = validate_linkmsg(dev, tb, extack);
+       if (err < 0) {
+               free_netdev(dev);
+               return ERR_PTR(err);
+       }
+
        dev_net_set(dev, net);
        dev->rtnl_link_ops = ops;
        dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
 
        if (tb[IFLA_MTU]) {
                u32 mtu = nla_get_u32(tb[IFLA_MTU]);
-               int err;
 
                err = dev_validate_mtu(dev, mtu, extack);
                if (err) {
index 515ec5c..cea28d3 100644 (file)
@@ -5224,8 +5224,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
        } else {
                skb = skb_clone(orig_skb, GFP_ATOMIC);
 
-               if (skb_orphan_frags_rx(skb, GFP_ATOMIC))
+               if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
+                       kfree_skb(skb);
                        return;
+               }
        }
        if (!skb)
                return;
index f818837..a29508e 100644 (file)
@@ -481,8 +481,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                msg_rx = sk_psock_peek_msg(psock);
        }
 out:
-       if (psock->work_state.skb && copied > 0)
-               schedule_work(&psock->work);
        return copied;
 }
 EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@@ -624,42 +622,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
 
 static void sk_psock_skb_state(struct sk_psock *psock,
                               struct sk_psock_work_state *state,
-                              struct sk_buff *skb,
                               int len, int off)
 {
        spin_lock_bh(&psock->ingress_lock);
        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
-               state->skb = skb;
                state->len = len;
                state->off = off;
-       } else {
-               sock_drop(psock->sk, skb);
        }
        spin_unlock_bh(&psock->ingress_lock);
 }
 
 static void sk_psock_backlog(struct work_struct *work)
 {
-       struct sk_psock *psock = container_of(work, struct sk_psock, work);
+       struct delayed_work *dwork = to_delayed_work(work);
+       struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
        struct sk_psock_work_state *state = &psock->work_state;
        struct sk_buff *skb = NULL;
+       u32 len = 0, off = 0;
        bool ingress;
-       u32 len, off;
        int ret;
 
        mutex_lock(&psock->work_mutex);
-       if (unlikely(state->skb)) {
-               spin_lock_bh(&psock->ingress_lock);
-               skb = state->skb;
+       if (unlikely(state->len)) {
                len = state->len;
                off = state->off;
-               state->skb = NULL;
-               spin_unlock_bh(&psock->ingress_lock);
        }
-       if (skb)
-               goto start;
 
-       while ((skb = skb_dequeue(&psock->ingress_skb))) {
+       while ((skb = skb_peek(&psock->ingress_skb))) {
                len = skb->len;
                off = 0;
                if (skb_bpf_strparser(skb)) {
@@ -668,7 +657,6 @@ static void sk_psock_backlog(struct work_struct *work)
                        off = stm->offset;
                        len = stm->full_len;
                }
-start:
                ingress = skb_bpf_ingress(skb);
                skb_bpf_redirect_clear(skb);
                do {
@@ -678,22 +666,28 @@ start:
                                                          len, ingress);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
-                                       sk_psock_skb_state(psock, state, skb,
-                                                          len, off);
+                                       sk_psock_skb_state(psock, state, len, off);
+
+                                       /* Delay slightly to prioritize any
+                                        * other work that might be here.
+                                        */
+                                       if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
+                                               schedule_delayed_work(&psock->work, 1);
                                        goto end;
                                }
                                /* Hard errors break pipe and stop xmit. */
                                sk_psock_report_error(psock, ret ? -ret : EPIPE);
                                sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
-                               sock_drop(psock->sk, skb);
                                goto end;
                        }
                        off += ret;
                        len -= ret;
                } while (len);
 
-               if (!ingress)
+               skb = skb_dequeue(&psock->ingress_skb);
+               if (!ingress) {
                        kfree_skb(skb);
+               }
        }
 end:
        mutex_unlock(&psock->work_mutex);
@@ -734,7 +728,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
        INIT_LIST_HEAD(&psock->link);
        spin_lock_init(&psock->link_lock);
 
-       INIT_WORK(&psock->work, sk_psock_backlog);
+       INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
        mutex_init(&psock->work_mutex);
        INIT_LIST_HEAD(&psock->ingress_msg);
        spin_lock_init(&psock->ingress_lock);
@@ -786,11 +780,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
                skb_bpf_redirect_clear(skb);
                sock_drop(psock->sk, skb);
        }
-       kfree_skb(psock->work_state.skb);
-       /* We null the skb here to ensure that calls to sk_psock_backlog
-        * do not pick up the free'd skb.
-        */
-       psock->work_state.skb = NULL;
        __sk_psock_purge_ingress_msg(psock);
 }
 
@@ -809,7 +798,6 @@ void sk_psock_stop(struct sk_psock *psock)
        spin_lock_bh(&psock->ingress_lock);
        sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
        sk_psock_cork_free(psock);
-       __sk_psock_zap_ingress(psock);
        spin_unlock_bh(&psock->ingress_lock);
 }
 
@@ -823,7 +811,8 @@ static void sk_psock_destroy(struct work_struct *work)
 
        sk_psock_done_strp(psock);
 
-       cancel_work_sync(&psock->work);
+       cancel_delayed_work_sync(&psock->work);
+       __sk_psock_zap_ingress(psock);
        mutex_destroy(&psock->work_mutex);
 
        psock_progs_drop(&psock->progs);
@@ -938,7 +927,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
        }
 
        skb_queue_tail(&psock_other->ingress_skb, skb);
-       schedule_work(&psock_other->work);
+       schedule_delayed_work(&psock_other->work, 0);
        spin_unlock_bh(&psock_other->ingress_lock);
        return 0;
 }
@@ -990,10 +979,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                err = -EIO;
                sk_other = psock->sk;
                if (sock_flag(sk_other, SOCK_DEAD) ||
-                   !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
-                       skb_bpf_redirect_clear(skb);
+                   !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                        goto out_free;
-               }
 
                skb_bpf_set_ingress(skb);
 
@@ -1018,22 +1005,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
-                               schedule_work(&psock->work);
+                               schedule_delayed_work(&psock->work, 0);
                                err = 0;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
-                       if (err < 0) {
-                               skb_bpf_redirect_clear(skb);
+                       if (err < 0)
                                goto out_free;
-                       }
                }
                break;
        case __SK_REDIRECT:
+               tcp_eat_skb(psock->sk, skb);
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
 out_free:
+               skb_bpf_redirect_clear(skb);
+               tcp_eat_skb(psock->sk, skb);
                sock_drop(psock->sk, skb);
        }
 
@@ -1049,7 +1037,7 @@ static void sk_psock_write_space(struct sock *sk)
        psock = sk_psock(sk);
        if (likely(psock)) {
                if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
-                       schedule_work(&psock->work);
+                       schedule_delayed_work(&psock->work, 0);
                write_space = psock->saved_write_space;
        }
        rcu_read_unlock();
@@ -1078,8 +1066,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
                skb_dst_drop(skb);
                skb_bpf_redirect_clear(skb);
                ret = bpf_prog_run_pin_on_cpu(prog, skb);
-               if (ret == SK_PASS)
-                       skb_bpf_set_strparser(skb);
+               skb_bpf_set_strparser(skb);
                ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
                skb->sk = NULL;
        }
@@ -1183,12 +1170,11 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
        int ret = __SK_DROP;
        int len = skb->len;
 
-       skb_get(skb);
-
        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                len = 0;
+               tcp_eat_skb(sk, skb);
                sock_drop(sk, skb);
                goto out;
        }
@@ -1212,12 +1198,22 @@ out:
 static void sk_psock_verdict_data_ready(struct sock *sk)
 {
        struct socket *sock = sk->sk_socket;
+       int copied;
 
        trace_sk_data_ready(sk);
 
        if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
                return;
-       sock->ops->read_skb(sk, sk_psock_verdict_recv);
+       copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
+       if (copied >= 0) {
+               struct sk_psock *psock;
+
+               rcu_read_lock();
+               psock = sk_psock(sk);
+               if (psock)
+                       psock->saved_data_ready(sk);
+               rcu_read_unlock();
+       }
 }
 
 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
index 5440e67..24f2761 100644 (file)
@@ -2381,7 +2381,6 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
        u32 max_segs = 1;
 
-       sk_dst_set(sk, dst);
        sk->sk_route_caps = dst->dev->features;
        if (sk_is_tcp(sk))
                sk->sk_route_caps |= NETIF_F_GSO;
@@ -2400,6 +2399,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
                }
        }
        sk->sk_gso_max_segs = max_segs;
+       sk_dst_set(sk, dst);
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
 
index 7c189c2..00afb66 100644 (file)
@@ -1644,9 +1644,10 @@ void sock_map_close(struct sock *sk, long timeout)
                rcu_read_unlock();
                sk_psock_stop(psock);
                release_sock(sk);
-               cancel_work_sync(&psock->work);
+               cancel_delayed_work_sync(&psock->work);
                sk_psock_put(sk, psock);
        }
+
        /* Make sure we do not recurse. This is a bug.
         * Leak the socket instead of crashing on a stack overflow.
         */
index 777b091..c23ebab 100644 (file)
@@ -204,11 +204,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
        if (ret < 0)
                goto err_xa_alloc;
 
-       devlink->netdevice_nb.notifier_call = devlink_port_netdevice_event;
-       ret = register_netdevice_notifier(&devlink->netdevice_nb);
-       if (ret)
-               goto err_register_netdevice_notifier;
-
        devlink->dev = dev;
        devlink->ops = ops;
        xa_init_flags(&devlink->ports, XA_FLAGS_ALLOC);
@@ -233,8 +228,6 @@ struct devlink *devlink_alloc_ns(const struct devlink_ops *ops,
 
        return devlink;
 
-err_register_netdevice_notifier:
-       xa_erase(&devlinks, devlink->index);
 err_xa_alloc:
        kfree(devlink);
        return NULL;
@@ -266,8 +259,6 @@ void devlink_free(struct devlink *devlink)
        xa_destroy(&devlink->params);
        xa_destroy(&devlink->ports);
 
-       WARN_ON_ONCE(unregister_netdevice_notifier(&devlink->netdevice_nb));
-
        xa_erase(&devlinks, devlink->index);
 
        devlink_put(devlink);
@@ -303,6 +294,10 @@ static struct pernet_operations devlink_pernet_ops __net_initdata = {
        .pre_exit = devlink_pernet_pre_exit,
 };
 
+static struct notifier_block devlink_port_netdevice_nb = {
+       .notifier_call = devlink_port_netdevice_event,
+};
+
 static int __init devlink_init(void)
 {
        int err;
@@ -311,6 +306,9 @@ static int __init devlink_init(void)
        if (err)
                goto out;
        err = register_pernet_subsys(&devlink_pernet_ops);
+       if (err)
+               goto out;
+       err = register_netdevice_notifier(&devlink_port_netdevice_nb);
 
 out:
        WARN_ON(err);
index e133f42..62921b2 100644 (file)
@@ -50,7 +50,6 @@ struct devlink {
        u8 reload_failed:1;
        refcount_t refcount;
        struct rcu_work rwork;
-       struct notifier_block netdevice_nb;
        char priv[] __aligned(NETDEV_ALIGN);
 };
 
index dffca2f..cd02549 100644 (file)
@@ -7073,10 +7073,9 @@ int devlink_port_netdevice_event(struct notifier_block *nb,
        struct devlink_port *devlink_port = netdev->devlink_port;
        struct devlink *devlink;
 
-       devlink = container_of(nb, struct devlink, netdevice_nb);
-
-       if (!devlink_port || devlink_port->devlink != devlink)
+       if (!devlink_port)
                return NOTIFY_OK;
+       devlink = devlink_port->devlink;
 
        switch (event) {
        case NETDEV_POST_INIT:
index e6adc5d..6d37bab 100644 (file)
@@ -102,7 +102,7 @@ struct handshake_req_alloc_test_param handshake_req_alloc_params[] = {
        {
                .desc                   = "handshake_req_alloc excessive privsize",
                .proto                  = &handshake_req_alloc_proto_6,
-               .gfp                    = GFP_KERNEL,
+               .gfp                    = GFP_KERNEL | __GFP_NOWARN,
                .expect_success         = false,
        },
        {
@@ -209,6 +209,7 @@ static void handshake_req_submit_test4(struct kunit *test)
 {
        struct handshake_req *req, *result;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -218,9 +219,10 @@ static void handshake_req_submit_test4(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -241,6 +243,7 @@ static void handshake_req_submit_test5(struct kunit *test)
        struct handshake_req *req;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        int saved, err;
 
@@ -251,9 +254,10 @@ static void handshake_req_submit_test5(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        net = sock_net(sock->sk);
        hn = handshake_pernet(net);
@@ -276,6 +280,7 @@ static void handshake_req_submit_test6(struct kunit *test)
 {
        struct handshake_req *req1, *req2;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -287,9 +292,10 @@ static void handshake_req_submit_test6(struct kunit *test)
        err = __sock_create(&init_net, PF_INET, SOCK_STREAM, IPPROTO_TCP,
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
        KUNIT_ASSERT_NOT_NULL(test, sock->sk);
+       sock->file = filp;
 
        /* Act */
        err = handshake_req_submit(sock, req1, GFP_KERNEL);
@@ -307,6 +313,7 @@ static void handshake_req_cancel_test1(struct kunit *test)
 {
        struct handshake_req *req;
        struct socket *sock;
+       struct file *filp;
        bool result;
        int err;
 
@@ -318,8 +325,9 @@ static void handshake_req_cancel_test1(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -340,6 +348,7 @@ static void handshake_req_cancel_test2(struct kunit *test)
        struct handshake_req *req, *next;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        bool result;
        int err;
@@ -352,8 +361,9 @@ static void handshake_req_cancel_test2(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -380,6 +390,7 @@ static void handshake_req_cancel_test3(struct kunit *test)
        struct handshake_req *req, *next;
        struct handshake_net *hn;
        struct socket *sock;
+       struct file *filp;
        struct net *net;
        bool result;
        int err;
@@ -392,8 +403,9 @@ static void handshake_req_cancel_test3(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
@@ -436,6 +448,7 @@ static void handshake_req_destroy_test1(struct kunit *test)
 {
        struct handshake_req *req;
        struct socket *sock;
+       struct file *filp;
        int err;
 
        /* Arrange */
@@ -448,8 +461,9 @@ static void handshake_req_destroy_test1(struct kunit *test)
                            &sock, 1);
        KUNIT_ASSERT_EQ(test, err, 0);
 
-       sock->file = sock_alloc_file(sock, O_NONBLOCK, NULL);
-       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sock->file);
+       filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, filp);
+       sock->file = filp;
 
        err = handshake_req_submit(sock, req, GFP_KERNEL);
        KUNIT_ASSERT_EQ(test, err, 0);
index 4dac965..8aeaadc 100644 (file)
@@ -31,6 +31,7 @@ struct handshake_req {
        struct list_head                hr_list;
        struct rhash_head               hr_rhash;
        unsigned long                   hr_flags;
+       struct file                     *hr_file;
        const struct handshake_proto    *hr_proto;
        struct sock                     *hr_sk;
        void                            (*hr_odestruct)(struct sock *sk);
index 35c9c44..1086653 100644 (file)
@@ -48,7 +48,7 @@ int handshake_genl_notify(struct net *net, const struct handshake_proto *proto,
                                proto->hp_handler_class))
                return -ESRCH;
 
-       msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+       msg = genlmsg_new(GENLMSG_DEFAULT_SIZE, flags);
        if (!msg)
                return -ENOMEM;
 
@@ -99,9 +99,6 @@ static int handshake_dup(struct socket *sock)
        struct file *file;
        int newfd;
 
-       if (!sock->file)
-               return -EBADF;
-
        file = get_file(sock->file);
        newfd = get_unused_fd_flags(O_CLOEXEC);
        if (newfd < 0) {
@@ -142,15 +139,16 @@ int handshake_nl_accept_doit(struct sk_buff *skb, struct genl_info *info)
                goto out_complete;
        }
        err = req->hr_proto->hp_accept(req, info, fd);
-       if (err)
+       if (err) {
+               fput(sock->file);
                goto out_complete;
+       }
 
        trace_handshake_cmd_accept(net, req, req->hr_sk, fd);
        return 0;
 
 out_complete:
        handshake_complete(req, -EIO, NULL);
-       fput(sock->file);
 out_status:
        trace_handshake_cmd_accept_err(net, req, NULL, err);
        return err;
@@ -159,8 +157,8 @@ out_status:
 int handshake_nl_done_doit(struct sk_buff *skb, struct genl_info *info)
 {
        struct net *net = sock_net(skb->sk);
+       struct handshake_req *req = NULL;
        struct socket *sock = NULL;
-       struct handshake_req *req;
        int fd, status, err;
 
        if (GENL_REQ_ATTR_CHECK(info, HANDSHAKE_A_DONE_SOCKFD))
index 94d5cef..d78d41a 100644 (file)
@@ -239,6 +239,7 @@ int handshake_req_submit(struct socket *sock, struct handshake_req *req,
        }
        req->hr_odestruct = req->hr_sk->sk_destruct;
        req->hr_sk->sk_destruct = handshake_sk_destruct;
+       req->hr_file = sock->file;
 
        ret = -EOPNOTSUPP;
        net = sock_net(req->hr_sk);
@@ -334,6 +335,9 @@ bool handshake_req_cancel(struct sock *sk)
                return false;
        }
 
+       /* Request accepted and waiting for DONE */
+       fput(req->hr_file);
+
 out_true:
        trace_handshake_cancel(net, req, sk);
 
index fcbeb63..b735f5c 100644 (file)
@@ -31,6 +31,7 @@ struct tls_handshake_req {
        int                     th_type;
        unsigned int            th_timeout_ms;
        int                     th_auth_mode;
+       const char              *th_peername;
        key_serial_t            th_keyring;
        key_serial_t            th_certificate;
        key_serial_t            th_privkey;
@@ -48,6 +49,7 @@ tls_handshake_req_init(struct handshake_req *req,
        treq->th_timeout_ms = args->ta_timeout_ms;
        treq->th_consumer_done = args->ta_done;
        treq->th_consumer_data = args->ta_data;
+       treq->th_peername = args->ta_peername;
        treq->th_keyring = args->ta_keyring;
        treq->th_num_peerids = 0;
        treq->th_certificate = TLS_NO_CERT;
@@ -214,6 +216,12 @@ static int tls_handshake_accept(struct handshake_req *req,
        ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_MESSAGE_TYPE, treq->th_type);
        if (ret < 0)
                goto out_cancel;
+       if (treq->th_peername) {
+               ret = nla_put_string(msg, HANDSHAKE_A_ACCEPT_PEERNAME,
+                                    treq->th_peername);
+               if (ret < 0)
+                       goto out_cancel;
+       }
        if (treq->th_timeout_ms) {
                ret = nla_put_u32(msg, HANDSHAKE_A_ACCEPT_TIMEOUT, treq->th_timeout_ms);
                if (ret < 0)
index c4aab3a..4a76ebf 100644 (file)
@@ -586,6 +586,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
 
        add_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending += writebias;
+       sk->sk_wait_pending++;
 
        /* Basic assumption: if someone sets sk->sk_err, he _must_
         * change state of the socket from TCP_SYN_*.
@@ -601,6 +602,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
        }
        remove_wait_queue(sk_sleep(sk), &wait);
        sk->sk_write_pending -= writebias;
+       sk->sk_wait_pending--;
        return timeo;
 }
 
index 65ad425..1386787 100644 (file)
@@ -1142,6 +1142,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);
 
+               newsk->sk_wait_pending = 0;
                inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;
                newicsk->icsk_bind2_hash = NULL;
index b511ff0..8e97d8d 100644 (file)
@@ -317,7 +317,14 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
                        ipc->tos = val;
                        ipc->priority = rt_tos2priority(ipc->tos);
                        break;
-
+               case IP_PROTOCOL:
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
+                               return -EINVAL;
+                       val = *(int *)CMSG_DATA(cmsg);
+                       if (val < 1 || val > 255)
+                               return -EINVAL;
+                       ipc->protocol = val;
+                       break;
                default:
                        return -EINVAL;
                }
@@ -1761,6 +1768,9 @@ int do_ip_getsockopt(struct sock *sk, int level, int optname,
        case IP_LOCAL_PORT_RANGE:
                val = inet->local_port_range.hi << 16 | inet->local_port_range.lo;
                break;
+       case IP_PROTOCOL:
+               val = inet_sk(sk)->inet_num;
+               break;
        default:
                sockopt_release_sock(sk);
                return -ENOPROTOOPT;
index ff712bf..eadf1c9 100644 (file)
@@ -532,6 +532,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
        }
 
        ipcm_init_sk(&ipc, inet);
+       /* Keep backward compat */
+       if (hdrincl)
+               ipc.protocol = IPPROTO_RAW;
 
        if (msg->msg_controllen) {
                err = ip_cmsg_send(sk, msg, &ipc, false);
@@ -599,7 +602,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
        flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
                           RT_SCOPE_UNIVERSE,
-                          hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+                          hdrincl ? ipc.protocol : sk->sk_protocol,
                           inet_sk_flowi_flags(sk) |
                            (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk->sk_uid);
index 40fe70f..88dfe51 100644 (file)
@@ -34,8 +34,8 @@ static int ip_ttl_min = 1;
 static int ip_ttl_max = 255;
 static int tcp_syn_retries_min = 1;
 static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
-static int ip_ping_group_range_min[] = { 0, 0 };
-static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+static unsigned long ip_ping_group_range_min[] = { 0, 0 };
+static unsigned long ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
 static u32 u32_max_div_HZ = UINT_MAX / HZ;
 static int one_day_secs = 24 * 3600;
 static u32 fib_multipath_hash_fields_all_mask __maybe_unused =
@@ -165,7 +165,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
 {
        struct user_namespace *user_ns = current_user_ns();
        int ret;
-       gid_t urange[2];
+       unsigned long urange[2];
        kgid_t low, high;
        struct ctl_table tmp = {
                .data = &urange,
@@ -178,7 +178,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
        inet_get_ping_group_range_table(table, &low, &high);
        urange[0] = from_kgid_munged(user_ns, low);
        urange[1] = from_kgid_munged(user_ns, high);
-       ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+       ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
 
        if (write && ret == 0) {
                low = make_kgid(user_ns, urange[0]);
index 4d6392c..8d20d92 100644 (file)
@@ -1571,7 +1571,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
  * calculation of whether or not we must ACK for the sake of
  * a window update.
  */
-static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
+void __tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool time_to_ack = false;
@@ -1773,7 +1773,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
                WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
                tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
                used = recv_actor(sk, skb);
-               consume_skb(skb);
                if (used < 0) {
                        if (!copied)
                                copied = used;
@@ -1787,14 +1786,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
                        break;
                }
        }
-       WRITE_ONCE(tp->copied_seq, seq);
-
-       tcp_rcv_space_adjust(sk);
-
-       /* Clean up data we have read: This will do ACK frames. */
-       if (copied > 0)
-               __tcp_cleanup_rbuf(sk, copied);
-
        return copied;
 }
 EXPORT_SYMBOL(tcp_read_skb);
@@ -3090,6 +3081,12 @@ int tcp_disconnect(struct sock *sk, int flags)
        int old_state = sk->sk_state;
        u32 seq;
 
+       /* Deny disconnect if other threads are blocked in sk_wait_event()
+        * or inet_wait_for_connect().
+        */
+       if (sk->sk_wait_pending)
+               return -EBUSY;
+
        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);
 
@@ -4081,7 +4078,8 @@ int do_tcp_getsockopt(struct sock *sk, int level,
        switch (optname) {
        case TCP_MAXSEG:
                val = tp->mss_cache;
-               if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
+               if (tp->rx_opt.user_mss &&
+                   ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                        val = tp->rx_opt.user_mss;
                if (tp->repair)
                        val = tp->rx_opt.mss_clamp;
index 2e95474..5f93918 100644 (file)
 #include <net/inet_common.h>
 #include <net/tls.h>
 
+void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tcp;
+       int copied;
+
+       if (!skb || !skb->len || !sk_is_tcp(sk))
+               return;
+
+       if (skb_bpf_strparser(skb))
+               return;
+
+       tcp = tcp_sk(sk);
+       copied = tcp->copied_seq + skb->len;
+       WRITE_ONCE(tcp->copied_seq, copied);
+       tcp_rcv_space_adjust(sk);
+       __tcp_cleanup_rbuf(sk, skb->len);
+}
+
 static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
                           struct sk_msg *msg, u32 apply_bytes, int flags)
 {
@@ -174,14 +192,34 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
        return ret;
 }
 
+static bool is_next_msg_fin(struct sk_psock *psock)
+{
+       struct scatterlist *sge;
+       struct sk_msg *msg_rx;
+       int i;
+
+       msg_rx = sk_psock_peek_msg(psock);
+       i = msg_rx->sg.start;
+       sge = sk_msg_elem(msg_rx, i);
+       if (!sge->length) {
+               struct sk_buff *skb = msg_rx->skb;
+
+               if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+                       return true;
+       }
+       return false;
+}
+
 static int tcp_bpf_recvmsg_parser(struct sock *sk,
                                  struct msghdr *msg,
                                  size_t len,
                                  int flags,
                                  int *addr_len)
 {
+       struct tcp_sock *tcp = tcp_sk(sk);
+       u32 seq = tcp->copied_seq;
        struct sk_psock *psock;
-       int copied;
+       int copied = 0;
 
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
@@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
                return tcp_recvmsg(sk, msg, len, flags, addr_len);
 
        lock_sock(sk);
+
+       /* We may have received data on the sk_receive_queue pre-accept and
+        * then we can not use read_skb in this context because we haven't
+        * assigned a sk_socket yet so have no link to the ops. The work-around
+        * is to check the sk_receive_queue and in these cases read skbs off
+        * queue again. The read_skb hook is not running at this point because
+        * of lock_sock so we avoid having multiple runners in read_skb.
+        */
+       if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
+               tcp_data_ready(sk);
+               /* This handles the ENOMEM errors if we both receive data
+                * pre accept and are already under memory pressure. At least
+                * let user know to retry.
+                */
+               if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
+                       copied = -EAGAIN;
+                       goto out;
+               }
+       }
+
 msg_bytes_ready:
        copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
+       /* The typical case for EFAULT is the socket was gracefully
+        * shutdown with a FIN pkt. So check here the other case is
+        * some error on copy_page_to_iter which would be unexpected.
+        * On fin return correct return code to zero.
+        */
+       if (copied == -EFAULT) {
+               bool is_fin = is_next_msg_fin(psock);
+
+               if (is_fin) {
+                       copied = 0;
+                       seq++;
+                       goto out;
+               }
+       }
+       seq += copied;
        if (!copied) {
                long timeo;
                int data;
@@ -233,6 +306,10 @@ msg_bytes_ready:
                copied = -EAGAIN;
        }
 out:
+       WRITE_ONCE(tcp->copied_seq, seq);
+       tcp_rcv_space_adjust(sk);
+       if (copied > 0)
+               __tcp_cleanup_rbuf(sk, copied);
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied;
index 61b6710..bf8b222 100644 (file)
@@ -4530,7 +4530,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
        }
 }
 
-static void tcp_sack_compress_send_ack(struct sock *sk)
+void tcp_sack_compress_send_ack(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
index 39bda2b..06d2573 100644 (file)
@@ -829,6 +829,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                                   inet_twsk(sk)->tw_priority : sk->sk_priority;
                transmit_time = tcp_transmit_time(sk);
                xfrm_sk_clone_policy(ctl_sk, sk);
+       } else {
+               ctl_sk->sk_mark = 0;
+               ctl_sk->sk_priority = 0;
        }
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -836,7 +839,6 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                              &arg, arg.iov[0].iov_len,
                              transmit_time);
 
-       ctl_sk->sk_mark = 0;
        xfrm_sk_free_policy(ctl_sk);
        sock_net_set(ctl_sk, &init_net);
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
@@ -935,7 +937,6 @@ static void tcp_v4_send_ack(const struct sock *sk,
                              &arg, arg.iov[0].iov_len,
                              transmit_time);
 
-       ctl_sk->sk_mark = 0;
        sock_net_set(ctl_sk, &init_net);
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        local_bh_enable();
index 45dda78..4851211 100644 (file)
@@ -60,12 +60,12 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
-       __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;
+       __wsum delta;
 
        th = tcp_hdr(skb);
        thlen = th->doff * 4;
@@ -75,7 +75,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        if (!pskb_may_pull(skb, thlen))
                goto out;
 
-       oldlen = (u16)~skb->len;
+       oldlen = ~skb->len;
        __skb_pull(skb, thlen);
 
        mss = skb_shinfo(skb)->gso_size;
@@ -110,7 +110,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;
 
-       delta = htonl(oldlen + (thlen + mss));
+       delta = (__force __wsum)htonl(oldlen + thlen + mss);
 
        skb = segs;
        th = tcp_hdr(skb);
@@ -119,8 +119,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);
 
-       newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
-                                              (__force u32)delta));
+       newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));
 
        while (skb->next) {
                th->fin = th->psh = 0;
@@ -165,11 +164,11 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }
 
-       delta = htonl(oldlen + (skb_tail_pointer(skb) -
-                               skb_transport_header(skb)) +
-                     skb->data_len);
-       th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
-                               (__force u32)delta));
+       delta = (__force __wsum)htonl(oldlen +
+                                     (skb_tail_pointer(skb) -
+                                      skb_transport_header(skb)) +
+                                     skb->data_len);
+       th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
index b839c2f..39eb947 100644 (file)
@@ -290,9 +290,19 @@ static int tcp_write_timeout(struct sock *sk)
 void tcp_delack_timer_handler(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct tcp_sock *tp = tcp_sk(sk);
 
-       if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
-           !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
+       if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+               return;
+
+       /* Handling the sack compression case */
+       if (tp->compressed_ack) {
+               tcp_mstamp_refresh(tp);
+               tcp_sack_compress_send_ack(sk);
+               return;
+       }
+
+       if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                return;
 
        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
@@ -312,7 +322,7 @@ void tcp_delack_timer_handler(struct sock *sk)
                        inet_csk_exit_pingpong_mode(sk);
                        icsk->icsk_ack.ato      = TCP_ATO_MIN;
                }
-               tcp_mstamp_refresh(tcp_sk(sk));
+               tcp_mstamp_refresh(tp);
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }
index aa32afd..9482def 100644 (file)
@@ -1818,7 +1818,7 @@ EXPORT_SYMBOL(__skb_recv_udp);
 int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
        struct sk_buff *skb;
-       int err, copied;
+       int err;
 
 try_again:
        skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
@@ -1837,10 +1837,7 @@ try_again:
        }
 
        WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-
-       return copied;
+       return recv_actor(sk, skb);
 }
 EXPORT_SYMBOL(udp_read_skb);
 
index e0c9cc3..56d94d2 100644 (file)
@@ -64,6 +64,8 @@ struct proto  udplite_prot = {
        .per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
 
        .sysctl_mem        = sysctl_udp_mem,
+       .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+       .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size          = sizeof(struct udp_sock),
        .h.udp_table       = &udplite_table,
 };
index a8d961d..5fa0e37 100644 (file)
@@ -569,24 +569,6 @@ looped_back:
                return -1;
        }
 
-       if (skb_cloned(skb)) {
-               if (pskb_expand_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE, 0,
-                                    GFP_ATOMIC)) {
-                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                                       IPSTATS_MIB_OUTDISCARDS);
-                       kfree_skb(skb);
-                       return -1;
-               }
-       } else {
-               err = skb_cow_head(skb, IPV6_RPL_SRH_WORST_SWAP_SIZE);
-               if (unlikely(err)) {
-                       kfree_skb(skb);
-                       return -1;
-               }
-       }
-
-       hdr = (struct ipv6_rpl_sr_hdr *)skb_transport_header(skb);
-
        if (!pskb_may_pull(skb, ipv6_rpl_srh_size(n, hdr->cmpri,
                                                  hdr->cmpre))) {
                kfree_skb(skb);
@@ -630,6 +612,17 @@ looped_back:
        skb_pull(skb, ((hdr->hdrlen + 1) << 3));
        skb_postpull_rcsum(skb, oldhdr,
                           sizeof(struct ipv6hdr) + ((hdr->hdrlen + 1) << 3));
+       if (unlikely(!hdr->segments_left)) {
+               if (pskb_expand_head(skb, sizeof(struct ipv6hdr) + ((chdr->hdrlen + 1) << 3), 0,
+                                    GFP_ATOMIC)) {
+                       __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_OUTDISCARDS);
+                       kfree_skb(skb);
+                       kfree(buf);
+                       return -1;
+               }
+
+               oldhdr = ipv6_hdr(skb);
+       }
        skb_push(skb, ((chdr->hdrlen + 1) << 3) + sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        skb_mac_header_rebuild(skb);
index da46c42..49e31e4 100644 (file)
@@ -143,6 +143,8 @@ int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type)
                        optlen = 1;
                        break;
                default:
+                       if (len < 2)
+                               goto bad;
                        optlen = nh[offset + 1] + 2;
                        if (optlen > len)
                                goto bad;
index 2438da5..bac768d 100644 (file)
@@ -2491,7 +2491,7 @@ static int ipv6_route_native_seq_show(struct seq_file *seq, void *v)
        const struct net_device *dev;
 
        if (rt->nh)
-               fib6_nh = nexthop_fib6_nh_bh(rt->nh);
+               fib6_nh = nexthop_fib6_nh(rt->nh);
 
        seq_printf(seq, "%pi6 %02x ", &rt->fib6_dst.addr, rt->fib6_dst.plen);
 
@@ -2556,14 +2556,14 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
 
        if (tbl) {
                h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
-               node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
+               node = rcu_dereference(hlist_next_rcu(&tbl->tb6_hlist));
        } else {
                h = 0;
                node = NULL;
        }
 
        while (!node && h < FIB6_TABLE_HASHSZ) {
-               node = rcu_dereference_bh(
+               node = rcu_dereference(
                        hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
        }
        return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
@@ -2593,7 +2593,7 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        if (!v)
                goto iter_table;
 
-       n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+       n = rcu_dereference(((struct fib6_info *)v)->fib6_next);
        if (n)
                return n;
 
@@ -2619,12 +2619,12 @@ iter_table:
 }
 
 static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU_BH)
+       __acquires(RCU)
 {
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
 
-       rcu_read_lock_bh();
+       rcu_read_lock();
        iter->tbl = ipv6_route_seq_next_table(NULL, net);
        iter->skip = *pos;
 
@@ -2645,7 +2645,7 @@ static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
 }
 
 static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
-       __releases(RCU_BH)
+       __releases(RCU)
 {
        struct net *net = seq_file_net(seq);
        struct ipv6_route_iter *iter = seq->private;
@@ -2653,7 +2653,7 @@ static void ipv6_route_native_seq_stop(struct seq_file *seq, void *v)
        if (ipv6_route_iter_active(iter))
                fib6_walker_unlink(net, &iter->w);
 
-       rcu_read_unlock_bh();
+       rcu_read_unlock();
 }
 
 #if IS_BUILTIN(CONFIG_IPV6) && defined(CONFIG_BPF_SYSCALL)
index a4ecfc9..da80974 100644 (file)
@@ -1015,12 +1015,14 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                                            ntohl(tun_id),
                                            ntohl(md->u.index), truncate,
                                            false);
+                       proto = htons(ETH_P_ERSPAN);
                } else if (md->version == 2) {
                        erspan_build_header_v2(skb,
                                               ntohl(tun_id),
                                               md->u.md2.dir,
                                               get_hwid(&md->u.md2),
                                               truncate, false);
+                       proto = htons(ETH_P_ERSPAN2);
                } else {
                        goto tx_err;
                }
@@ -1043,24 +1045,25 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                        break;
                }
 
-               if (t->parms.erspan_ver == 1)
+               if (t->parms.erspan_ver == 1) {
                        erspan_build_header(skb, ntohl(t->parms.o_key),
                                            t->parms.index,
                                            truncate, false);
-               else if (t->parms.erspan_ver == 2)
+                       proto = htons(ETH_P_ERSPAN);
+               } else if (t->parms.erspan_ver == 2) {
                        erspan_build_header_v2(skb, ntohl(t->parms.o_key),
                                               t->parms.dir,
                                               t->parms.hwid,
                                               truncate, false);
-               else
+                       proto = htons(ETH_P_ERSPAN2);
+               } else {
                        goto tx_err;
+               }
 
                fl6.daddr = t->parms.raddr;
        }
 
        /* Push GRE header. */
-       proto = (t->parms.erspan_ver == 1) ? htons(ETH_P_ERSPAN)
-                                          : htons(ETH_P_ERSPAN2);
        gre_build_header(skb, 8, TUNNEL_SEQ, proto, 0, htonl(atomic_fetch_inc(&t->o_seqno)));
 
        /* TooBig packet may have updated dst->dev's mtu */
index 7d0adb6..44ee7a2 100644 (file)
@@ -793,7 +793,8 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 
                if (!proto)
                        proto = inet->inet_num;
-               else if (proto != inet->inet_num)
+               else if (proto != inet->inet_num &&
+                        inet->inet_num != IPPROTO_RAW)
                        return -EINVAL;
 
                if (proto > 255)
index e3aec46..392aaa3 100644 (file)
@@ -6412,9 +6412,9 @@ static struct ctl_table ipv6_route_table_template[] = {
        {
                .procname       =       "skip_notify_on_dev_down",
                .data           =       &init_net.ipv6.sysctl.skip_notify_on_dev_down,
-               .maxlen         =       sizeof(int),
+               .maxlen         =       sizeof(u8),
                .mode           =       0644,
-               .proc_handler   =       proc_dointvec_minmax,
+               .proc_handler   =       proc_dou8vec_minmax,
                .extra1         =       SYSCTL_ZERO,
                .extra2         =       SYSCTL_ONE,
        },
index 67eaf3c..3bab0cc 100644 (file)
@@ -60,6 +60,8 @@ struct proto udplitev6_prot = {
        .per_cpu_fw_alloc  = &udp_memory_per_cpu_fw_alloc,
 
        .sysctl_mem        = sysctl_udp_mem,
+       .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_udp_wmem_min),
+       .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_udp_rmem_min),
        .obj_size          = sizeof(struct udp6_sock),
        .h.udp_table       = &udplite_table,
 };
index a815f5a..31ab12f 100644 (file)
@@ -1940,7 +1940,8 @@ static u32 gen_reqid(struct net *net)
 }
 
 static int
-parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
+parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_policy *pol,
+                  struct sadb_x_ipsecrequest *rq)
 {
        struct net *net = xp_net(xp);
        struct xfrm_tmpl *t = xp->xfrm_vec + xp->xfrm_nr;
@@ -1958,9 +1959,12 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
        if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0)
                return -EINVAL;
        t->mode = mode;
-       if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE)
+       if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) {
+               if ((mode == XFRM_MODE_TUNNEL || mode == XFRM_MODE_BEET) &&
+                   pol->sadb_x_policy_dir == IPSEC_DIR_OUTBOUND)
+                       return -EINVAL;
                t->optional = 1;
-       else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
+       } else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) {
                t->reqid = rq->sadb_x_ipsecrequest_reqid;
                if (t->reqid > IPSEC_MANUAL_REQID_MAX)
                        t->reqid = 0;
@@ -2002,7 +2006,7 @@ parse_ipsecrequests(struct xfrm_policy *xp, struct sadb_x_policy *pol)
                    rq->sadb_x_ipsecrequest_len < sizeof(*rq))
                        return -EINVAL;
 
-               if ((err = parse_ipsecrequest(xp, rq)) < 0)
+               if ((err = parse_ipsecrequest(xp, pol, rq)) < 0)
                        return err;
                len -= rq->sadb_x_ipsecrequest_len;
                rq = (void*)((u8*)rq + rq->sadb_x_ipsecrequest_len);
index 7317e4a..86b2036 100644 (file)
@@ -1578,9 +1578,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
                sdata_dereference(link->u.ap.unsol_bcast_probe_resp,
                                  sdata);
 
-       /* abort any running channel switch */
+       /* abort any running channel switch or color change */
        mutex_lock(&local->mtx);
        link_conf->csa_active = false;
+       link_conf->color_change_active = false;
        if (link->csa_block_tx) {
                ieee80211_wake_vif_queues(local, sdata,
                                          IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -3589,7 +3590,7 @@ void ieee80211_channel_switch_disconnect(struct ieee80211_vif *vif, bool block_t
 EXPORT_SYMBOL(ieee80211_channel_switch_disconnect);
 
 static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
-                                         u32 *changed)
+                                         u64 *changed)
 {
        int err;
 
@@ -3632,7 +3633,7 @@ static int ieee80211_set_after_csa_beacon(struct ieee80211_sub_if_data *sdata,
 static int __ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_local *local = sdata->local;
-       u32 changed = 0;
+       u64 changed = 0;
        int err;
 
        sdata_assert_lock(sdata);
index dbc34fb..77c90ed 100644 (file)
@@ -258,7 +258,8 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata,
 
 static enum nl80211_chan_width
 ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
-                                         struct ieee80211_chanctx_conf *conf)
+                                         struct ieee80211_chanctx *ctx,
+                                         struct ieee80211_link_data *rsvd_for)
 {
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
        struct ieee80211_vif *vif = &sdata->vif;
@@ -267,13 +268,14 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
        rcu_read_lock();
        for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
                enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT;
-               struct ieee80211_bss_conf *link_conf =
-                       rcu_dereference(sdata->vif.link_conf[link_id]);
+               struct ieee80211_link_data *link =
+                       rcu_dereference(sdata->link[link_id]);
 
-               if (!link_conf)
+               if (!link)
                        continue;
 
-               if (rcu_access_pointer(link_conf->chanctx_conf) != conf)
+               if (link != rsvd_for &&
+                   rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf)
                        continue;
 
                switch (vif->type) {
@@ -287,7 +289,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
                         * point, so take the width from the chandef, but
                         * account also for TDLS peers
                         */
-                       width = max(link_conf->chandef.width,
+                       width = max(link->conf->chandef.width,
                                    ieee80211_get_max_required_bw(sdata, link_id));
                        break;
                case NL80211_IFTYPE_P2P_DEVICE:
@@ -296,7 +298,7 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
                case NL80211_IFTYPE_ADHOC:
                case NL80211_IFTYPE_MESH_POINT:
                case NL80211_IFTYPE_OCB:
-                       width = link_conf->chandef.width;
+                       width = link->conf->chandef.width;
                        break;
                case NL80211_IFTYPE_WDS:
                case NL80211_IFTYPE_UNSPECIFIED:
@@ -316,7 +318,8 @@ ieee80211_get_chanctx_vif_max_required_bw(struct ieee80211_sub_if_data *sdata,
 
 static enum nl80211_chan_width
 ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx_conf *conf)
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for)
 {
        struct ieee80211_sub_if_data *sdata;
        enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT;
@@ -328,7 +331,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
                if (!ieee80211_sdata_running(sdata))
                        continue;
 
-               width = ieee80211_get_chanctx_vif_max_required_bw(sdata, conf);
+               width = ieee80211_get_chanctx_vif_max_required_bw(sdata, ctx,
+                                                                 rsvd_for);
 
                max_bw = max(max_bw, width);
        }
@@ -336,8 +340,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
        /* use the configured bandwidth in case of monitor interface */
        sdata = rcu_dereference(local->monitor_sdata);
        if (sdata &&
-           rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == conf)
-               max_bw = max(max_bw, conf->def.width);
+           rcu_access_pointer(sdata->vif.bss_conf.chanctx_conf) == &ctx->conf)
+               max_bw = max(max_bw, ctx->conf.def.width);
 
        rcu_read_unlock();
 
@@ -349,8 +353,10 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
  * the max of min required widths of all the interfaces bound to this
  * channel context.
  */
-static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                            struct ieee80211_chanctx *ctx)
+static u32
+_ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
+                                 struct ieee80211_chanctx *ctx,
+                                 struct ieee80211_link_data *rsvd_for)
 {
        enum nl80211_chan_width max_bw;
        struct cfg80211_chan_def min_def;
@@ -370,7 +376,7 @@ static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
                return 0;
        }
 
-       max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf);
+       max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for);
 
        /* downgrade chandef up to max_bw */
        min_def = ctx->conf.def;
@@ -448,9 +454,10 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local,
  * channel context.
  */
 void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx *ctx)
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for)
 {
-       u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx);
+       u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
 
        if (!changed)
                return;
@@ -464,10 +471,11 @@ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, ctx, false);
 }
 
-static void ieee80211_change_chanctx(struct ieee80211_local *local,
-                                    struct ieee80211_chanctx *ctx,
-                                    struct ieee80211_chanctx *old_ctx,
-                                    const struct cfg80211_chan_def *chandef)
+static void _ieee80211_change_chanctx(struct ieee80211_local *local,
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_chanctx *old_ctx,
+                                     const struct cfg80211_chan_def *chandef,
+                                     struct ieee80211_link_data *rsvd_for)
 {
        u32 changed;
 
@@ -492,7 +500,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, old_ctx, true);
 
        if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) {
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+               ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
                return;
        }
 
@@ -502,7 +510,7 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
 
        /* check if min chanctx also changed */
        changed = IEEE80211_CHANCTX_CHANGE_WIDTH |
-                 _ieee80211_recalc_chanctx_min_def(local, ctx);
+                 _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for);
        drv_change_chanctx(local, ctx, changed);
 
        if (!local->use_chanctx) {
@@ -514,6 +522,14 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local,
        ieee80211_chan_bw_change(local, old_ctx, false);
 }
 
+static void ieee80211_change_chanctx(struct ieee80211_local *local,
+                                    struct ieee80211_chanctx *ctx,
+                                    struct ieee80211_chanctx *old_ctx,
+                                    const struct cfg80211_chan_def *chandef)
+{
+       _ieee80211_change_chanctx(local, ctx, old_ctx, chandef, NULL);
+}
+
 static struct ieee80211_chanctx *
 ieee80211_find_chanctx(struct ieee80211_local *local,
                       const struct cfg80211_chan_def *chandef,
@@ -638,7 +654,7 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local,
        ctx->conf.rx_chains_dynamic = 1;
        ctx->mode = mode;
        ctx->conf.radar_enabled = false;
-       ieee80211_recalc_chanctx_min_def(local, ctx);
+       _ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
 
        return ctx;
 }
@@ -855,6 +871,9 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link,
        }
 
        if (new_ctx) {
+               /* recalc now, considering the link we'll use it for */
+               ieee80211_recalc_chanctx_min_def(local, new_ctx, link);
+
                ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx);
                if (ret)
                        goto out;
@@ -873,12 +892,12 @@ out:
                ieee80211_recalc_chanctx_chantype(local, curr_ctx);
                ieee80211_recalc_smps_chanctx(local, curr_ctx);
                ieee80211_recalc_radar_chanctx(local, curr_ctx);
-               ieee80211_recalc_chanctx_min_def(local, curr_ctx);
+               ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL);
        }
 
        if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) {
                ieee80211_recalc_txpower(sdata, false);
-               ieee80211_recalc_chanctx_min_def(local, new_ctx);
+               ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
        }
 
        if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
@@ -1270,7 +1289,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
 
        ieee80211_link_update_chandef(link, &link->reserved_chandef);
 
-       ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef);
+       _ieee80211_change_chanctx(local, new_ctx, old_ctx, chandef, link);
 
        vif_chsw[0].vif = &sdata->vif;
        vif_chsw[0].old_ctx = &old_ctx->conf;
@@ -1300,7 +1319,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link)
        if (ieee80211_chanctx_refcount(local, old_ctx) == 0)
                ieee80211_free_chanctx(local, old_ctx);
 
-       ieee80211_recalc_chanctx_min_def(local, new_ctx);
+       ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL);
        ieee80211_recalc_smps_chanctx(local, new_ctx);
        ieee80211_recalc_radar_chanctx(local, new_ctx);
 
@@ -1665,7 +1684,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
                ieee80211_recalc_chanctx_chantype(local, ctx);
                ieee80211_recalc_smps_chanctx(local, ctx);
                ieee80211_recalc_radar_chanctx(local, ctx);
-               ieee80211_recalc_chanctx_min_def(local, ctx);
+               ieee80211_recalc_chanctx_min_def(local, ctx, NULL);
 
                list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links,
                                         reserved_chanctx_list) {
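
The chan.c hunks above thread a new rsvd_for argument through the minimum-width
recalculation so that a link which is about to be assigned to a context is
counted even though its chanctx_conf pointer has not been published yet. A
minimal sketch of that skip condition, written as a hypothetical helper
(link_uses_ctx is not part of the patch; the types and the
rcu_access_pointer() test are taken from the hunks):

    /* Illustrative only: a link counts towards the context's minimum width
     * if it already points at this context, or if it is the link the
     * context is being reserved for (rsvd_for), whose pointer is not yet
     * published.
     */
    static bool link_uses_ctx(struct ieee80211_link_data *link,
                              struct ieee80211_chanctx *ctx,
                              struct ieee80211_link_data *rsvd_for)
    {
            if (link == rsvd_for)
                    return true;

            return rcu_access_pointer(link->conf->chanctx_conf) == &ctx->conf;
    }
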
index 729f261..0322aba 100644 (file)
@@ -3,7 +3,7 @@
  * HE handling
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 - 2022 Intel Corporation
+ * Copyright(c) 2019 - 2023 Intel Corporation
  */
 
 #include "ieee80211_i.h"
@@ -114,6 +114,7 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
                                  struct link_sta_info *link_sta)
 {
        struct ieee80211_sta_he_cap *he_cap = &link_sta->pub->he_cap;
+       const struct ieee80211_sta_he_cap *own_he_cap_ptr;
        struct ieee80211_sta_he_cap own_he_cap;
        struct ieee80211_he_cap_elem *he_cap_ie_elem = (void *)he_cap_ie;
        u8 he_ppe_size;
@@ -123,12 +124,16 @@ ieee80211_he_cap_ie_to_sta_he_cap(struct ieee80211_sub_if_data *sdata,
 
        memset(he_cap, 0, sizeof(*he_cap));
 
-       if (!he_cap_ie ||
-           !ieee80211_get_he_iftype_cap(sband,
-                                        ieee80211_vif_type_p2p(&sdata->vif)))
+       if (!he_cap_ie)
                return;
 
-       own_he_cap = sband->iftype_data->he_cap;
+       own_he_cap_ptr =
+               ieee80211_get_he_iftype_cap(sband,
+                                           ieee80211_vif_type_p2p(&sdata->vif));
+       if (!own_he_cap_ptr)
+               return;
+
+       own_he_cap = *own_he_cap_ptr;
 
        /* Make sure size is OK */
        mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap_ie_elem);
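
The he.c hunk stops reading sband->iftype_data->he_cap directly and instead
looks up the per-interface-type capabilities, which may legitimately be
absent. A hedged sketch of the resulting look-up-then-copy pattern, using only
names that appear in the hunk:

    const struct ieee80211_sta_he_cap *cap;

    cap = ieee80211_get_he_iftype_cap(sband,
                                      ieee80211_vif_type_p2p(&sdata->vif));
    if (!cap)
            return;         /* iftype has no HE data; leave he_cap zeroed */

    own_he_cap = *cap;      /* copy only after the NULL check */
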
index a0a7839..b0372e7 100644 (file)
@@ -2537,7 +2537,8 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local,
 void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local,
                                   struct ieee80211_chanctx *chanctx);
 void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local,
-                                     struct ieee80211_chanctx *ctx);
+                                     struct ieee80211_chanctx *ctx,
+                                     struct ieee80211_link_data *rsvd_for);
 bool ieee80211_is_radar_required(struct ieee80211_local *local);
 
 void ieee80211_dfs_cac_timer(unsigned long data);
index e13a035..bd8d6f9 100644 (file)
@@ -1217,6 +1217,7 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
                                               const u16 *inner)
 {
        unsigned int skb_len = skb->len;
+       bool at_extension = false;
        bool added = false;
        int i, j;
        u8 *len, *list_len = NULL;
@@ -1228,7 +1229,6 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
        for (i = 0; i < PRESENT_ELEMS_MAX && outer[i]; i++) {
                u16 elem = outer[i];
                bool have_inner = false;
-               bool at_extension = false;
 
                /* should at least be sorted in the sense of normal -> ext */
                WARN_ON(at_extension && elem < PRESENT_ELEM_EXT_OFFS);
@@ -1257,8 +1257,14 @@ static void ieee80211_add_non_inheritance_elem(struct sk_buff *skb,
                }
                *list_len += 1;
                skb_put_u8(skb, (u8)elem);
+               added = true;
        }
 
+       /* if we added a list but no extension list, make a zero-len one */
+       if (added && (!at_extension || !list_len))
+               skb_put_u8(skb, 0);
+
+       /* if nothing added remove extension element completely */
        if (!added)
                skb_trim(skb, skb_len);
        else
index 58222c0..d996aa2 100644 (file)
@@ -4965,7 +4965,9 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
        }
 
        if (unlikely(rx->sta && rx->sta->sta.mlo) &&
-           is_unicast_ether_addr(hdr->addr1)) {
+           is_unicast_ether_addr(hdr->addr1) &&
+           !ieee80211_is_probe_resp(hdr->frame_control) &&
+           !ieee80211_is_beacon(hdr->frame_control)) {
                /* translate to MLD addresses */
                if (ether_addr_equal(link->conf->addr, hdr->addr1))
                        ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
index de5d69f..db0d013 100644 (file)
@@ -67,7 +67,7 @@
                        __entry->min_freq_offset = (c)->chan ? (c)->chan->freq_offset : 0;      \
                        __entry->min_chan_width = (c)->width;                           \
                        __entry->min_center_freq1 = (c)->center_freq1;                  \
-                       __entry->freq1_offset = (c)->freq1_offset;                      \
+                       __entry->min_freq1_offset = (c)->freq1_offset;                  \
                        __entry->min_center_freq2 = (c)->center_freq2;
 #define MIN_CHANDEF_PR_FMT     " min_control:%d.%03d MHz min_width:%d min_center: %d.%03d/%d MHz"
 #define MIN_CHANDEF_PR_ARG     __entry->min_control_freq, __entry->min_freq_offset,    \
index 1a33274..7f1c7f6 100644 (file)
@@ -3791,6 +3791,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
        ieee80211_tx_result r;
        struct ieee80211_vif *vif = txq->vif;
        int q = vif->hw_queue[txq->ac];
+       unsigned long flags;
        bool q_stopped;
 
        WARN_ON_ONCE(softirq_count() == 0);
@@ -3799,9 +3800,9 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
                return NULL;
 
 begin:
-       spin_lock(&local->queue_stop_reason_lock);
+       spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
        q_stopped = local->queue_stop_reasons[q];
-       spin_unlock(&local->queue_stop_reason_lock);
+       spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 
        if (unlikely(q_stopped)) {
                /* mark for waking later */
@@ -5527,7 +5528,7 @@ ieee80211_beacon_get_template_ema_list(struct ieee80211_hw *hw,
 {
        struct ieee80211_ema_beacons *ema_beacons = NULL;
 
-       WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, false, link_id, 0,
+       WARN_ON(__ieee80211_beacon_get(hw, vif, NULL, true, link_id, 0,
                                       &ema_beacons));
 
        return ema_beacons;
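
The tx.c hunk switches queue_stop_reason_lock from spin_lock() to the _irqsave
variant, the usual way to make a lock safe against also being taken from a
context that runs with interrupts disabled. A self-contained sketch of that
primitive (example_lock and example_flag are hypothetical):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock  */
    static bool example_flag;               /* hypothetical state */

    static bool read_flag(void)
    {
            unsigned long flags;
            bool val;

            /* _irqsave disables local interrupts while the lock is held, so
             * a path that takes the same lock with IRQs disabled (or from an
             * interrupt) cannot deadlock against this one.
             */
            spin_lock_irqsave(&example_lock, flags);
            val = example_flag;
            spin_unlock_irqrestore(&example_lock, flags);

            return val;
    }
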
index 1527d6a..4bf7615 100644 (file)
@@ -3015,7 +3015,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata,
 
                chanctx = container_of(chanctx_conf, struct ieee80211_chanctx,
                                       conf);
-               ieee80211_recalc_chanctx_min_def(local, chanctx);
+               ieee80211_recalc_chanctx_min_def(local, chanctx, NULL);
        }
  unlock:
        mutex_unlock(&local->chanctx_mtx);
index 78c9245..76612bc 100644 (file)
@@ -87,8 +87,15 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
        unsigned int subflows_max;
        int ret = 0;
 
-       if (mptcp_pm_is_userspace(msk))
-               return mptcp_userspace_pm_active(msk);
+       if (mptcp_pm_is_userspace(msk)) {
+               if (mptcp_userspace_pm_active(msk)) {
+                       spin_lock_bh(&pm->lock);
+                       pm->subflows++;
+                       spin_unlock_bh(&pm->lock);
+                       return true;
+               }
+               return false;
+       }
 
        subflows_max = mptcp_pm_get_subflows_max(msk);
 
@@ -181,8 +188,16 @@ void mptcp_pm_subflow_check_next(struct mptcp_sock *msk, const struct sock *ssk,
        struct mptcp_pm_data *pm = &msk->pm;
        bool update_subflows;
 
-       update_subflows = (subflow->request_join || subflow->mp_join) &&
-                         mptcp_pm_is_kernel(msk);
+       update_subflows = subflow->request_join || subflow->mp_join;
+       if (mptcp_pm_is_userspace(msk)) {
+               if (update_subflows) {
+                       spin_lock_bh(&pm->lock);
+                       pm->subflows--;
+                       spin_unlock_bh(&pm->lock);
+               }
+               return;
+       }
+
        if (!READ_ONCE(pm->work_pending) && !update_subflows)
                return;
 
index bc343da..59f8f31 100644 (file)
@@ -1558,6 +1558,24 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
        return ret;
 }
 
+void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list)
+{
+       struct mptcp_rm_list alist = { .nr = 0 };
+       struct mptcp_pm_addr_entry *entry;
+
+       list_for_each_entry(entry, rm_list, list) {
+               remove_anno_list_by_saddr(msk, &entry->addr);
+               if (alist.nr < MPTCP_RM_IDS_MAX)
+                       alist.ids[alist.nr++] = entry->addr.id;
+       }
+
+       if (alist.nr) {
+               spin_lock_bh(&msk->pm.lock);
+               mptcp_pm_remove_addr(msk, &alist);
+               spin_unlock_bh(&msk->pm.lock);
+       }
+}
+
 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
                                        struct list_head *rm_list)
 {
index 27a2758..b06aa58 100644 (file)
@@ -69,6 +69,7 @@ static int mptcp_userspace_pm_append_new_local_addr(struct mptcp_sock *msk,
                                                        MPTCP_PM_MAX_ADDR_ID + 1,
                                                        1);
                list_add_tail_rcu(&e->list, &msk->pm.userspace_pm_local_addr_list);
+               msk->pm.local_addr_used++;
                ret = e->addr.id;
        } else if (match) {
                ret = entry->addr.id;
@@ -79,6 +80,31 @@ append_err:
        return ret;
 }
 
+/* If the subflow is closed from the other peer (not via a
+ * subflow destroy command), we want to keep the entry so as
+ * not to assign the same ID to another address and to be
+ * able to send RM_ADDR after the removal of the subflow.
+ */
+static int mptcp_userspace_pm_delete_local_addr(struct mptcp_sock *msk,
+                                               struct mptcp_pm_addr_entry *addr)
+{
+       struct mptcp_pm_addr_entry *entry, *tmp;
+
+       list_for_each_entry_safe(entry, tmp, &msk->pm.userspace_pm_local_addr_list, list) {
+               if (mptcp_addresses_equal(&entry->addr, &addr->addr, false)) {
+                       /* TODO: a refcount is needed because the entry can
+                        * be used multiple times (e.g. fullmesh mode).
+                        */
+                       list_del_rcu(&entry->list);
+                       kfree(entry);
+                       msk->pm.local_addr_used--;
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
+
 int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk,
                                                   unsigned int id,
                                                   u8 *flags, int *ifindex)
@@ -171,6 +197,7 @@ int mptcp_nl_cmd_announce(struct sk_buff *skb, struct genl_info *info)
        spin_lock_bh(&msk->pm.lock);
 
        if (mptcp_pm_alloc_anno_list(msk, &addr_val)) {
+               msk->pm.add_addr_signaled++;
                mptcp_pm_announce_addr(msk, &addr_val.addr, false);
                mptcp_pm_nl_addr_send_ack(msk);
        }
@@ -232,7 +259,7 @@ int mptcp_nl_cmd_remove(struct sk_buff *skb, struct genl_info *info)
 
        list_move(&match->list, &free_list);
 
-       mptcp_pm_remove_addrs_and_subflows(msk, &free_list);
+       mptcp_pm_remove_addrs(msk, &free_list);
 
        release_sock((struct sock *)msk);
 
@@ -251,6 +278,7 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
        struct nlattr *raddr = info->attrs[MPTCP_PM_ATTR_ADDR_REMOTE];
        struct nlattr *token = info->attrs[MPTCP_PM_ATTR_TOKEN];
        struct nlattr *laddr = info->attrs[MPTCP_PM_ATTR_ADDR];
+       struct mptcp_pm_addr_entry local = { 0 };
        struct mptcp_addr_info addr_r;
        struct mptcp_addr_info addr_l;
        struct mptcp_sock *msk;
@@ -302,12 +330,26 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
                goto create_err;
        }
 
+       local.addr = addr_l;
+       err = mptcp_userspace_pm_append_new_local_addr(msk, &local);
+       if (err < 0) {
+               GENL_SET_ERR_MSG(info, "did not match address and id");
+               goto create_err;
+       }
+
        lock_sock(sk);
 
        err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
 
        release_sock(sk);
 
+       spin_lock_bh(&msk->pm.lock);
+       if (err)
+               mptcp_userspace_pm_delete_local_addr(msk, &local);
+       else
+               msk->pm.subflows++;
+       spin_unlock_bh(&msk->pm.lock);
+
  create_err:
        sock_put((struct sock *)msk);
        return err;
@@ -420,7 +462,11 @@ int mptcp_nl_cmd_sf_destroy(struct sk_buff *skb, struct genl_info *info)
        ssk = mptcp_nl_find_ssk(msk, &addr_l, &addr_r);
        if (ssk) {
                struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+               struct mptcp_pm_addr_entry entry = { .addr = addr_l };
 
+               spin_lock_bh(&msk->pm.lock);
+               mptcp_userspace_pm_delete_local_addr(msk, &entry);
+               spin_unlock_bh(&msk->pm.lock);
                mptcp_subflow_shutdown(sk, ssk, RCV_SHUTDOWN | SEND_SHUTDOWN);
                mptcp_close_ssk(sk, ssk, subflow);
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
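
mptcp_userspace_pm_delete_local_addr() above walks the local address list and
frees the matching entry in place, which requires the _safe list iterator. A
generic sketch of that idiom with a hypothetical entry type:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical entry type standing in for mptcp_pm_addr_entry. */
    struct addr_entry {
            struct list_head list;
            int id;
    };

    /* _safe iteration keeps a pointer to the next entry, so the current one
     * can be unlinked and freed while walking the list.
     */
    static int delete_entry(struct list_head *head, int id)
    {
            struct addr_entry *e, *tmp;

            list_for_each_entry_safe(e, tmp, head, list) {
                    if (e->id != id)
                            continue;
                    list_del(&e->list);
                    kfree(e);
                    return 0;
            }

            return -EINVAL;
    }
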
index 08dc53f..67311e7 100644 (file)
@@ -90,8 +90,8 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
        if (err)
                return err;
 
-       msk->first = ssock->sk;
-       msk->subflow = ssock;
+       WRITE_ONCE(msk->first, ssock->sk);
+       WRITE_ONCE(msk->subflow, ssock);
        subflow = mptcp_subflow_ctx(ssock->sk);
        list_add(&subflow->node, &msk->conn_list);
        sock_hold(ssock->sk);
@@ -603,7 +603,7 @@ static bool mptcp_check_data_fin(struct sock *sk)
                WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
                WRITE_ONCE(msk->rcv_data_fin, 0);
 
-               sk->sk_shutdown |= RCV_SHUTDOWN;
+               WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
 
                switch (sk->sk_state) {
@@ -825,6 +825,13 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
        mptcp_data_unlock(sk);
 }
 
+static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
+{
+       mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
+       WRITE_ONCE(msk->allow_infinite_fallback, false);
+       mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
+}
+
 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
 {
        struct sock *sk = (struct sock *)msk;
@@ -839,6 +846,7 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
                mptcp_sock_graft(ssk, sk->sk_socket);
 
        mptcp_sockopt_sync_locked(msk, ssk);
+       mptcp_subflow_joined(msk, ssk);
        return true;
 }
 
@@ -910,7 +918,7 @@ static void mptcp_check_for_eof(struct mptcp_sock *msk)
                /* hopefully temporary hack: propagate shutdown status
                 * to msk, when all subflows agree on it
                 */
-               sk->sk_shutdown |= RCV_SHUTDOWN;
+               WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
 
                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
                sk->sk_data_ready(sk);
@@ -1702,7 +1710,6 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 
        lock_sock(ssk);
        msg->msg_flags |= MSG_DONTWAIT;
-       msk->connect_flags = O_NONBLOCK;
        msk->fastopening = 1;
        ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
        msk->fastopening = 0;
@@ -2283,7 +2290,7 @@ static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
 {
        if (msk->subflow) {
                iput(SOCK_INODE(msk->subflow));
-               msk->subflow = NULL;
+               WRITE_ONCE(msk->subflow, NULL);
        }
 }
 
@@ -2420,7 +2427,7 @@ out_release:
        sock_put(ssk);
 
        if (ssk == msk->first)
-               msk->first = NULL;
+               WRITE_ONCE(msk->first, NULL);
 
 out:
        if (ssk == msk->last_snd)
@@ -2527,7 +2534,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
        }
 
        inet_sk_state_store(sk, TCP_CLOSE);
-       sk->sk_shutdown = SHUTDOWN_MASK;
+       WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
        smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
        set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
 
@@ -2721,7 +2728,7 @@ static int __mptcp_init_sock(struct sock *sk)
        WRITE_ONCE(msk->rmem_released, 0);
        msk->timer_ival = TCP_RTO_MIN;
 
-       msk->first = NULL;
+       WRITE_ONCE(msk->first, NULL);
        inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
        WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
        WRITE_ONCE(msk->allow_infinite_fallback, true);
@@ -2959,7 +2966,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
        bool do_cancel_work = false;
        int subflows_alive = 0;
 
-       sk->sk_shutdown = SHUTDOWN_MASK;
+       WRITE_ONCE(sk->sk_shutdown, SHUTDOWN_MASK);
 
        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) {
                mptcp_listen_inuse_dec(sk);
@@ -3039,7 +3046,7 @@ static void mptcp_close(struct sock *sk, long timeout)
        sock_put(sk);
 }
 
-void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
+static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
 {
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
        const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
@@ -3102,7 +3109,7 @@ static int mptcp_disconnect(struct sock *sk, int flags)
        mptcp_pm_data_reset(msk);
        mptcp_ca_reset(sk);
 
-       sk->sk_shutdown = 0;
+       WRITE_ONCE(sk->sk_shutdown, 0);
        sk_error_report(sk);
        return 0;
 }
@@ -3116,9 +3123,10 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
 }
 #endif
 
-struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct mptcp_options_received *mp_opt,
-                           struct request_sock *req)
+struct sock *mptcp_sk_clone_init(const struct sock *sk,
+                                const struct mptcp_options_received *mp_opt,
+                                struct sock *ssk,
+                                struct request_sock *req)
 {
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
@@ -3137,7 +3145,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        msk = mptcp_sk(nsk);
        msk->local_key = subflow_req->local_key;
        msk->token = subflow_req->token;
-       msk->subflow = NULL;
+       WRITE_ONCE(msk->subflow, NULL);
        msk->in_accept_queue = 1;
        WRITE_ONCE(msk->fully_established, false);
        if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD)
@@ -3150,10 +3158,30 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
        msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
 
        sock_reset_flag(nsk, SOCK_RCU_FREE);
-       /* will be fully established after successful MPC subflow creation */
-       inet_sk_state_store(nsk, TCP_SYN_RECV);
-
        security_inet_csk_clone(nsk, req);
+
+       /* this can't race with mptcp_close(), as the msk is
+        * not yet exposed to user-space
+        */
+       inet_sk_state_store(nsk, TCP_ESTABLISHED);
+
+       /* The msk maintains a ref to each subflow in the connections list */
+       WRITE_ONCE(msk->first, ssk);
+       list_add(&mptcp_subflow_ctx(ssk)->node, &msk->conn_list);
+       sock_hold(ssk);
+
+       /* new mpc subflow takes ownership of the newly
+        * created mptcp socket
+        */
+       mptcp_token_accept(subflow_req, msk);
+
+       /* set msk addresses early to ensure mptcp_pm_get_local_id()
+        * uses the correct data
+        */
+       mptcp_copy_inaddrs(nsk, ssk);
+       mptcp_propagate_sndbuf(nsk, ssk);
+
+       mptcp_rcv_space_init(msk, ssk);
        bh_unlock_sock(nsk);
 
        /* note: the newly allocated socket refcount is 2 now */
@@ -3185,7 +3213,7 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
        struct socket *listener;
        struct sock *newsk;
 
-       listener = msk->subflow;
+       listener = READ_ONCE(msk->subflow);
        if (WARN_ON_ONCE(!listener)) {
                *err = -EINVAL;
                return NULL;
@@ -3465,14 +3493,16 @@ bool mptcp_finish_join(struct sock *ssk)
                return false;
        }
 
-       if (!list_empty(&subflow->node))
-               goto out;
+       /* active subflow, already present inside the conn_list */
+       if (!list_empty(&subflow->node)) {
+               mptcp_subflow_joined(msk, ssk);
+               return true;
+       }
 
        if (!mptcp_pm_allow_new_subflow(msk))
                goto err_prohibited;
 
-       /* active connections are already on conn_list.
-        * If we can't acquire msk socket lock here, let the release callback
+       /* If we can't acquire msk socket lock here, let the release callback
         * handle it
         */
        mptcp_data_lock(parent);
@@ -3495,11 +3525,6 @@ err_prohibited:
                return false;
        }
 
-       subflow->map_seq = READ_ONCE(msk->ack_seq);
-       WRITE_ONCE(msk->allow_infinite_fallback, false);
-
-out:
-       mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
        return true;
 }
 
@@ -3617,9 +3642,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
         * acquired the subflow socket lock, too.
         */
        if (msk->fastopening)
-               err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
+               err = __inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK, 1);
        else
-               err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
+               err = inet_stream_connect(ssock, uaddr, addr_len, O_NONBLOCK);
        inet_sk(sk)->defer_connect = inet_sk(ssock->sk)->defer_connect;
 
        /* on successful connect, the msk state will be moved to established by
@@ -3632,12 +3657,10 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        mptcp_copy_inaddrs(sk, ssock->sk);
 
-       /* unblocking connect, mptcp-level inet_stream_connect will error out
-        * without changing the socket state, update it here.
+       /* silence EINPROGRESS and let the caller inet_stream_connect
+        * handle the connection in progress
         */
-       if (err == -EINPROGRESS)
-               sk->sk_socket->state = ssock->state;
-       return err;
+       return 0;
 }
 
 static struct proto mptcp_prot = {
@@ -3696,18 +3719,6 @@ unlock:
        return err;
 }
 
-static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr,
-                               int addr_len, int flags)
-{
-       int ret;
-
-       lock_sock(sock->sk);
-       mptcp_sk(sock->sk)->connect_flags = flags;
-       ret = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
-       release_sock(sock->sk);
-       return ret;
-}
-
 static int mptcp_listen(struct socket *sock, int backlog)
 {
        struct mptcp_sock *msk = mptcp_sk(sock->sk);
@@ -3751,10 +3762,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock,
 
        pr_debug("msk=%p", msk);
 
-       /* buggy applications can call accept on socket states other then LISTEN
+       /* Buggy applications can call accept on socket states other than LISTEN
         * but no need to allocate the first subflow just to error out.
         */
-       ssock = msk->subflow;
+       ssock = READ_ONCE(msk->subflow);
        if (!ssock)
                return -EINVAL;
 
@@ -3800,9 +3811,6 @@ static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
 {
        struct sock *sk = (struct sock *)msk;
 
-       if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
-               return EPOLLOUT | EPOLLWRNORM;
-
        if (sk_stream_is_writeable(sk))
                return EPOLLOUT | EPOLLWRNORM;
 
@@ -3820,6 +3828,7 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        struct sock *sk = sock->sk;
        struct mptcp_sock *msk;
        __poll_t mask = 0;
+       u8 shutdown;
        int state;
 
        msk = mptcp_sk(sk);
@@ -3828,23 +3837,30 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
        state = inet_sk_state_load(sk);
        pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
        if (state == TCP_LISTEN) {
-               if (WARN_ON_ONCE(!msk->subflow || !msk->subflow->sk))
+               struct socket *ssock = READ_ONCE(msk->subflow);
+
+               if (WARN_ON_ONCE(!ssock || !ssock->sk))
                        return 0;
 
-               return inet_csk_listen_poll(msk->subflow->sk);
+               return inet_csk_listen_poll(ssock->sk);
        }
 
+       shutdown = READ_ONCE(sk->sk_shutdown);
+       if (shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
+               mask |= EPOLLHUP;
+       if (shutdown & RCV_SHUTDOWN)
+               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
+
        if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
                mask |= mptcp_check_readable(msk);
-               mask |= mptcp_check_writeable(msk);
+               if (shutdown & SEND_SHUTDOWN)
+                       mask |= EPOLLOUT | EPOLLWRNORM;
+               else
+                       mask |= mptcp_check_writeable(msk);
        } else if (state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
                /* cf tcp_poll() note about TFO */
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
-       if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
-               mask |= EPOLLHUP;
-       if (sk->sk_shutdown & RCV_SHUTDOWN)
-               mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
 
        /* This barrier is coupled with smp_wmb() in __mptcp_error_report() */
        smp_rmb();
@@ -3859,7 +3875,7 @@ static const struct proto_ops mptcp_stream_ops = {
        .owner             = THIS_MODULE,
        .release           = inet_release,
        .bind              = mptcp_bind,
-       .connect           = mptcp_stream_connect,
+       .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = mptcp_stream_accept,
        .getname           = inet_getname,
@@ -3954,7 +3970,7 @@ static const struct proto_ops mptcp_v6_stream_ops = {
        .owner             = THIS_MODULE,
        .release           = inet6_release,
        .bind              = mptcp_bind,
-       .connect           = mptcp_stream_connect,
+       .connect           = inet_stream_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = mptcp_stream_accept,
        .getname           = inet6_getname,
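
Several protocol.c hunks above convert plain accesses to sk_shutdown,
msk->first and msk->subflow into WRITE_ONCE()/READ_ONCE() pairs because poll()
and diag now read those fields without the socket lock. A minimal sketch of
that annotation pattern (the helper names are made up; the field and flag are
the ones used in the hunks):

    #include <net/sock.h>

    /* The writer updates sk_shutdown under the socket lock; lockless readers
     * such as poll() pair with it via READ_ONCE so the compiler can neither
     * tear the access nor re-read the field. Sketch only.
     */
    static void mark_rcv_shutdown(struct sock *sk)
    {
            /* caller holds the socket lock */
            WRITE_ONCE(sk->sk_shutdown, sk->sk_shutdown | RCV_SHUTDOWN);
    }

    static bool peer_has_shut_down(const struct sock *sk)
    {
            return READ_ONCE(sk->sk_shutdown) & RCV_SHUTDOWN; /* lockless */
    }
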
index 2d7b2c8..70c957b 100644 (file)
@@ -297,7 +297,6 @@ struct mptcp_sock {
                        nodelay:1,
                        fastopening:1,
                        in_accept_queue:1;
-       int             connect_flags;
        struct work_struct work;
        struct sk_buff  *ooo_last_skb;
        struct rb_root  out_of_order_queue;
@@ -306,7 +305,11 @@ struct mptcp_sock {
        struct list_head rtx_queue;
        struct mptcp_data_frag *first_pending;
        struct list_head join_list;
-       struct socket   *subflow; /* outgoing connect/listener/!mp_capable */
+       struct socket   *subflow; /* outgoing connect/listener/!mp_capable
+                                  * The mptcp ops can safely dereference, using suitable
+                                  * ONCE annotation, the subflow outside the socket
+                                  * lock, as such a socket is only freed after close().
+                                  */
        struct sock     *first;
        struct mptcp_pm_data    pm;
        struct {
@@ -613,7 +616,6 @@ int mptcp_is_checksum_enabled(const struct net *net);
 int mptcp_allow_join_id0(const struct net *net);
 unsigned int mptcp_stale_loss_cnt(const struct net *net);
 int mptcp_get_pm_type(const struct net *net);
-void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk);
 void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
                                     const struct mptcp_options_received *mp_opt);
 bool __mptcp_retransmit_pending_data(struct sock *sk);
@@ -683,9 +685,10 @@ void __init mptcp_proto_init(void);
 int __init mptcp_proto_v6_init(void);
 #endif
 
-struct sock *mptcp_sk_clone(const struct sock *sk,
-                           const struct mptcp_options_received *mp_opt,
-                           struct request_sock *req);
+struct sock *mptcp_sk_clone_init(const struct sock *sk,
+                                const struct mptcp_options_received *mp_opt,
+                                struct sock *ssk,
+                                struct request_sock *req);
 void mptcp_get_options(const struct sk_buff *skb,
                       struct mptcp_options_received *mp_opt);
 
@@ -829,6 +832,7 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
                           bool echo);
 int mptcp_pm_remove_addr(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
 int mptcp_pm_remove_subflow(struct mptcp_sock *msk, const struct mptcp_rm_list *rm_list);
+void mptcp_pm_remove_addrs(struct mptcp_sock *msk, struct list_head *rm_list);
 void mptcp_pm_remove_addrs_and_subflows(struct mptcp_sock *msk,
                                        struct list_head *rm_list);
 
index ba065b6..4688daa 100644 (file)
@@ -815,38 +815,12 @@ create_child:
                ctx->setsockopt_seq = listener->setsockopt_seq;
 
                if (ctx->mp_capable) {
-                       ctx->conn = mptcp_sk_clone(listener->conn, &mp_opt, req);
+                       ctx->conn = mptcp_sk_clone_init(listener->conn, &mp_opt, child, req);
                        if (!ctx->conn)
                                goto fallback;
 
                        owner = mptcp_sk(ctx->conn);
-
-                       /* this can't race with mptcp_close(), as the msk is
-                        * not yet exposted to user-space
-                        */
-                       inet_sk_state_store(ctx->conn, TCP_ESTABLISHED);
-
-                       /* record the newly created socket as the first msk
-                        * subflow, but don't link it yet into conn_list
-                        */
-                       WRITE_ONCE(owner->first, child);
-
-                       /* new mpc subflow takes ownership of the newly
-                        * created mptcp socket
-                        */
-                       owner->setsockopt_seq = ctx->setsockopt_seq;
                        mptcp_pm_new_connection(owner, child, 1);
-                       mptcp_token_accept(subflow_req, owner);
-
-                       /* set msk addresses early to ensure mptcp_pm_get_local_id()
-                        * uses the correct data
-                        */
-                       mptcp_copy_inaddrs(ctx->conn, child);
-                       mptcp_propagate_sndbuf(ctx->conn, child);
-
-                       mptcp_rcv_space_init(owner, child);
-                       list_add(&ctx->node, &owner->conn_list);
-                       sock_hold(child);
 
                        /* with OoO packets we can reach here without ingress
                         * mpc option
index 46ebee9..9a6b647 100644 (file)
@@ -1694,6 +1694,14 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
        bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
 
        do {
+               if (retried) {
+                       __ip_set_get(set);
+                       nfnl_unlock(NFNL_SUBSYS_IPSET);
+                       cond_resched();
+                       nfnl_lock(NFNL_SUBSYS_IPSET);
+                       __ip_set_put(set);
+               }
+
                ip_set_lock(set);
                ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
                ip_set_unlock(set);
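
The ip_set hunk above avoids monopolising the nfnl mutex across retried set
operations: it pins the set with a reference, drops the mutex, reschedules,
then retakes the mutex before retrying. The same shape with hypothetical
names, as a sketch only:

    /* obj_get/obj_put and subsys_lock/subsys_unlock are hypothetical
     * stand-ins for the refcount and mutex helpers of the subsystem.
     */
    if (retried) {
            obj_get(obj);           /* keep the object alive across the gap */
            subsys_unlock();
            cond_resched();         /* let other requests make progress */
            subsys_lock();
            obj_put(obj);
    }
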
index c4ccfec..d119f1d 100644 (file)
@@ -2260,6 +2260,9 @@ static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct,
                return 0;
 
        helper = rcu_dereference(help->helper);
+       if (!helper)
+               return 0;
+
        if (!(helper->flags & NF_CT_HELPER_F_USERSPACE))
                return 0;
 
index d40544c..69c8c8c 100644 (file)
@@ -2976,7 +2976,9 @@ nla_put_failure:
        return -1;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const union nf_inet_addr any_addr;
+#endif
 
 static __be32 nf_expect_get_id(const struct nf_conntrack_expect *exp)
 {
@@ -3460,10 +3462,12 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
 static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
        [CTA_EXPECT_NAT_DIR]    = { .type = NLA_U32 },
        [CTA_EXPECT_NAT_TUPLE]  = { .type = NLA_NESTED },
 };
+#endif
 
 static int
 ctnetlink_parse_expect_nat(const struct nlattr *attr,
index 59fb832..0519d45 100644 (file)
@@ -1600,6 +1600,8 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
 
        if (nft_base_chain_netdev(family, ops->hooknum)) {
                nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS);
+               if (!nest_devs)
+                       goto nla_put_failure;
 
                if (!hook_list)
                        hook_list = &basechain->hook_list;
@@ -3865,12 +3867,10 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net,
        struct nft_trans *trans;
 
        list_for_each_entry(trans, &nft_net->commit_list, list) {
-               struct nft_rule *rule = nft_trans_rule(trans);
-
                if (trans->msg_type == NFT_MSG_NEWRULE &&
                    trans->ctx.chain == chain &&
                    id == nft_trans_rule_id(trans))
-                       return rule;
+                       return nft_trans_rule(trans);
        }
        return ERR_PTR(-ENOENT);
 }
@@ -9007,7 +9007,7 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha
                                continue;
                        }
 
-                       if (WARN_ON_ONCE(data + expr->ops->size > data_boundary))
+                       if (WARN_ON_ONCE(data + size + expr->ops->size > data_boundary))
                                return -ENOMEM;
 
                        memcpy(data + size, expr, expr->ops->size);
index 84eae7c..2527a01 100644 (file)
@@ -323,7 +323,7 @@ static bool nft_bitwise_reduce(struct nft_regs_track *track,
        dreg = priv->dreg;
        regcount = DIV_ROUND_UP(priv->len, NFT_REG32_SIZE);
        for (i = 0; i < regcount; i++, dreg++)
-               track->regs[priv->dreg].bitwise = expr;
+               track->regs[dreg].bitwise = expr;
 
        return false;
 }
index 19ea4d3..2f114aa 100644 (file)
@@ -221,7 +221,7 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
 {
        struct nft_set *set = (struct nft_set *)__set;
        struct rb_node *prev = rb_prev(&rbe->node);
-       struct nft_rbtree_elem *rbe_prev;
+       struct nft_rbtree_elem *rbe_prev = NULL;
        struct nft_set_gc_batch *gcb;
 
        gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
@@ -229,17 +229,21 @@ static int nft_rbtree_gc_elem(const struct nft_set *__set,
                return -ENOMEM;
 
        /* search for expired end interval coming before this element. */
-       do {
+       while (prev) {
                rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
                if (nft_rbtree_interval_end(rbe_prev))
                        break;
 
                prev = rb_prev(prev);
-       } while (prev != NULL);
+       }
+
+       if (rbe_prev) {
+               rb_erase(&rbe_prev->node, &priv->root);
+               atomic_dec(&set->nelems);
+       }
 
-       rb_erase(&rbe_prev->node, &priv->root);
        rb_erase(&rbe->node, &priv->root);
-       atomic_sub(2, &set->nelems);
+       atomic_dec(&set->nelems);
 
        nft_set_gc_batch_add(gcb, rbe);
        nft_set_gc_batch_complete(gcb);
@@ -268,7 +272,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
                               struct nft_set_ext **ext)
 {
        struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
-       struct rb_node *node, *parent, **p, *first = NULL;
+       struct rb_node *node, *next, *parent, **p, *first = NULL;
        struct nft_rbtree *priv = nft_set_priv(set);
        u8 genmask = nft_genmask_next(net);
        int d, err;
@@ -307,7 +311,9 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
         * Values stored in the tree are in reversed order, starting from
         * highest to lowest value.
         */
-       for (node = first; node != NULL; node = rb_next(node)) {
+       for (node = first; node != NULL; node = next) {
+               next = rb_next(node);
+
                rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                if (!nft_set_elem_active(&rbe->ext, genmask))
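
The nft_set_rbtree hunks above fix two hazards of erasing nodes while walking
the tree: the previous-interval pointer may be NULL, and the successor must be
fetched before garbage collection can erase the current node. A small sketch
of the save-next-then-erase idiom (prune() and the expired() predicate are
hypothetical):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* When the loop body may erase nodes, fetch the successor first. */
    static void prune(struct rb_root *root, bool (*expired)(struct rb_node *))
    {
            struct rb_node *node, *next;

            for (node = rb_first(root); node; node = next) {
                    next = rb_next(node);   /* still valid if 'node' is erased */
                    if (expired(node))
                            rb_erase(node, root);
            }
    }
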
index c878041..3a1e0fd 100644 (file)
@@ -1779,7 +1779,7 @@ static int netlink_getsockopt(struct socket *sock, int level, int optname,
                                break;
                        }
                }
-               if (put_user(ALIGN(nlk->ngroups / 8, sizeof(u32)), optlen))
+               if (put_user(ALIGN(BITS_TO_BYTES(nlk->ngroups), sizeof(u32)), optlen))
                        err = -EFAULT;
                netlink_unlock_table();
                return err;
index 3f99b43..e2d2af9 100644 (file)
@@ -123,7 +123,7 @@ void nr_write_internal(struct sock *sk, int frametype)
        unsigned char  *dptr;
        int len, timeout;
 
-       len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+       len = NR_TRANSPORT_LEN;
 
        switch (frametype & 0x0F) {
        case NR_CONNREQ:
@@ -141,7 +141,8 @@ void nr_write_internal(struct sock *sk, int frametype)
                return;
        }
 
-       if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+       skb = alloc_skb(NR_NETWORK_LEN + len, GFP_ATOMIC);
+       if (!skb)
                return;
 
        /*
@@ -149,7 +150,7 @@ void nr_write_internal(struct sock *sk, int frametype)
         */
        skb_reserve(skb, NR_NETWORK_LEN);
 
-       dptr = skb_put(skb, skb_tailroom(skb));
+       dptr = skb_put(skb, len);
 
        switch (frametype & 0x0F) {
        case NR_CONNREQ:
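
The netrom hunk above allocates the skb with explicit headroom for the lower
layers, reserves it, and then claims exactly the transport-layer length with
skb_put() instead of the whole tailroom. A sketch of that
allocate/reserve/put pattern with made-up sizes:

    #include <linux/skbuff.h>
    #include <linux/string.h>

    #define HDR_ROOM        20      /* hypothetical: headroom lower layers push */
    #define PAYLOAD         8       /* hypothetical: bytes this layer writes    */

    static void build_frame(void)
    {
            struct sk_buff *skb;
            unsigned char *dptr;

            skb = alloc_skb(HDR_ROOM + PAYLOAD, GFP_ATOMIC);
            if (!skb)
                    return;

            skb_reserve(skb, HDR_ROOM);     /* leave the headroom untouched    */
            dptr = skb_put(skb, PAYLOAD);   /* claim exactly the payload bytes */
            memset(dptr, 0, PAYLOAD);
            /* ... fill in dptr and hand the skb to the lower layer ... */
            kfree_skb(skb);                 /* placeholder: normally transmitted */
    }
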
index e9ca007..0f23e5e 100644 (file)
@@ -77,13 +77,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features)
 {
        struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
        unsigned int nsh_len, mac_len;
        __be16 proto;
-       int nhoff;
 
        skb_reset_network_header(skb);
 
-       nhoff = skb->network_header - skb->mac_header;
        mac_len = skb->mac_len;
 
        if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -108,15 +107,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        segs = skb_mac_gso_segment(skb, features);
        if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len,
-                                    skb->network_header - nhoff,
-                                    mac_len);
+                                    mac_offset, mac_len);
                goto out;
        }
 
        for (skb = segs; skb; skb = skb->next) {
                skb->protocol = htons(ETH_P_NSH);
                __skb_push(skb, nsh_len);
-               skb_set_mac_header(skb, -nhoff);
+               skb->mac_header = mac_offset;
                skb->network_header = skb->mac_header + mac_len;
                skb->mac_len = mac_len;
        }
index fcee601..58f530f 100644 (file)
@@ -236,9 +236,6 @@ void ovs_dp_detach_port(struct vport *p)
        /* First drop references to device. */
        hlist_del_rcu(&p->dp_hash_node);
 
-       /* Free percpu memory */
-       free_percpu(p->upcall_stats);
-
        /* Then destroy it. */
        ovs_vport_del(p);
 }
@@ -1858,12 +1855,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
                goto err_destroy_portids;
        }
 
-       vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
-       if (!vport->upcall_stats) {
-               err = -ENOMEM;
-               goto err_destroy_vport;
-       }
-
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
                                   info->snd_seq, 0, OVS_DP_CMD_NEW);
        BUG_ON(err < 0);
@@ -1876,8 +1867,6 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(&dp_datapath_genl_family, reply, info);
        return 0;
 
-err_destroy_vport:
-       ovs_dp_detach_port(vport);
 err_destroy_portids:
        kfree(rcu_dereference_raw(dp->upcall_portids));
 err_unlock_and_destroy_meters:
@@ -2322,12 +2311,6 @@ restart:
                goto exit_unlock_free;
        }
 
-       vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
-       if (!vport->upcall_stats) {
-               err = -ENOMEM;
-               goto exit_unlock_free_vport;
-       }
-
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
                                      OVS_VPORT_CMD_NEW, GFP_KERNEL);
@@ -2345,8 +2328,6 @@ restart:
        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;
 
-exit_unlock_free_vport:
-       ovs_dp_detach_port(vport);
 exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
index 7e0f5c4..972ae01 100644 (file)
@@ -124,6 +124,7 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 {
        struct vport *vport;
        size_t alloc_size;
+       int err;
 
        alloc_size = sizeof(struct vport);
        if (priv_size) {
@@ -135,17 +136,29 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
        if (!vport)
                return ERR_PTR(-ENOMEM);
 
+       vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
+       if (!vport->upcall_stats) {
+               err = -ENOMEM;
+               goto err_kfree_vport;
+       }
+
        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
        vport->ops = ops;
        INIT_HLIST_NODE(&vport->dp_hash_node);
 
        if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids)) {
-               kfree(vport);
-               return ERR_PTR(-EINVAL);
+               err = -EINVAL;
+               goto err_free_percpu;
        }
 
        return vport;
+
+err_free_percpu:
+       free_percpu(vport->upcall_stats);
+err_kfree_vport:
+       kfree(vport);
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_alloc);
 
@@ -165,6 +178,7 @@ void ovs_vport_free(struct vport *vport)
         * it is safe to use raw dereference.
         */
        kfree(rcu_dereference_raw(vport->upcall_portids));
+       free_percpu(vport->upcall_stats);
        kfree(vport);
 }
 EXPORT_SYMBOL_GPL(ovs_vport_free);
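
The openvswitch change above moves the per-cpu upcall statistics allocation
into ovs_vport_alloc() and unwinds failures through goto labels in reverse
allocation order. A self-contained sketch of that unwind idiom around a
hypothetical object:

    #include <linux/err.h>
    #include <linux/percpu.h>
    #include <linux/slab.h>

    /* Hypothetical object; the shape mirrors the vport allocation above. */
    struct demo_port {
            u64 __percpu *stats;
            int id;
    };

    static struct demo_port *demo_port_alloc(int id)
    {
            struct demo_port *p;
            int err;

            p = kzalloc(sizeof(*p), GFP_KERNEL);
            if (!p)
                    return ERR_PTR(-ENOMEM);

            p->stats = alloc_percpu(u64);
            if (!p->stats) {
                    err = -ENOMEM;
                    goto err_free_port;
            }

            if (id < 0) {           /* stand-in for a setup step that can fail */
                    err = -EINVAL;
                    goto err_free_stats;
            }
            p->id = id;

            return p;

    err_free_stats:
            free_percpu(p->stats);
    err_free_port:
            kfree(p);
            return ERR_PTR(err);
    }
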
index 94c6a1f..a2dbeb2 100644 (file)
@@ -3201,6 +3201,9 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 
        lock_sock(sk);
        spin_lock(&po->bind_lock);
+       if (!proto)
+               proto = po->num;
+
        rcu_read_lock();
 
        if (po->fanout) {
@@ -3299,7 +3302,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
        memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
        name[sizeof(uaddr->sa_data_min)] = 0;
 
-       return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
+       return packet_do_bind(sk, name, 0, 0);
 }
 
 static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
@@ -3316,8 +3319,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
        if (sll->sll_family != AF_PACKET)
                return -EINVAL;
 
-       return packet_do_bind(sk, NULL, sll->sll_ifindex,
-                             sll->sll_protocol ? : pkt_sk(sk)->num);
+       return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
 }
 
 static struct proto packet_proto = {
index d0c4eda..f6b200c 100644 (file)
@@ -143,7 +143,7 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
        rp = nlmsg_data(nlh);
        rp->pdiag_family = AF_PACKET;
        rp->pdiag_type = sk->sk_type;
-       rp->pdiag_num = ntohs(po->num);
+       rp->pdiag_num = ntohs(READ_ONCE(po->num));
        rp->pdiag_ino = sk_ino;
        sock_diag_save_cookie(sk, rp->pdiag_cookie);
 
index 31f738d..da0b3b5 100644 (file)
@@ -980,6 +980,7 @@ static int __init af_rxrpc_init(void)
        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
 
        ret = -ENOMEM;
+       rxrpc_gen_version_string();
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
                SLAB_HWCACHE_ALIGN, NULL);
index 5d44dc0..e8e14c6 100644 (file)
@@ -1068,6 +1068,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *, const void *, time64_t,
 /*
  * local_event.c
  */
+void rxrpc_gen_version_string(void);
 void rxrpc_send_version_request(struct rxrpc_local *local,
                                struct rxrpc_host_header *hdr,
                                struct sk_buff *skb);
index 5e69ea6..993c69f 100644 (file)
 #include <generated/utsrelease.h>
 #include "ar-internal.h"
 
-static const char rxrpc_version_string[65] = "linux-" UTS_RELEASE " AF_RXRPC";
+static char rxrpc_version_string[65]; // "linux-" UTS_RELEASE " AF_RXRPC";
+
+/*
+ * Generate the VERSION packet string.
+ */
+void rxrpc_gen_version_string(void)
+{
+       snprintf(rxrpc_version_string, sizeof(rxrpc_version_string),
+                "linux-%.49s AF_RXRPC", UTS_RELEASE);
+}
 
 /*
  * Reply to a version request
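
The rxrpc change above builds the VERSION string at init time with a %.49s
precision so an oversized UTS_RELEASE cannot overflow the 65-byte buffer. A
sketch of the same bounded formatting (version_string and
gen_version_string mirror, but are not, the patched symbols):

    #include <linux/kernel.h>
    #include <generated/utsrelease.h>

    static char version_string[65];

    /* "linux-" (6) + at most 49 bytes of the release + " AF_RXRPC" (9) is 64
     * characters, so the terminating NUL always fits in the 65-byte buffer,
     * however long UTS_RELEASE happens to be.
     */
    static void gen_version_string(void)
    {
            snprintf(version_string, sizeof(version_string),
                     "linux-%.49s AF_RXRPC", UTS_RELEASE);
    }
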
index 227cba5..2e9dce0 100644 (file)
@@ -357,23 +357,23 @@ static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
        opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
        if (p->rate_present) {
                psched_ratecfg_getrate(&opt.rate, &p->rate);
-               if ((police->params->rate.rate_bytes_ps >= (1ULL << 32)) &&
+               if ((p->rate.rate_bytes_ps >= (1ULL << 32)) &&
                    nla_put_u64_64bit(skb, TCA_POLICE_RATE64,
-                                     police->params->rate.rate_bytes_ps,
+                                     p->rate.rate_bytes_ps,
                                      TCA_POLICE_PAD))
                        goto nla_put_failure;
        }
        if (p->peak_present) {
                psched_ratecfg_getrate(&opt.peakrate, &p->peak);
-               if ((police->params->peak.rate_bytes_ps >= (1ULL << 32)) &&
+               if ((p->peak.rate_bytes_ps >= (1ULL << 32)) &&
                    nla_put_u64_64bit(skb, TCA_POLICE_PEAKRATE64,
-                                     police->params->peak.rate_bytes_ps,
+                                     p->peak.rate_bytes_ps,
                                      TCA_POLICE_PAD))
                        goto nla_put_failure;
        }
        if (p->pps_present) {
                if (nla_put_u64_64bit(skb, TCA_POLICE_PKTRATE64,
-                                     police->params->ppsrate.rate_pkts_ps,
+                                     p->ppsrate.rate_pkts_ps,
                                      TCA_POLICE_PAD))
                        goto nla_put_failure;
                if (nla_put_u64_64bit(skb, TCA_POLICE_PKTBURST64,
index 2621550..c877a63 100644 (file)
@@ -43,8 +43,6 @@
 #include <net/flow_offload.h>
 #include <net/tc_wrapper.h>
 
-extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1];
-
 /* The list of all installed classifier types */
 static LIST_HEAD(tcf_proto_base);
 
@@ -2952,6 +2950,7 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
                return PTR_ERR(ops);
        if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
+               module_put(ops->owner);
                return -EOPNOTSUPP;
        }
 
index 9dbc433..815c3e4 100644 (file)
@@ -1153,6 +1153,9 @@ static int fl_set_geneve_opt(const struct nlattr *nla, struct fl_flow_key *key,
        if (option_len > sizeof(struct geneve_opt))
                data_len = option_len - sizeof(struct geneve_opt);
 
+       if (key->enc_opts.len > FLOW_DIS_TUN_OPTS_MAX - 4)
+               return -ERANGE;
+
        opt = (struct geneve_opt *)&key->enc_opts.data[key->enc_opts.len];
        memset(opt, 0xff, option_len);
        opt->length = data_len / 4;
index fdb8f42..e4b6452 100644 (file)
@@ -309,7 +309,7 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 
        if (dev_ingress_queue(dev))
                q = qdisc_match_from_root(
-                       dev_ingress_queue(dev)->qdisc_sleeping,
+                       rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping),
                        handle);
 out:
        return q;
@@ -328,7 +328,8 @@ struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
 
        nq = dev_ingress_queue_rcu(dev);
        if (nq)
-               q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
+               q = qdisc_match_from_root(rcu_dereference(nq->qdisc_sleeping),
+                                         handle);
 out:
        return q;
 }
@@ -634,8 +635,13 @@ EXPORT_SYMBOL(qdisc_watchdog_init);
 void qdisc_watchdog_schedule_range_ns(struct qdisc_watchdog *wd, u64 expires,
                                      u64 delta_ns)
 {
-       if (test_bit(__QDISC_STATE_DEACTIVATED,
-                    &qdisc_root_sleeping(wd->qdisc)->state))
+       bool deactivated;
+
+       rcu_read_lock();
+       deactivated = test_bit(__QDISC_STATE_DEACTIVATED,
+                              &qdisc_root_sleeping(wd->qdisc)->state);
+       rcu_read_unlock();
+       if (deactivated)
                return;
 
        if (hrtimer_is_queued(&wd->timer)) {
@@ -1252,7 +1258,12 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
        sch->parent = parent;
 
        if (handle == TC_H_INGRESS) {
-               sch->flags |= TCQ_F_INGRESS;
+               if (!(sch->flags & TCQ_F_INGRESS)) {
+                       NL_SET_ERR_MSG(extack,
+                                      "Specified parent ID is reserved for ingress and clsact Qdiscs");
+                       err = -EINVAL;
+                       goto err_out3;
+               }
                handle = TC_H_MAKE(TC_H_INGRESS, 0);
        } else {
                if (handle == 0) {
@@ -1473,7 +1484,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
                                }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue(dev)) {
-                               q = dev_ingress_queue(dev)->qdisc_sleeping;
+                               q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
                        }
                } else {
                        q = rtnl_dereference(dev->qdisc);
@@ -1559,7 +1570,7 @@ replay:
                                }
                                q = qdisc_leaf(p, clid);
                        } else if (dev_ingress_queue_create(dev)) {
-                               q = dev_ingress_queue(dev)->qdisc_sleeping;
+                               q = rtnl_dereference(dev_ingress_queue(dev)->qdisc_sleeping);
                        }
                } else {
                        q = rtnl_dereference(dev->qdisc);
@@ -1591,11 +1602,20 @@ replay:
                                        NL_SET_ERR_MSG(extack, "Invalid qdisc name");
                                        return -EINVAL;
                                }
+                               if (q->flags & TCQ_F_INGRESS) {
+                                       NL_SET_ERR_MSG(extack,
+                                                      "Cannot regraft ingress or clsact Qdiscs");
+                                       return -EINVAL;
+                               }
                                if (q == p ||
                                    (p && check_loop(q, p, 0))) {
                                        NL_SET_ERR_MSG(extack, "Qdisc parent/child loop detected");
                                        return -ELOOP;
                                }
+                               if (clid == TC_H_INGRESS) {
+                                       NL_SET_ERR_MSG(extack, "Ingress cannot graft directly");
+                                       return -EINVAL;
+                               }
                                qdisc_refcount_inc(q);
                                goto graft;
                        } else {
@@ -1791,8 +1811,8 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 
                dev_queue = dev_ingress_queue(dev);
                if (dev_queue &&
-                   tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
-                                      &q_idx, s_q_idx, false,
+                   tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+                                      skb, cb, &q_idx, s_q_idx, false,
                                       tca[TCA_DUMP_INVISIBLE]) < 0)
                        goto done;
 
@@ -2235,8 +2255,8 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
 
        dev_queue = dev_ingress_queue(dev);
        if (dev_queue &&
-           tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb,
-                               &t, s_t, false) < 0)
+           tc_dump_tclass_root(rtnl_dereference(dev_queue->qdisc_sleeping),
+                               skb, tcm, cb, &t, s_t, false) < 0)
                goto done;
 
 done:
@@ -2288,7 +2308,9 @@ static struct pernet_operations psched_net_ops = {
        .exit = psched_net_exit,
 };
 
+#if IS_ENABLED(CONFIG_RETPOLINE)
 DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
+#endif
 
 static int __init pktsched_init(void)
 {
index 6980796..591d87d 100644 (file)
@@ -201,6 +201,11 @@ out:
        return NET_XMIT_CN;
 }
 
+static struct netlink_range_validation fq_pie_q_range = {
+       .min = 1,
+       .max = 1 << 20,
+};
+
 static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
        [TCA_FQ_PIE_LIMIT]              = {.type = NLA_U32},
        [TCA_FQ_PIE_FLOWS]              = {.type = NLA_U32},
@@ -208,7 +213,8 @@ static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
        [TCA_FQ_PIE_TUPDATE]            = {.type = NLA_U32},
        [TCA_FQ_PIE_ALPHA]              = {.type = NLA_U32},
        [TCA_FQ_PIE_BETA]               = {.type = NLA_U32},
-       [TCA_FQ_PIE_QUANTUM]            = {.type = NLA_U32},
+       [TCA_FQ_PIE_QUANTUM]            =
+                       NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
        [TCA_FQ_PIE_MEMORY_LIMIT]       = {.type = NLA_U32},
        [TCA_FQ_PIE_ECN_PROB]           = {.type = NLA_U32},
        [TCA_FQ_PIE_ECN]                = {.type = NLA_U32},
@@ -373,6 +379,7 @@ static void fq_pie_timer(struct timer_list *t)
        spinlock_t *root_lock; /* to lock qdisc for probability calculations */
        u32 idx;
 
+       rcu_read_lock();
        root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);
 
@@ -385,6 +392,7 @@ static void fq_pie_timer(struct timer_list *t)
                mod_timer(&q->adapt_timer, jiffies + q->p_params.tupdate);
 
        spin_unlock(root_lock);
+       rcu_read_unlock();
 }
 
 static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
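The quantum fix above moves the bound into the netlink policy itself, so out-of-range values are rejected during attribute parsing (with an extack message) before the qdisc's change handler runs. A minimal sketch of the same pattern for a hypothetical attribute set; NLA_POLICY_FULL_RANGE() and struct netlink_range_validation are the helpers used in the hunk, everything named foo_* is illustrative:

#include <net/netlink.h>

enum {
	FOO_ATTR_UNSPEC,
	FOO_ATTR_QUANTUM,
	__FOO_ATTR_MAX,
};
#define FOO_ATTR_MAX (__FOO_ATTR_MAX - 1)

/* u64 bounds enforced by the netlink core for the attribute below. */
static struct netlink_range_validation foo_quantum_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
	[FOO_ATTR_QUANTUM] = NLA_POLICY_FULL_RANGE(NLA_U32, &foo_quantum_range),
};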
index 37e41f9..3248259 100644 (file)
@@ -648,7 +648,7 @@ struct Qdisc_ops noop_qdisc_ops __read_mostly = {
 
 static struct netdev_queue noop_netdev_queue = {
        RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
-       .qdisc_sleeping =       &noop_qdisc,
+       RCU_POINTER_INITIALIZER(qdisc_sleeping, &noop_qdisc),
 };
 
 struct Qdisc noop_qdisc = {
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL(qdisc_put_unlocked);
 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc)
 {
-       struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+       struct Qdisc *oqdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
        spinlock_t *root_lock;
 
        root_lock = qdisc_lock(oqdisc);
@@ -1112,7 +1112,7 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
        /* ... and graft new one */
        if (qdisc == NULL)
                qdisc = &noop_qdisc;
-       dev_queue->qdisc_sleeping = qdisc;
+       rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
        rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
 
        spin_unlock_bh(root_lock);
@@ -1125,12 +1125,12 @@ static void shutdown_scheduler_queue(struct net_device *dev,
                                     struct netdev_queue *dev_queue,
                                     void *_qdisc_default)
 {
-       struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+       struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
        struct Qdisc *qdisc_default = _qdisc_default;
 
        if (qdisc) {
                rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
-               dev_queue->qdisc_sleeping = qdisc_default;
+               rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc_default);
 
                qdisc_put(qdisc);
        }
@@ -1154,7 +1154,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
        if (!netif_is_multiqueue(dev))
                qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
-       dev_queue->qdisc_sleeping = qdisc;
+       rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }
 
 static void attach_default_qdiscs(struct net_device *dev)
@@ -1167,7 +1167,7 @@ static void attach_default_qdiscs(struct net_device *dev)
        if (!netif_is_multiqueue(dev) ||
            dev->priv_flags & IFF_NO_QUEUE) {
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-               qdisc = txq->qdisc_sleeping;
+               qdisc = rtnl_dereference(txq->qdisc_sleeping);
                rcu_assign_pointer(dev->qdisc, qdisc);
                qdisc_refcount_inc(qdisc);
        } else {
@@ -1186,7 +1186,7 @@ static void attach_default_qdiscs(struct net_device *dev)
                netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
                dev->priv_flags |= IFF_NO_QUEUE;
                netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
-               qdisc = txq->qdisc_sleeping;
+               qdisc = rtnl_dereference(txq->qdisc_sleeping);
                rcu_assign_pointer(dev->qdisc, qdisc);
                qdisc_refcount_inc(qdisc);
                dev->priv_flags ^= IFF_NO_QUEUE;
@@ -1202,7 +1202,7 @@ static void transition_one_qdisc(struct net_device *dev,
                                 struct netdev_queue *dev_queue,
                                 void *_need_watchdog)
 {
-       struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
+       struct Qdisc *new_qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
        int *need_watchdog_p = _need_watchdog;
 
        if (!(new_qdisc->flags & TCQ_F_BUILTIN))
@@ -1272,7 +1272,7 @@ static void dev_reset_queue(struct net_device *dev,
        struct Qdisc *qdisc;
        bool nolock;
 
-       qdisc = dev_queue->qdisc_sleeping;
+       qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
        if (!qdisc)
                return;
 
@@ -1303,7 +1303,7 @@ static bool some_qdisc_is_busy(struct net_device *dev)
                int val;
 
                dev_queue = netdev_get_tx_queue(dev, i);
-               q = dev_queue->qdisc_sleeping;
+               q = rtnl_dereference(dev_queue->qdisc_sleeping);
 
                root_lock = qdisc_lock(q);
                spin_lock_bh(root_lock);
@@ -1379,7 +1379,7 @@ EXPORT_SYMBOL(dev_deactivate);
 static int qdisc_change_tx_queue_len(struct net_device *dev,
                                     struct netdev_queue *dev_queue)
 {
-       struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+       struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
        const struct Qdisc_ops *ops = qdisc->ops;
 
        if (ops->change_tx_queue_len)
@@ -1404,7 +1404,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
        unsigned int i;
 
        for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
-               qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+               qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
                /* Only update the default qdiscs we created,
                 * qdiscs with handles are always hashed.
                 */
@@ -1412,7 +1412,7 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
                        qdisc_hash_del(qdisc);
        }
        for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
-               qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
+               qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc_sleeping);
                if (qdisc != &noop_qdisc && !qdisc->handle)
                        qdisc_hash_add(qdisc, false);
        }
@@ -1449,7 +1449,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
        struct Qdisc *qdisc = _qdisc;
 
        rcu_assign_pointer(dev_queue->qdisc, qdisc);
-       dev_queue->qdisc_sleeping = qdisc;
+       rcu_assign_pointer(dev_queue->qdisc_sleeping, qdisc);
 }
 
 void dev_init_scheduler(struct net_device *dev)
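The qdisc_sleeping conversions in this and the surrounding files are the standard treatment of an __rcu-annotated pointer: writers publish with rcu_assign_pointer(), readers that hold RTNL use rtnl_dereference(), and lockless readers wrap rcu_dereference() in an RCU read-side section. A minimal sketch of that pattern with a hypothetical structure, not the netdev_queue fields themselves:

#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct foo_cfg {
	int value;
};

struct foo {
	struct foo_cfg __rcu *cfg;	/* writers run under RTNL */
};

/* Writer: publish with the barrier implied by rcu_assign_pointer() so
 * lockless readers only ever see a fully initialised object.  The old
 * object would normally be freed after a grace period (kfree_rcu()). */
static void foo_set_cfg(struct foo *f, struct foo_cfg *new_cfg)
{
	ASSERT_RTNL();
	rcu_assign_pointer(f->cfg, new_cfg);
}

/* Reader already serialised by RTNL: no RCU read-side section needed. */
static int foo_value_rtnl(struct foo *f)
{
	return rtnl_dereference(f->cfg)->value;
}

/* Lockless reader: the dereference must stay inside rcu_read_lock(). */
static int foo_value_rcu(struct foo *f)
{
	int v;

	rcu_read_lock();
	v = rcu_dereference(f->cfg)->value;
	rcu_read_unlock();
	return v;
}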
index 8483812..e43a454 100644 (file)
@@ -80,6 +80,9 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt,
        struct net_device *dev = qdisc_dev(sch);
        int err;
 
+       if (sch->parent != TC_H_INGRESS)
+               return -EOPNOTSUPP;
+
        net_inc_ingress_queue();
 
        mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress);
@@ -101,6 +104,9 @@ static void ingress_destroy(struct Qdisc *sch)
 {
        struct ingress_sched_data *q = qdisc_priv(sch);
 
+       if (sch->parent != TC_H_INGRESS)
+               return;
+
        tcf_block_put_ext(q->block, sch, &q->block_info);
        net_dec_ingress_queue();
 }
@@ -134,7 +140,7 @@ static struct Qdisc_ops ingress_qdisc_ops __read_mostly = {
        .cl_ops                 =       &ingress_class_ops,
        .id                     =       "ingress",
        .priv_size              =       sizeof(struct ingress_sched_data),
-       .static_flags           =       TCQ_F_CPUSTATS,
+       .static_flags           =       TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init                   =       ingress_init,
        .destroy                =       ingress_destroy,
        .dump                   =       ingress_dump,
@@ -219,6 +225,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt,
        struct net_device *dev = qdisc_dev(sch);
        int err;
 
+       if (sch->parent != TC_H_CLSACT)
+               return -EOPNOTSUPP;
+
        net_inc_ingress_queue();
        net_inc_egress_queue();
 
@@ -248,6 +257,9 @@ static void clsact_destroy(struct Qdisc *sch)
 {
        struct clsact_sched_data *q = qdisc_priv(sch);
 
+       if (sch->parent != TC_H_CLSACT)
+               return;
+
        tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info);
        tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info);
 
@@ -269,7 +281,7 @@ static struct Qdisc_ops clsact_qdisc_ops __read_mostly = {
        .cl_ops                 =       &clsact_class_ops,
        .id                     =       "clsact",
        .priv_size              =       sizeof(struct clsact_sched_data),
-       .static_flags           =       TCQ_F_CPUSTATS,
+       .static_flags           =       TCQ_F_INGRESS | TCQ_F_CPUSTATS,
        .init                   =       clsact_init,
        .destroy                =       clsact_destroy,
        .dump                   =       ingress_dump,
index d0bc660..c860119 100644 (file)
@@ -141,7 +141,7 @@ static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
         * qdisc totals are added at end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-               qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+               qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
                spin_lock_bh(qdisc_lock(qdisc));
 
                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -202,7 +202,7 @@ static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
 {
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
 
-       return dev_queue->qdisc_sleeping;
+       return rtnl_dereference(dev_queue->qdisc_sleeping);
 }
 
 static unsigned long mq_find(struct Qdisc *sch, u32 classid)
@@ -221,7 +221,7 @@ static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
 
        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
-       tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+       tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
        return 0;
 }
 
@@ -230,7 +230,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
        struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
 
-       sch = dev_queue->qdisc_sleeping;
+       sch = rtnl_dereference(dev_queue->qdisc_sleeping);
        if (gnet_stats_copy_basic(d, sch->cpu_bstats, &sch->bstats, true) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
index dc5a0ff..ab69ff7 100644 (file)
@@ -557,7 +557,7 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
         * qdisc totals are added at end.
         */
        for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
-               qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+               qdisc = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping);
                spin_lock_bh(qdisc_lock(qdisc));
 
                gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
@@ -604,7 +604,7 @@ static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
        if (!dev_queue)
                return NULL;
 
-       return dev_queue->qdisc_sleeping;
+       return rtnl_dereference(dev_queue->qdisc_sleeping);
 }
 
 static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
@@ -637,7 +637,7 @@ static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
                tcm->tcm_parent = (tc < 0) ? 0 :
                        TC_H_MAKE(TC_H_MAJ(sch->handle),
                                  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
-               tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+               tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
        } else {
                tcm->tcm_parent = TC_H_ROOT;
                tcm->tcm_info = 0;
@@ -693,7 +693,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
        } else {
                struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
 
-               sch = dev_queue->qdisc_sleeping;
+               sch = rtnl_dereference(dev_queue->qdisc_sleeping);
                if (gnet_stats_copy_basic(d, sch->cpu_bstats,
                                          &sch->bstats, true) < 0 ||
                    qdisc_qstats_copy(d, sch) < 0)
index 2152a56..2da6250 100644 (file)
@@ -421,8 +421,10 @@ static void pie_timer(struct timer_list *t)
 {
        struct pie_sched_data *q = from_timer(q, t, adapt_timer);
        struct Qdisc *sch = q->sch;
-       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spinlock_t *root_lock;
 
+       rcu_read_lock();
+       root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);
        pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);
 
@@ -430,6 +432,7 @@ static void pie_timer(struct timer_list *t)
        if (q->params.tupdate)
                mod_timer(&q->adapt_timer, jiffies + q->params.tupdate);
        spin_unlock(root_lock);
+       rcu_read_unlock();
 }
 
 static int pie_init(struct Qdisc *sch, struct nlattr *opt,
index 9812932..16277b6 100644 (file)
@@ -321,12 +321,15 @@ static inline void red_adaptative_timer(struct timer_list *t)
 {
        struct red_sched_data *q = from_timer(q, t, adapt_timer);
        struct Qdisc *sch = q->sch;
-       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spinlock_t *root_lock;
 
+       rcu_read_lock();
+       root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);
        red_adaptative_algo(&q->parms, &q->vars);
        mod_timer(&q->adapt_timer, jiffies + HZ/2);
        spin_unlock(root_lock);
+       rcu_read_unlock();
 }
 
 static int red_init(struct Qdisc *sch, struct nlattr *opt,
index abd4363..66dcb18 100644 (file)
@@ -606,10 +606,12 @@ static void sfq_perturbation(struct timer_list *t)
 {
        struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
        struct Qdisc *sch = q->sch;
-       spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       spinlock_t *root_lock;
        siphash_key_t nkey;
 
        get_random_bytes(&nkey, sizeof(nkey));
+       rcu_read_lock();
+       root_lock = qdisc_lock(qdisc_root_sleeping(sch));
        spin_lock(root_lock);
        q->perturbation = nkey;
        if (!q->filter_list && q->tail)
@@ -618,6 +620,7 @@ static void sfq_perturbation(struct timer_list *t)
 
        if (q->perturb_period)
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
+       rcu_read_unlock();
 }
 
 static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
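The pie, red and sfq timer fixes above share one shape: the timer callback runs in softirq context without RTNL, and qdisc_root_sleeping() now involves an RCU-protected pointer, so the lookup and the work done under the root lock must sit inside an RCU read-side section. A condensed sketch of that shape with a hypothetical qdisc (foo_*) and its adaptation timer:

#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <net/sch_generic.h>

struct foo_sched_data {
	struct Qdisc *sch;
	struct timer_list adapt_timer;
};

static void foo_adapt_timer(struct timer_list *t)
{
	struct foo_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock;

	rcu_read_lock();	/* protects the root qdisc dereference */
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* ... recompute per-qdisc state here ... */

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	spin_unlock(root_lock);
	rcu_read_unlock();
}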
index 76db9a1..dd7dea2 100644 (file)
@@ -2358,7 +2358,7 @@ static struct Qdisc *taprio_leaf(struct Qdisc *sch, unsigned long cl)
        if (!dev_queue)
                return NULL;
 
-       return dev_queue->qdisc_sleeping;
+       return rtnl_dereference(dev_queue->qdisc_sleeping);
 }
 
 static unsigned long taprio_find(struct Qdisc *sch, u32 classid)
@@ -2377,7 +2377,7 @@ static int taprio_dump_class(struct Qdisc *sch, unsigned long cl,
 
        tcm->tcm_parent = TC_H_ROOT;
        tcm->tcm_handle |= TC_H_MIN(cl);
-       tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+       tcm->tcm_info = rtnl_dereference(dev_queue->qdisc_sleeping)->handle;
 
        return 0;
 }
@@ -2389,7 +2389,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 {
        struct netdev_queue *dev_queue = taprio_queue_get(sch, cl);
 
-       sch = dev_queue->qdisc_sleeping;
+       sch = rtnl_dereference(dev_queue->qdisc_sleeping);
        if (gnet_stats_copy_basic(d, NULL, &sch->bstats, true) < 0 ||
            qdisc_qstats_copy(d, sch) < 0)
                return -1;
index 16f9238..7721239 100644 (file)
@@ -297,7 +297,7 @@ restart:
                struct net_device *slave = qdisc_dev(q);
                struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);
 
-               if (slave_txq->qdisc_sleeping != q)
+               if (rcu_access_pointer(slave_txq->qdisc_sleeping) != q)
                        continue;
                if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
                    !netif_running(slave)) {
index 2f66a20..2abe45a 100644 (file)
@@ -324,9 +324,12 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
                t->pl.probe_size += SCTP_PL_BIG_STEP;
        } else if (t->pl.state == SCTP_PL_SEARCH) {
                if (!t->pl.probe_high) {
-                       t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
-                                              SCTP_MAX_PLPMTU);
-                       return false;
+                       if (t->pl.probe_size < SCTP_MAX_PLPMTU) {
+                               t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
+                                                      SCTP_MAX_PLPMTU);
+                               return false;
+                       }
+                       t->pl.probe_high = SCTP_MAX_PLPMTU;
                }
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
@@ -341,7 +344,7 @@ bool sctp_transport_pl_recv(struct sctp_transport *t)
        } else if (t->pl.state == SCTP_PL_COMPLETE) {
                /* Raise probe_size again after 30 * interval in Search Complete */
                t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
-               t->pl.probe_size += SCTP_PL_MIN_STEP;
+               t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_MIN_STEP, SCTP_MAX_PLPMTU);
        }
 
        return t->pl.state == SCTP_PL_COMPLETE;
index 50c38b6..538e9c6 100644 (file)
@@ -2000,8 +2000,10 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
                return rc;
 
        /* create send buffer and rmb */
-       if (smc_buf_create(new_smc, false))
+       if (smc_buf_create(new_smc, false)) {
+               smc_conn_abort(new_smc, ini->first_contact_local);
                return SMC_CLC_DECL_MEM;
+       }
 
        return 0;
 }
@@ -2217,8 +2219,11 @@ static void smc_find_rdma_v2_device_serv(struct smc_sock *new_smc,
        smcr_version = ini->smcr_version;
        ini->smcr_version = SMC_V2;
        rc = smc_listen_rdma_init(new_smc, ini);
-       if (!rc)
+       if (!rc) {
                rc = smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+               if (rc)
+                       smc_conn_abort(new_smc, ini->first_contact_local);
+       }
        if (!rc)
                return;
        ini->smcr_version = smcr_version;
index 4543567..3f465fa 100644 (file)
@@ -127,6 +127,7 @@ static int smcr_lgr_conn_assign_link(struct smc_connection *conn, bool first)
        int i, j;
 
        /* do link balancing */
+       conn->lnk = NULL;       /* reset conn->lnk first */
        for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
                struct smc_link *lnk = &conn->lgr->lnk[i];
 
index a0840b8..90f0b60 100644 (file)
@@ -578,7 +578,10 @@ static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
 {
        struct smc_buf_desc *buf_next;
 
-       if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
+       if (!buf_pos)
+               return _smc_llc_get_next_rmb(lgr, buf_lst);
+
+       if (list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
                (*buf_lst)++;
                return _smc_llc_get_next_rmb(lgr, buf_lst);
        }
@@ -614,6 +617,8 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
                goto out;
        buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
        for (i = 0; i < ext->num_rkeys; i++) {
+               while (buf_pos && !(buf_pos)->used)
+                       buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
                if (!buf_pos)
                        break;
                rmb = buf_pos;
@@ -623,8 +628,6 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
                        cpu_to_be64((uintptr_t)rmb->cpu_addr) :
                        cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
                buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
-               while (buf_pos && !(buf_pos)->used)
-                       buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
        }
        len += i * sizeof(ext->rt[0]);
 out:
@@ -848,6 +851,8 @@ static int smc_llc_add_link_cont(struct smc_link *link,
        addc_llc->num_rkeys = *num_rkeys_todo;
        n = *num_rkeys_todo;
        for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
+               while (*buf_pos && !(*buf_pos)->used)
+                       *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
                if (!*buf_pos) {
                        addc_llc->num_rkeys = addc_llc->num_rkeys -
                                              *num_rkeys_todo;
@@ -864,8 +869,6 @@ static int smc_llc_add_link_cont(struct smc_link *link,
 
                (*num_rkeys_todo)--;
                *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
-               while (*buf_pos && !(*buf_pos)->used)
-                       *buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
        }
        addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
        addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
index 212c5d5..9734e1d 100644 (file)
@@ -639,6 +639,16 @@ gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
 
        ret = write_bytes_to_xdr_buf(buf, offset, data, len);
 
+#if IS_ENABLED(CONFIG_KUNIT)
+       /*
+        * CBC-CTS does not define an output IV but RFC 3962 defines it as the
+        * penultimate block of ciphertext, so copy that into the IV buffer
+        * before returning.
+        */
+       if (encrypt)
+               memcpy(iv, data, crypto_sync_skcipher_ivsize(cipher));
+#endif
+
 out:
        kfree(data);
        return ret;
index c8321de..6debf4f 100644 (file)
@@ -927,11 +927,10 @@ static void __rpc_execute(struct rpc_task *task)
                 */
                do_action = task->tk_action;
                /* Tasks with an RPC error status should exit */
-               if (do_action != rpc_exit_task &&
+               if (do_action && do_action != rpc_exit_task &&
                    (status = READ_ONCE(task->tk_rpc_status)) != 0) {
                        task->tk_status = status;
-                       if (do_action != NULL)
-                               do_action = rpc_exit_task;
+                       do_action = rpc_exit_task;
                }
                /* Callbacks override all actions */
                if (task->tk_callback) {
index 26367cf..79967b6 100644 (file)
@@ -1052,7 +1052,7 @@ static int __svc_register(struct net *net, const char *progname,
 #endif
        }
 
-       trace_svc_register(progname, version, protocol, port, family, error);
+       trace_svc_register(progname, version, family, protocol, port, error);
        return error;
 }
 
@@ -1416,7 +1416,7 @@ err_bad_rpc:
        /* Only RPCv2 supported */
        xdr_stream_encode_u32(xdr, RPC_VERSION);
        xdr_stream_encode_u32(xdr, RPC_VERSION);
-       goto sendit;
+       return 1;       /* don't wrap */
 
 err_bad_auth:
        dprintk("svc: authentication failed (%d)\n",
@@ -1432,7 +1432,7 @@ err_bad_auth:
 err_bad_prog:
        dprintk("svc: unknown program %d\n", rqstp->rq_prog);
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
+       *rqstp->rq_accept_statp = rpc_prog_unavail;
        goto sendit;
 
 err_bad_vers:
@@ -1440,7 +1440,12 @@ err_bad_vers:
                       rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
+       *rqstp->rq_accept_statp = rpc_prog_mismatch;
+
+       /*
+        * svc_authenticate() has already added the verifier and
+        * advanced the stream just past rq_accept_statp.
+        */
        xdr_stream_encode_u32(xdr, process.mismatch.lovers);
        xdr_stream_encode_u32(xdr, process.mismatch.hivers);
        goto sendit;
@@ -1449,19 +1454,19 @@ err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
+       *rqstp->rq_accept_statp = rpc_proc_unavail;
        goto sendit;
 
 err_garbage_args:
        svc_printk(rqstp, "failed to decode RPC header\n");
 
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
+       *rqstp->rq_accept_statp = rpc_garbage_args;
        goto sendit;
 
 err_system_err:
        serv->sv_stats->rpcbadfmt++;
-       xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
+       *rqstp->rq_accept_statp = rpc_system_err;
        goto sendit;
 }
 
index 84e5d7d..13a1489 100644 (file)
@@ -532,13 +532,23 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 }
 EXPORT_SYMBOL_GPL(svc_reserve);
 
+static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
+{
+       if (!dr)
+               return;
+
+       xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
+       kfree(dr);
+}
+
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
        struct svc_xprt *xprt = rqstp->rq_xprt;
 
-       xprt->xpt_ops->xpo_release_rqst(rqstp);
+       xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
-       kfree(rqstp->rq_deferred);
+       free_deferred(xprt, rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;
 
        svc_rqst_release_pages(rqstp);
@@ -1054,7 +1064,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
        spin_unlock_bh(&serv->sv_lock);
 
        while ((dr = svc_deferred_dequeue(xprt)) != NULL)
-               kfree(dr);
+               free_deferred(xprt, dr);
 
        call_xpt_users(xprt);
        svc_xprt_put(xprt);
@@ -1176,8 +1186,8 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
                trace_svc_defer_drop(dr);
+               free_deferred(xprt, dr);
                svc_xprt_put(xprt);
-               kfree(dr);
                return;
        }
        dr->xprt = NULL;
@@ -1222,14 +1232,14 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
-               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
-               rqstp->rq_xprt_ctxt = NULL;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
+       dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
+       rqstp->rq_xprt_ctxt = NULL;
        trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
@@ -1262,6 +1272,8 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_daddr       = dr->daddr;
        rqstp->rq_respages    = rqstp->rq_pages;
        rqstp->rq_xprt_ctxt   = dr->xprt_ctxt;
+
+       dr->xprt_ctxt = NULL;
        svc_xprt_received(rqstp->rq_xprt);
        return dr->argslen << 2;
 }
index a51c9b9..f77cebe 100644 (file)
@@ -121,27 +121,27 @@ static void svc_reclassify_socket(struct socket *sock)
 #endif
 
 /**
- * svc_tcp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_tcp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
+static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
 }
 
 /**
- * svc_udp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_udp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_udp_release_rqst(struct svc_rqst *rqstp)
+static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
-       struct sk_buff *skb = rqstp->rq_xprt_ctxt;
+       struct sk_buff *skb = ctxt;
 
-       if (skb) {
-               rqstp->rq_xprt_ctxt = NULL;
+       if (skb)
                consume_skb(skb);
-       }
 }
 
 union svc_pktinfo_u {
@@ -696,7 +696,8 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_udp_release_rqst(rqstp);
+       svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        svc_set_cmsg_data(rqstp, cmh);
 
@@ -768,7 +769,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
        .xpo_recvfrom = svc_udp_recvfrom,
        .xpo_sendto = svc_udp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_udp_release_rqst,
+       .xpo_release_ctxt = svc_udp_release_ctxt,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_udp_has_wspace,
@@ -895,6 +896,9 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
                trace_svcsock_accept_err(xprt, serv->sv_name, err);
                return NULL;
        }
+       if (IS_ERR(sock_alloc_file(newsock, O_NONBLOCK, NULL)))
+               return NULL;
+
        set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 
        err = kernel_getpeername(newsock, sin);
@@ -935,7 +939,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
        return &newsvsk->sk_xprt;
 
 failed:
-       sock_release(newsock);
+       sockfd_put(newsock);
        return NULL;
 }
 
@@ -1298,7 +1302,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_tcp_release_rqst(rqstp);
+       svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        atomic_inc(&svsk->sk_sendqlen);
        mutex_lock(&xprt->xpt_mutex);
@@ -1343,7 +1348,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
        .xpo_recvfrom = svc_tcp_recvfrom,
        .xpo_sendto = svc_tcp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_tcp_release_rqst,
+       .xpo_release_ctxt = svc_tcp_release_ctxt,
        .xpo_detach = svc_tcp_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_tcp_has_wspace,
@@ -1430,7 +1435,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                                                struct socket *sock,
                                                int flags)
 {
-       struct file     *filp = NULL;
        struct svc_sock *svsk;
        struct sock     *inet;
        int             pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
@@ -1439,14 +1443,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        if (!svsk)
                return ERR_PTR(-ENOMEM);
 
-       if (!sock->file) {
-               filp = sock_alloc_file(sock, O_NONBLOCK, NULL);
-               if (IS_ERR(filp)) {
-                       kfree(svsk);
-                       return ERR_CAST(filp);
-               }
-       }
-
        inet = sock->sk;
 
        if (pmap_register) {
@@ -1456,8 +1452,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
                                     inet->sk_protocol,
                                     ntohs(inet_sk(inet)->inet_sport));
                if (err < 0) {
-                       if (filp)
-                               fput(filp);
                        kfree(svsk);
                        return ERR_PTR(err);
                }
@@ -1486,25 +1480,10 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
        return svsk;
 }
 
-bool svc_alien_sock(struct net *net, int fd)
-{
-       int err;
-       struct socket *sock = sockfd_lookup(fd, &err);
-       bool ret = false;
-
-       if (!sock)
-               goto out;
-       if (sock_net(sock->sk) != net)
-               ret = true;
-       sockfd_put(sock);
-out:
-       return ret;
-}
-EXPORT_SYMBOL_GPL(svc_alien_sock);
-
 /**
  * svc_addsock - add a listener socket to an RPC service
  * @serv: pointer to RPC service to which to add a new listener
+ * @net: caller's network namespace
  * @fd: file descriptor of the new listener
  * @name_return: pointer to buffer to fill in with name of listener
  * @len: size of the buffer
@@ -1514,8 +1493,8 @@ EXPORT_SYMBOL_GPL(svc_alien_sock);
  * Name is terminated with '\n'.  On error, returns a negative errno
  * value.
  */
-int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
-               const size_t len, const struct cred *cred)
+int svc_addsock(struct svc_serv *serv, struct net *net, const int fd,
+               char *name_return, const size_t len, const struct cred *cred)
 {
        int err = 0;
        struct socket *so = sockfd_lookup(fd, &err);
@@ -1526,6 +1505,9 @@ int svc_addsock(struct svc_serv *serv, const int fd, char *name_return,
 
        if (!so)
                return err;
+       err = -EINVAL;
+       if (sock_net(so->sk) != net)
+               goto out;
        err = -EAFNOSUPPORT;
        if ((so->sk->sk_family != PF_INET) && (so->sk->sk_family != PF_INET6))
                goto out;
index 1c658fa..a22fe75 100644 (file)
@@ -239,21 +239,20 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 }
 
 /**
- * svc_rdma_release_rqst - Release transport-specific per-rqst resources
- * @rqstp: svc_rqst being released
+ * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
+ * @xprt: the transport which owned the context
+ * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  * Ensure that the recv_ctxt is released whether or not a Reply
  * was sent. For example, the client could close the connection,
  * or svc_process could drop an RPC, before the Reply is sent.
  */
-void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 {
-       struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
-       struct svc_xprt *xprt = rqstp->rq_xprt;
+       struct svc_rdma_recv_ctxt *ctxt = vctxt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
 
-       rqstp->rq_xprt_ctxt = NULL;
        if (ctxt)
                svc_rdma_recv_ctxt_put(rdma, ctxt);
 }
index 416b298..ca04f7a 100644 (file)
@@ -80,7 +80,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_result_payload = svc_rdma_result_payload,
-       .xpo_release_rqst = svc_rdma_release_rqst,
+       .xpo_release_ctxt = svc_rdma_release_ctxt,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_has_wspace = svc_rdma_has_wspace,
index 35cac77..5388140 100644 (file)
@@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
        return mtu;
 }
 
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
+{
+       int mtu = TIPC_MIN_BEARER_MTU;
+       struct tipc_bearer *b;
+
+       rcu_read_lock();
+       b = bearer_get(net, bearer_id);
+       if (b)
+               mtu += b->encap_hlen;
+       rcu_read_unlock();
+       return mtu;
+}
+
 /* tipc_bearer_xmit_skb - sends buffer to destination over bearer
  */
 void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
@@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
                                return -EINVAL;
                        }
 #ifdef CONFIG_TIPC_MEDIA_UDP
-                       if (tipc_udp_mtu_bad(nla_get_u32
-                                            (props[TIPC_NLA_PROP_MTU]))) {
+                       if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
+                           b->encap_hlen + TIPC_MIN_BEARER_MTU) {
                                NL_SET_ERR_MSG(info->extack,
                                               "MTU value is out-of-range");
                                return -EINVAL;
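tipc_bearer_min_mtu() above folds the per-bearer encapsulation overhead into the floor that the later netlink and link-protocol checks use. As a worked example (TIPC_MIN_BEARER_MTU is treated as an opaque base here): a UDP bearer over IPv4 adds 20 + 8 = 28 bytes of headers, one over IPv6 adds 40 + 8 = 48, so the accepted MTU floor becomes the base plus 28 or 48 respectively. A sketch of where those numbers come from; the helper name is hypothetical:

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <linux/udp.h>

/* Encapsulation overhead a UDP media bearer stores in b->encap_hlen:
 * 20 + 8 = 28 bytes for IPv4, 40 + 8 = 48 bytes for IPv6. */
static unsigned int foo_udp_encap_hlen(bool ipv6)
{
	if (ipv6)
		return sizeof(struct ipv6hdr) + sizeof(struct udphdr);
	return sizeof(struct iphdr) + sizeof(struct udphdr);
}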
index 490ad6e..bd0cc5c 100644 (file)
@@ -146,6 +146,7 @@ struct tipc_media {
  * @identity: array index of this bearer within TIPC bearer array
  * @disc: ptr to link setup request
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @encap_hlen: encap headers length
  * @up: bearer up flag (bit 0)
  * @refcnt: tipc_bearer reference counter
  *
@@ -170,6 +171,7 @@ struct tipc_bearer {
        u32 identity;
        struct tipc_discoverer *disc;
        char net_plane;
+       u16 encap_hlen;
        unsigned long up;
        refcount_t refcnt;
 };
@@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
 void tipc_bearer_cleanup(void);
 void tipc_bearer_stop(struct net *net);
 int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
 bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
 void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
                          struct sk_buff *skb,
index b3ce248..2eff1c7 100644 (file)
@@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        struct tipc_msg *hdr = buf_msg(skb);
        struct tipc_gap_ack_blks *ga = NULL;
        bool reply = msg_probe(hdr), retransmitted = false;
-       u32 dlen = msg_data_sz(hdr), glen = 0;
+       u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
        u16 peers_snd_nxt =  msg_next_sent(hdr);
        u16 peers_tol = msg_link_tolerance(hdr);
        u16 peers_prio = msg_linkprio(hdr);
@@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
        switch (mtyp) {
        case RESET_MSG:
        case ACTIVATE_MSG:
+               msg_max = msg_max_pkt(hdr);
+               if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
+                       break;
                /* Complete own link name with peer's interface name */
                if_name =  strrchr(l->name, ':') + 1;
                if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                l->peer_session = msg_session(hdr);
                l->in_session = true;
                l->peer_bearer_id = msg_bearer_id(hdr);
-               if (l->mtu > msg_max_pkt(hdr))
-                       l->mtu = msg_max_pkt(hdr);
+               if (l->mtu > msg_max)
+                       l->mtu = msg_max;
                break;
 
        case STATE_MSG:
index c2bb818..0a85244 100644 (file)
@@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
                        udp_conf.local_ip.s_addr = local.ipv4.s_addr;
                udp_conf.use_udp_checksums = false;
                ub->ifindex = dev->ifindex;
-               if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
-                                     sizeof(struct udphdr))) {
+               b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
+               if (tipc_mtu_bad(dev, b->encap_hlen)) {
                        err = -EINVAL;
                        goto err;
                }
@@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
                else
                        udp_conf.local_ip6 = local.ipv6;
                ub->ifindex = dev->ifindex;
+               b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
                b->mtu = 1280;
 #endif
        } else {
index 804c388..0672aca 100644 (file)
@@ -167,6 +167,11 @@ static inline bool tls_strp_msg_ready(struct tls_sw_context_rx *ctx)
        return ctx->strp.msg_ready;
 }
 
+static inline bool tls_strp_msg_mixed_decrypted(struct tls_sw_context_rx *ctx)
+{
+       return ctx->strp.mixed_decrypted;
+}
+
 #ifdef CONFIG_TLS_DEVICE
 int tls_device_init(void);
 void tls_device_cleanup(void);
index a7cc4f9..bf69c9d 100644 (file)
@@ -1007,20 +1007,14 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx)
        struct tls_sw_context_rx *sw_ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_buff *skb = tls_strp_msg(sw_ctx);
        struct strp_msg *rxm = strp_msg(skb);
-       int is_decrypted = skb->decrypted;
-       int is_encrypted = !is_decrypted;
-       struct sk_buff *skb_iter;
-       int left;
-
-       left = rxm->full_len - skb->len;
-       /* Check if all the data is decrypted already */
-       skb_iter = skb_shinfo(skb)->frag_list;
-       while (skb_iter && left > 0) {
-               is_decrypted &= skb_iter->decrypted;
-               is_encrypted &= !skb_iter->decrypted;
-
-               left -= skb_iter->len;
-               skb_iter = skb_iter->next;
+       int is_decrypted, is_encrypted;
+
+       if (!tls_strp_msg_mixed_decrypted(sw_ctx)) {
+               is_decrypted = skb->decrypted;
+               is_encrypted = !is_decrypted;
+       } else {
+               is_decrypted = 0;
+               is_encrypted = 0;
        }
 
        trace_tls_device_decrypted(sk, tcp_sk(sk)->copied_seq - rxm->full_len,
index 955ac3e..f37f4a0 100644 (file)
@@ -20,7 +20,9 @@ static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
        strp->stopped = 1;
 
        /* Report an error on the lower socket */
-       strp->sk->sk_err = -err;
+       WRITE_ONCE(strp->sk->sk_err, -err);
+       /* Paired with smp_rmb() in tcp_poll() */
+       smp_wmb();
        sk_error_report(strp->sk);
 }
 
@@ -29,34 +31,50 @@ static void tls_strp_anchor_free(struct tls_strparser *strp)
        struct skb_shared_info *shinfo = skb_shinfo(strp->anchor);
 
        DEBUG_NET_WARN_ON_ONCE(atomic_read(&shinfo->dataref) != 1);
-       shinfo->frag_list = NULL;
+       if (!strp->copy_mode)
+               shinfo->frag_list = NULL;
        consume_skb(strp->anchor);
        strp->anchor = NULL;
 }
 
-/* Create a new skb with the contents of input copied to its page frags */
-static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+static struct sk_buff *
+tls_strp_skb_copy(struct tls_strparser *strp, struct sk_buff *in_skb,
+                 int offset, int len)
 {
-       struct strp_msg *rxm;
        struct sk_buff *skb;
-       int i, err, offset;
+       int i, err;
 
-       skb = alloc_skb_with_frags(0, strp->stm.full_len, TLS_PAGE_ORDER,
+       skb = alloc_skb_with_frags(0, len, TLS_PAGE_ORDER,
                                   &err, strp->sk->sk_allocation);
        if (!skb)
                return NULL;
 
-       offset = strp->stm.offset;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
-               WARN_ON_ONCE(skb_copy_bits(strp->anchor, offset,
+               WARN_ON_ONCE(skb_copy_bits(in_skb, offset,
                                           skb_frag_address(frag),
                                           skb_frag_size(frag)));
                offset += skb_frag_size(frag);
        }
 
-       skb_copy_header(skb, strp->anchor);
+       skb->len = len;
+       skb->data_len = len;
+       skb_copy_header(skb, in_skb);
+       return skb;
+}
+
+/* Create a new skb with the contents of input copied to its page frags */
+static struct sk_buff *tls_strp_msg_make_copy(struct tls_strparser *strp)
+{
+       struct strp_msg *rxm;
+       struct sk_buff *skb;
+
+       skb = tls_strp_skb_copy(strp, strp->anchor, strp->stm.offset,
+                               strp->stm.full_len);
+       if (!skb)
+               return NULL;
+
        rxm = strp_msg(skb);
        rxm->offset = 0;
        return skb;
@@ -180,22 +198,22 @@ static void tls_strp_flush_anchor_copy(struct tls_strparser *strp)
        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i], false);
        shinfo->nr_frags = 0;
+       if (strp->copy_mode) {
+               kfree_skb_list(shinfo->frag_list);
+               shinfo->frag_list = NULL;
+       }
        strp->copy_mode = 0;
+       strp->mixed_decrypted = 0;
 }
 
-static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
-                          unsigned int offset, size_t in_len)
+static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+                               struct sk_buff *in_skb, unsigned int offset,
+                               size_t in_len)
 {
-       struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
-       struct sk_buff *skb;
-       skb_frag_t *frag;
        size_t len, chunk;
+       skb_frag_t *frag;
        int sz;
 
-       if (strp->msg_ready)
-               return 0;
-
-       skb = strp->anchor;
        frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
 
        len = in_len;
@@ -208,19 +226,26 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                                           skb_frag_size(frag),
                                           chunk));
 
-               sz = tls_rx_msg_size(strp, strp->anchor);
-               if (sz < 0) {
-                       desc->error = sz;
-                       return 0;
-               }
-
-               /* We may have over-read, sz == 0 is guaranteed under-read */
-               if (sz > 0)
-                       chunk = min_t(size_t, chunk, sz - skb->len);
-
                skb->len += chunk;
                skb->data_len += chunk;
                skb_frag_size_add(frag, chunk);
+
+               sz = tls_rx_msg_size(strp, skb);
+               if (sz < 0)
+                       return sz;
+
+               /* We may have over-read, sz == 0 is guaranteed under-read */
+               if (unlikely(sz && sz < skb->len)) {
+                       int over = skb->len - sz;
+
+                       WARN_ON_ONCE(over > chunk);
+                       skb->len -= over;
+                       skb->data_len -= over;
+                       skb_frag_size_add(frag, -over);
+
+                       chunk -= over;
+               }
+
                frag++;
                len -= chunk;
                offset += chunk;
@@ -247,15 +272,99 @@ static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
                offset += chunk;
        }
 
-       if (strp->stm.full_len == skb->len) {
+read_done:
+       return in_len - len;
+}
+
+static int tls_strp_copyin_skb(struct tls_strparser *strp, struct sk_buff *skb,
+                              struct sk_buff *in_skb, unsigned int offset,
+                              size_t in_len)
+{
+       struct sk_buff *nskb, *first, *last;
+       struct skb_shared_info *shinfo;
+       size_t chunk;
+       int sz;
+
+       if (strp->stm.full_len)
+               chunk = strp->stm.full_len - skb->len;
+       else
+               chunk = TLS_MAX_PAYLOAD_SIZE + PAGE_SIZE;
+       chunk = min(chunk, in_len);
+
+       nskb = tls_strp_skb_copy(strp, in_skb, offset, chunk);
+       if (!nskb)
+               return -ENOMEM;
+
+       shinfo = skb_shinfo(skb);
+       if (!shinfo->frag_list) {
+               shinfo->frag_list = nskb;
+               nskb->prev = nskb;
+       } else {
+               first = shinfo->frag_list;
+               last = first->prev;
+               last->next = nskb;
+               first->prev = nskb;
+       }
+
+       skb->len += chunk;
+       skb->data_len += chunk;
+
+       if (!strp->stm.full_len) {
+               sz = tls_rx_msg_size(strp, skb);
+               if (sz < 0)
+                       return sz;
+
+               /* We may have over-read, sz == 0 is guaranteed under-read */
+               if (unlikely(sz && sz < skb->len)) {
+                       int over = skb->len - sz;
+
+                       WARN_ON_ONCE(over > chunk);
+                       skb->len -= over;
+                       skb->data_len -= over;
+                       __pskb_trim(nskb, nskb->len - over);
+
+                       chunk -= over;
+               }
+
+               strp->stm.full_len = sz;
+       }
+
+       return chunk;
+}
+
+static int tls_strp_copyin(read_descriptor_t *desc, struct sk_buff *in_skb,
+                          unsigned int offset, size_t in_len)
+{
+       struct tls_strparser *strp = (struct tls_strparser *)desc->arg.data;
+       struct sk_buff *skb;
+       int ret;
+
+       if (strp->msg_ready)
+               return 0;
+
+       skb = strp->anchor;
+       if (!skb->len)
+               skb_copy_decrypted(skb, in_skb);
+       else
+               strp->mixed_decrypted |= !!skb_cmp_decrypted(skb, in_skb);
+
+       if (IS_ENABLED(CONFIG_TLS_DEVICE) && strp->mixed_decrypted)
+               ret = tls_strp_copyin_skb(strp, skb, in_skb, offset, in_len);
+       else
+               ret = tls_strp_copyin_frag(strp, skb, in_skb, offset, in_len);
+       if (ret < 0) {
+               desc->error = ret;
+               ret = 0;
+       }
+
+       if (strp->stm.full_len && strp->stm.full_len == skb->len) {
                desc->count = 0;
 
                strp->msg_ready = 1;
                tls_rx_msg_ready(strp);
        }
 
-read_done:
-       return in_len - len;
+       return ret;
 }
 
 static int tls_strp_read_copyin(struct tls_strparser *strp)
@@ -315,15 +424,19 @@ static int tls_strp_read_copy(struct tls_strparser *strp, bool qshort)
        return 0;
 }
 
-static bool tls_strp_check_no_dup(struct tls_strparser *strp)
+static bool tls_strp_check_queue_ok(struct tls_strparser *strp)
 {
        unsigned int len = strp->stm.offset + strp->stm.full_len;
-       struct sk_buff *skb;
+       struct sk_buff *first, *skb;
        u32 seq;
 
-       skb = skb_shinfo(strp->anchor)->frag_list;
-       seq = TCP_SKB_CB(skb)->seq;
+       first = skb_shinfo(strp->anchor)->frag_list;
+       skb = first;
+       seq = TCP_SKB_CB(first)->seq;
 
+       /* Make sure there's no duplicate data in the queue,
+        * and the decrypted status matches.
+        */
        while (skb->len < len) {
                seq += skb->len;
                len -= skb->len;
@@ -331,6 +444,8 @@ static bool tls_strp_check_no_dup(struct tls_strparser *strp)
 
                if (TCP_SKB_CB(skb)->seq != seq)
                        return false;
+               if (skb_cmp_decrypted(first, skb))
+                       return false;
        }
 
        return true;
@@ -411,7 +526,7 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
                        return tls_strp_read_copy(strp, true);
        }
 
-       if (!tls_strp_check_no_dup(strp))
+       if (!tls_strp_check_queue_ok(strp))
                return tls_strp_read_copy(strp, false);
 
        strp->msg_ready = 1;
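One detail in tls_strp_copyin_skb() above that is easy to miss: frag_list is kept as a singly linked chain whose head's prev pointer caches the tail, so each fragment is appended in O(1) without walking the list. A minimal standalone sketch of that linking discipline (plain C, the node type is illustrative; in the patch the nodes are sk_buffs hanging off skb_shinfo(anchor)->frag_list):

#include <stddef.h>

struct node {
	struct node *next;
	struct node *prev;	/* meaningful only on the head: cached tail */
};

/* Append in O(1): the head's prev always points at the current tail. */
static void append(struct node **headp, struct node *n)
{
	struct node *head = *headp;

	n->next = NULL;
	if (!head) {
		n->prev = n;	/* single element is its own tail */
		*headp = n;
		return;
	}
	head->prev->next = n;	/* old tail -> new node */
	head->prev = n;		/* cache the new tail on the head */
}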
index 635b8bf..1a53c8f 100644 (file)
@@ -70,7 +70,9 @@ noinline void tls_err_abort(struct sock *sk, int err)
 {
        WARN_ON_ONCE(err >= 0);
        /* sk->sk_err should contain a positive error code. */
-       sk->sk_err = -err;
+       WRITE_ONCE(sk->sk_err, -err);
+       /* Paired with smp_rmb() in tcp_poll() */
+       smp_wmb();
        sk_error_report(sk);
 }
 
@@ -2304,10 +2306,14 @@ static void tls_data_ready(struct sock *sk)
        struct tls_context *tls_ctx = tls_get_ctx(sk);
        struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
        struct sk_psock *psock;
+       gfp_t alloc_save;
 
        trace_sk_data_ready(sk);
 
+       alloc_save = sk->sk_allocation;
+       sk->sk_allocation = GFP_ATOMIC;
        tls_strp_data_ready(&ctx->strp);
+       sk->sk_allocation = alloc_save;
 
        psock = sk_psock_get(sk);
        if (psock) {
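The tls_err_abort() change above (and the matching one in tls_strp_abort_strp()) pairs WRITE_ONCE() of sk->sk_err plus smp_wmb() with the smp_rmb() in tcp_poll(), so a poller that sees the wakeup also sees the error code. A minimal sketch of that publish/observe pairing with hypothetical fields, not the socket members themselves:

#include <asm/barrier.h>
#include <linux/compiler.h>

struct foo {
	int err;	/* written by the abort path, read by pollers */
	int ready;	/* flag the reader actually polls on */
};

/* Writer: make the error visible before the flag that wakes readers. */
static void foo_abort(struct foo *f, int err)
{
	WRITE_ONCE(f->err, err);
	smp_wmb();			/* order err before ready */
	WRITE_ONCE(f->ready, 1);
}

/* Reader: once ready is observed, the err store is guaranteed visible. */
static int foo_poll(const struct foo *f)
{
	if (!READ_ONCE(f->ready))
		return 0;
	smp_rmb();			/* pairs with smp_wmb() above */
	return READ_ONCE(f->err);
}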
index cc695c9..e7728b5 100644 (file)
@@ -2553,7 +2553,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
 {
        struct unix_sock *u = unix_sk(sk);
        struct sk_buff *skb;
-       int err, copied;
+       int err;
 
        mutex_lock(&u->iolock);
        skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
@@ -2561,10 +2561,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
        if (!skb)
                return err;
 
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-
-       return copied;
+       return recv_actor(sk, skb);
 }
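
Editorial note: this hunk, like the virtio transport hunk below, changes the read_skb contract: the recv_actor now consumes the skb instead of the caller freeing it, so an actor can queue or retain the buffer without a copy. A sketch of an actor under the new contract (my_recv_actor is hypothetical):

	static int my_recv_actor(struct sock *sk, struct sk_buff *skb)
	{
		int len = skb->len;

		/* The actor owns the skb now: free it (or queue it) itself. */
		consume_skb(skb);
		return len;
	}
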
 
 /*
index 413407b..efb8a09 100644 (file)
@@ -1462,7 +1462,7 @@ static int vsock_connect(struct socket *sock, struct sockaddr *addr,
                        vsock_transport_cancel_pkt(vsk);
                        vsock_remove_connected(vsk);
                        goto out_wait;
-               } else if (timeout == 0) {
+               } else if ((sk->sk_state != TCP_ESTABLISHED) && (timeout == 0)) {
                        err = -ETIMEDOUT;
                        sk->sk_state = TCP_CLOSE;
                        sock->state = SS_UNCONNECTED;
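
Editorial note: the added state test closes a race where the wait returns with its budget exhausted (timeout == 0) even though the handshake completed while we were being rescheduled; the old code would tear the socket down anyway. The condition in isolation (connect_timed_out() is a hypothetical helper):

	static bool connect_timed_out(const struct sock *sk, long timeout)
	{
		return sk->sk_state != TCP_ESTABLISHED && timeout == 0;
	}
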
index e487855..b769fc2 100644 (file)
@@ -1441,7 +1441,6 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
        struct sock *sk = sk_vsock(vsk);
        struct sk_buff *skb;
        int off = 0;
-       int copied;
        int err;
 
        spin_lock_bh(&vvs->rx_lock);
@@ -1454,9 +1453,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
        if (!skb)
                return err;
 
-       copied = recv_actor(sk, skb);
-       kfree_skb(skb);
-       return copied;
+       return recv_actor(sk, skb);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_read_skb);
 
index 5b0c4d5..b3ec9ea 100644 (file)
@@ -368,12 +368,12 @@ static void cfg80211_sched_scan_stop_wk(struct work_struct *work)
        rdev = container_of(work, struct cfg80211_registered_device,
                           sched_scan_stop_wk);
 
-       rtnl_lock();
+       wiphy_lock(&rdev->wiphy);
        list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
                if (req->nl_owner_dead)
                        cfg80211_stop_sched_scan_req(rdev, req, false);
        }
-       rtnl_unlock();
+       wiphy_unlock(&rdev->wiphy);
 }
 
 static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)
index d95f805..087d60c 100644 (file)
@@ -10723,6 +10723,8 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
                if (!info->attrs[NL80211_ATTR_MLD_ADDR])
                        return -EINVAL;
                req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
+               if (!is_valid_ether_addr(req.ap_mld_addr))
+                       return -EINVAL;
        }
 
        req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
index 0d40d6a..949e1fb 100644 (file)
@@ -2440,11 +2440,11 @@ static void reg_leave_invalid_chans(struct wiphy *wiphy)
        struct wireless_dev *wdev;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
-       ASSERT_RTNL();
-
+       wiphy_lock(wiphy);
        list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
                if (!reg_wdev_chan_valid(wiphy, wdev))
                        cfg80211_leave(rdev, wdev);
+       wiphy_unlock(wiphy);
 }
 
 static void reg_check_chans_work(struct work_struct *work)
index a138225..c501db7 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright 2016      Intel Deutschland GmbH
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
  */
 #include <linux/kernel.h>
 #include <linux/slab.h>
@@ -540,6 +540,10 @@ static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
        /* skip the TBTT offset */
        pos++;
 
+       /* ignore entries with invalid BSSID */
+       if (!is_valid_ether_addr(pos))
+               return -EINVAL;
+
        memcpy(entry->bssid, pos, ETH_ALEN);
        pos += ETH_ALEN;
 
index bef28c6..408f5e5 100644 (file)
@@ -378,7 +378,7 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
                break;
        default:
                xdo->dev = NULL;
-               dev_put(dev);
+               netdev_put(dev, &xdo->dev_tracker);
                NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
                return -EINVAL;
        }
index 35279c2..1f99dc4 100644 (file)
@@ -310,52 +310,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->mark = 0;
 }
 
-static int xfrmi_input(struct sk_buff *skb, int nexthdr, __be32 spi,
-                      int encap_type, unsigned short family)
-{
-       struct sec_path *sp;
-
-       sp = skb_sec_path(skb);
-       if (sp && (sp->len || sp->olen) &&
-           !xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
-               goto discard;
-
-       XFRM_SPI_SKB_CB(skb)->family = family;
-       if (family == AF_INET) {
-               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
-       } else {
-               XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct ipv6hdr, daddr);
-               XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = NULL;
-       }
-
-       return xfrm_input(skb, nexthdr, spi, encap_type);
-discard:
-       kfree_skb(skb);
-       return 0;
-}
-
-static int xfrmi4_rcv(struct sk_buff *skb)
-{
-       return xfrmi_input(skb, ip_hdr(skb)->protocol, 0, 0, AF_INET);
-}
-
-static int xfrmi6_rcv(struct sk_buff *skb)
-{
-       return xfrmi_input(skb, skb_network_header(skb)[IP6CB(skb)->nhoff],
-                          0, 0, AF_INET6);
-}
-
-static int xfrmi4_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET);
-}
-
-static int xfrmi6_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
-{
-       return xfrmi_input(skb, nexthdr, spi, encap_type, AF_INET6);
-}
-
 static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
 {
        const struct xfrm_mode *inner_mode;
@@ -991,8 +945,8 @@ static struct pernet_operations xfrmi_net_ops = {
 };
 
 static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
-       .handler        =       xfrmi6_rcv,
-       .input_handler  =       xfrmi6_input,
+       .handler        =       xfrm6_rcv,
+       .input_handler  =       xfrm_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi6_err,
        .priority       =       10,
@@ -1042,8 +996,8 @@ static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = {
 #endif
 
 static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
-       .handler        =       xfrmi4_rcv,
-       .input_handler  =       xfrmi4_input,
+       .handler        =       xfrm4_rcv,
+       .input_handler  =       xfrm_input,
        .cb_handler     =       xfrmi_rcv_cb,
        .err_handler    =       xfrmi4_err,
        .priority       =       10,
index 5c61ec0..6d15788 100644 (file)
@@ -3312,7 +3312,7 @@ xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
 
 static inline int
 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
-             unsigned short family)
+             unsigned short family, u32 if_id)
 {
        if (xfrm_state_kern(x))
                return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
@@ -3323,7 +3323,8 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
                (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
                 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
                !(x->props.mode != XFRM_MODE_TRANSPORT &&
-                 xfrm_state_addr_cmp(tmpl, x, family));
+                 xfrm_state_addr_cmp(tmpl, x, family)) &&
+               (if_id == 0 || if_id == x->if_id);
 }
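
Editorial note: the extra clause threads the interface ID through template matching: an if_id of 0 acts as a wildcard, otherwise the state must belong to the same XFRM interface as the policy lookup. As a standalone predicate (if_id_matches() is hypothetical):

	static bool if_id_matches(u32 lookup_if_id, u32 state_if_id)
	{
		return lookup_if_id == 0 || lookup_if_id == state_if_id;
	}
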
 
 /*
@@ -3335,7 +3336,7 @@ xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
  */
 static inline int
 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
-              unsigned short family)
+              unsigned short family, u32 if_id)
 {
        int idx = start;
 
@@ -3345,7 +3346,7 @@ xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int star
        } else
                start = -1;
        for (; idx < sp->len; idx++) {
-               if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
+               if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
                        return ++idx;
                if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
                        if (start == -1)
@@ -3712,12 +3713,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                }
                xfrm_nr = ti;
 
-               if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK &&
-                   !xfrm_nr) {
-                       XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
-                       goto reject;
-               }
-
                if (npols > 1) {
                        xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
                        tpp = stp;
@@ -3730,7 +3725,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                 * are implied between each two transformations.
                 */
                for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
-                       k = xfrm_policy_ok(tpp[i], sp, k, family);
+                       k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
                        if (k < 0) {
                                if (k < -1)
                                        /* "-2 - errored_index" returned */
@@ -3745,9 +3740,6 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
                        goto reject;
                }
 
-               if (if_id)
-                       secpath_reset(skb);
-
                xfrm_pols_put(pols, npols);
                return 1;
        }
index d720e16..c34a2a0 100644 (file)
@@ -1770,7 +1770,7 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
 }
 
 static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
-                        struct netlink_ext_ack *extack)
+                        int dir, struct netlink_ext_ack *extack)
 {
        u16 prev_family;
        int i;
@@ -1796,6 +1796,10 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
                switch (ut[i].mode) {
                case XFRM_MODE_TUNNEL:
                case XFRM_MODE_BEET:
+                       if (ut[i].optional && dir == XFRM_POLICY_OUT) {
+                               NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
+                               return -EINVAL;
+                       }
                        break;
                default:
                        if (ut[i].family != prev_family) {
@@ -1833,7 +1837,7 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
 }
 
 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
-                              struct netlink_ext_ack *extack)
+                              int dir, struct netlink_ext_ack *extack)
 {
        struct nlattr *rt = attrs[XFRMA_TMPL];
 
@@ -1844,7 +1848,7 @@ static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
                int nr = nla_len(rt) / sizeof(*utmpl);
                int err;
 
-               err = validate_tmpl(nr, utmpl, pol->family, extack);
+               err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
                if (err)
                        return err;
 
@@ -1921,7 +1925,7 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
        if (err)
                goto error;
 
-       if (!(err = copy_from_user_tmpl(xp, attrs, extack)))
+       if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
                err = copy_from_user_sec_ctx(xp, attrs);
        if (err)
                goto error;
@@ -1980,6 +1984,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        if (err) {
                xfrm_dev_policy_delete(xp);
+               xfrm_dev_policy_free(xp);
                security_xfrm_policy_free(xp->security);
                kfree(xp);
                return err;
@@ -3499,7 +3504,7 @@ static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
                return NULL;
 
        nr = ((len - sizeof(*p)) / sizeof(*ut));
-       if (validate_tmpl(nr, ut, p->sel.family, NULL))
+       if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
                return NULL;
 
        if (p->dir > XFRM_POLICY_OUT)
index 6448b78..bf66277 100644 (file)
@@ -498,7 +498,6 @@ int main(int argc, char **argv)
                                        "Option -%c requires an argument.\n\n",
                                        optopt);
                case 'h':
-                       __fallthrough;
                default:
                        Usage();
                        return 0;
index 0aecf93..8b21520 100644 (file)
@@ -26,5 +26,9 @@ quiet_cmd_flask = GEN     $(obj)/flask.h $(obj)/av_permissions.h
       cmd_flask = $< $(obj)/flask.h $(obj)/av_permissions.h
 
 targets += flask.h av_permissions.h
-$(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/genheaders/genheaders FORCE
+# once make >= 4.3 is required, we can use grouped targets in the rule below,
+# which basically involves adding both headers and a '&' before the colon, see
+# the example below:
+#   $(obj)/flask.h $(obj)/av_permissions.h &: scripts/selinux/...
+$(obj)/flask.h: scripts/selinux/genheaders/genheaders FORCE
        $(call if_changed,flask)
index 46e273b..50a6b50 100644 (file)
@@ -141,6 +141,14 @@ int snd_pcm_area_copy(const struct snd_pcm_channel_area *src_channel,
 
 void *snd_pcm_plug_buf_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t size);
 void snd_pcm_plug_buf_unlock(struct snd_pcm_substream *plug, void *ptr);
+#else
+
+static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
+static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
+static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
+
+#endif
+
 snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream,
                                     const char *ptr, snd_pcm_uframes_t size,
                                     int in_kernel);
@@ -151,14 +159,6 @@ snd_pcm_sframes_t snd_pcm_oss_writev3(struct snd_pcm_substream *substream,
 snd_pcm_sframes_t snd_pcm_oss_readv3(struct snd_pcm_substream *substream,
                                     void **bufs, snd_pcm_uframes_t frames);
 
-#else
-
-static inline snd_pcm_sframes_t snd_pcm_plug_client_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t drv_size) { return drv_size; }
-static inline snd_pcm_sframes_t snd_pcm_plug_slave_size(struct snd_pcm_substream *handle, snd_pcm_uframes_t clt_size) { return clt_size; }
-static inline int snd_pcm_plug_slave_format(int format, const struct snd_mask *format_mask) { return format; }
-
-#endif
-
 #ifdef PLUGIN_DEBUG
 #define pdprintf(fmt, args...) printk(KERN_DEBUG "plugin: " fmt, ##args)
 #else
index a15f55b..295163b 100644 (file)
@@ -259,8 +259,10 @@ int snd_dg00x_stream_init_duplex(struct snd_dg00x *dg00x)
                return err;
 
        err = init_stream(dg00x, &dg00x->tx_stream);
-       if (err < 0)
+       if (err < 0) {
                destroy_stream(dg00x, &dg00x->rx_stream);
+               return err;
+       }
 
        err = amdtp_domain_init(&dg00x->domain);
        if (err < 0) {
index accc9d2..6c043fb 100644 (file)
@@ -611,7 +611,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_power_up_pm);
 int snd_hdac_keep_power_up(struct hdac_device *codec)
 {
        if (!atomic_inc_not_zero(&codec->in_pm)) {
-               int ret = pm_runtime_get_if_in_use(&codec->dev);
+               int ret = pm_runtime_get_if_active(&codec->dev, true);
                if (!ret)
                        return -1;
                if (ret < 0)
index 230f65a..388db5f 100644 (file)
@@ -892,10 +892,10 @@ int snd_gf1_pcm_new(struct snd_gus_card *gus, int pcm_dev, int control_index)
                kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control1, gus);
        else
                kctl = snd_ctl_new1(&snd_gf1_pcm_volume_control, gus);
+       kctl->id.index = control_index;
        err = snd_ctl_add(card, kctl);
        if (err < 0)
                return err;
-       kctl->id.index = control_index;
 
        return 0;
 }
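
Editorial note: this is the first of several hunks (cmipci, aureon, ice1712, vt1724, ymfpci below) applying the same rule: fill in kctl->id before snd_ctl_add() publishes the control, because the ID is recorded at add time and silent later edits leave the card's lookup state stale. The shared shape, as a hypothetical helper:

	static int add_ctl_with_device(struct snd_card *card,
				       const struct snd_kcontrol_new *tmpl,
				       void *private_data, int device)
	{
		struct snd_kcontrol *kctl = snd_ctl_new1(tmpl, private_data);

		if (!kctl)
			return -ENOMEM;
		kctl->id.device = device;	/* set ID fields first... */
		return snd_ctl_add(card, kctl);	/* ...then publish */
	}
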
index 727db6d..6d25c12 100644 (file)
@@ -2688,20 +2688,20 @@ static int snd_cmipci_mixer_new(struct cmipci *cm, int pcm_spdif_device)
                }
                if (cm->can_ac3_hw) {
                        kctl = snd_ctl_new1(&snd_cmipci_spdif_default, cm);
+                       kctl->id.device = pcm_spdif_device;
                        err = snd_ctl_add(card, kctl);
                        if (err < 0)
                                return err;
-                       kctl->id.device = pcm_spdif_device;
                        kctl = snd_ctl_new1(&snd_cmipci_spdif_mask, cm);
+                       kctl->id.device = pcm_spdif_device;
                        err = snd_ctl_add(card, kctl);
                        if (err < 0)
                                return err;
-                       kctl->id.device = pcm_spdif_device;
                        kctl = snd_ctl_new1(&snd_cmipci_spdif_stream, cm);
+                       kctl->id.device = pcm_spdif_device;
                        err = snd_ctl_add(card, kctl);
                        if (err < 0)
                                return err;
-                       kctl->id.device = pcm_spdif_device;
                }
                if (cm->chip_version <= 37) {
                        sw = snd_cmipci_old_mixer_switches;
index 62f4584..7d882b3 100644 (file)
@@ -531,7 +531,7 @@ static int load_firmware(struct snd_cs46xx *chip)
        return err;
 }
 
-int snd_cs46xx_download_image(struct snd_cs46xx *chip)
+static __maybe_unused int snd_cs46xx_download_image(struct snd_cs46xx *chip)
 {
        int idx, err;
        unsigned int offset = 0;
index 9f79c0a..bd19f92 100644 (file)
@@ -2458,10 +2458,14 @@ int snd_hda_create_dig_out_ctls(struct hda_codec *codec,
                   type == HDA_PCM_TYPE_HDMI) {
                /* suppose a single SPDIF device */
                for (dig_mix = dig_mixes; dig_mix->name; dig_mix++) {
+                       struct snd_ctl_elem_id id;
+
                        kctl = find_mixer_ctl(codec, dig_mix->name, 0, 0);
                        if (!kctl)
                                break;
-                       kctl->id.index = spdif_index;
+                       id = kctl->id;
+                       id.index = spdif_index;
+                       snd_ctl_rename_id(codec->card, &kctl->id, &id);
                }
                bus->primary_dig_out_type = HDA_PCM_TYPE_HDMI;
        }
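
Editorial note: for a control that is already registered, the ID can no longer be poked directly; the hunk above switches to snd_ctl_rename_id(), which updates the card's bookkeeping along with the ID. Sketched as a helper (set_spdif_index() is hypothetical):

	static int set_spdif_index(struct snd_card *card,
				   struct snd_kcontrol *kctl,
				   unsigned int index)
	{
		struct snd_ctl_elem_id id = kctl->id;	/* copy current ID */

		id.index = index;
		return snd_ctl_rename_id(card, &kctl->id, &id);
	}
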
index fc114e5..dbf7aa8 100644 (file)
@@ -1155,8 +1155,8 @@ static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
        return path && path->ctls[ctl_type];
 }
 
-static const char * const channel_name[4] = {
-       "Front", "Surround", "CLFE", "Side"
+static const char * const channel_name[] = {
+       "Front", "Surround", "CLFE", "Side", "Back",
 };
 
 /* give some appropriate ctl name prefix for the given line out channel */
@@ -1182,7 +1182,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
 
        /* multi-io channels */
        if (ch >= cfg->line_outs)
-               return channel_name[ch];
+               goto fixed_name;
 
        switch (cfg->line_out_type) {
        case AUTO_PIN_SPEAKER_OUT:
@@ -1234,6 +1234,7 @@ static const char *get_line_out_pfx(struct hda_codec *codec, int ch,
        if (cfg->line_outs == 1 && !spec->multi_ios)
                return "Line Out";
 
+ fixed_name:
        if (ch >= ARRAY_SIZE(channel_name)) {
                snd_BUG();
                return "PCM";
index 099722e..748a3c4 100644 (file)
@@ -1306,6 +1306,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
        SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
        SND_PCI_QUIRK(0x3842, 0x1038, "EVGA X99 Classified", QUIRK_R3DI),
+       SND_PCI_QUIRK(0x3842, 0x104b, "EVGA X299 Dark", QUIRK_R3DI),
        SND_PCI_QUIRK(0x3842, 0x1055, "EVGA Z390 DARK", QUIRK_R3DI),
        SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
        SND_PCI_QUIRK(0x1102, 0x0018, "Recon3D", QUIRK_R3D),
index 64a9440..5c0b1a0 100644 (file)
@@ -4589,6 +4589,11 @@ HDA_CODEC_ENTRY(0x10de009d, "GPU 9d HDMI/DP",    patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de009e, "GPU 9e HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de009f, "GPU 9f HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de00a0, "GPU a0 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a3, "GPU a3 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a4, "GPU a4 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a5, "GPU a5 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a6, "GPU a6 HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de00a7, "GPU a7 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI",   patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x67663d82, "Arise 82 HDMI/DP",        patch_gf_hdmi),
index 172ffc2..a5d55a7 100644 (file)
@@ -7063,6 +7063,8 @@ enum {
        ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC295_FIXUP_DISABLE_DAC3,
        ALC285_FIXUP_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_HEADSET_MIC,
        ALC280_FIXUP_HP_HEADSET_MIC,
        ALC221_FIXUP_HP_FRONT_MIC,
        ALC292_FIXUP_TPT460,
@@ -8033,6 +8035,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_THINKPAD_ACPI
        },
+       [ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC245_FIXUP_CS35L41_SPI_2
+       },
+       [ALC285_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11050 },
+                       { 0x1b, 0x03a11c30 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+       },
        [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -9363,7 +9381,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
        SND_PCI_QUIRK(0x103c, 0x8158, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
-       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+       SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
        SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
        SND_PCI_QUIRK(0x103c, 0x827f, "HP x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
@@ -9458,7 +9476,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),
-        SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
@@ -9469,18 +9487,25 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b65, "HP ProBook 455 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b66, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b70, "HP EliteBook 835 G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b72, "HP EliteBook 845 G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b74, "HP EliteBook 845W G10", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x103c, 0x8b77, "HP EliteBook 865 G10", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x103c, 0x8b7a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b7d, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b87, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED),
-       SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+       SND_PCI_QUIRK(0x103c, 0x8c26, "HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9500,6 +9525,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+       SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
        SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
@@ -9520,8 +9547,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
+       SND_PCI_QUIRK(0x1043, 0x1b93, "ASUS G614JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
        SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
@@ -9537,6 +9566,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1f12, "ASUS UM5302", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x1f92, "ASUS ROG Flow X16", ALC289_FIXUP_ASUS_GA401),
        SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+       SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
@@ -9560,6 +9594,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10ec, 0x124c, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
        SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x10ec, 0x12cc, "Intel Reference board", ALC225_FIXUP_HEADSET_JACK),
        SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
@@ -9608,6 +9643,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x5101, "Clevo S510WU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x5157, "Clevo W517GU1", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x51a1, "Clevo NS50MU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x51b1, "Clevo NS50AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x5630, "Clevo NP50RNJS", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70a1, "Clevo NB70T[HJK]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x70b3, "Clevo NK70SB", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9618,6 +9654,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1558, 0x7716, "Clevo NS50PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7717, "Clevo NS70PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x7718, "Clevo L140PU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1558, 0x7724, "Clevo L140AU", ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8228, "Clevo NR40BU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8520, "Clevo NH50D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1558, 0x8521, "Clevo NH77D[CD]", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
@@ -9778,6 +9815,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
        SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
+       SND_PCI_QUIRK(0x8086, 0x3038, "Intel NUC 13", ALC225_FIXUP_HEADSET_JACK),
        SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
 
 #if 0
@@ -11663,7 +11701,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
        SND_PCI_QUIRK(0x103c, 0x870c, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
        SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
+       SND_PCI_QUIRK(0x103c, 0x872b, "HP", ALC897_FIXUP_HP_HSMIC_VERB),
        SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
+       SND_PCI_QUIRK(0x103c, 0x8768, "HP Slim Desktop S01", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),
        SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
@@ -11685,10 +11725,13 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
        SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
        SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x1064, "Lenovo P3 Tower", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32ca, "Lenovo ThinkCentre M80", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cb, "Lenovo ThinkCentre M70", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32cf, "Lenovo ThinkCentre M950", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x32f7, "Lenovo ThinkCentre M90", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x3321, "Lenovo ThinkCentre M70 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
+       SND_PCI_QUIRK(0x17aa, 0x331b, "Lenovo ThinkCentre M90 Gen4", ALC897_FIXUP_HEADSET_MIC_PIN),
        SND_PCI_QUIRK(0x17aa, 0x3742, "Lenovo TianYi510Pro-14IOB", ALC897_FIXUP_HEADSET_MIC_PIN2),
        SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
        SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
index 24b9782..0278493 100644 (file)
@@ -1899,11 +1899,12 @@ static int aureon_add_controls(struct snd_ice1712 *ice)
                else {
                        for (i = 0; i < ARRAY_SIZE(cs8415_controls); i++) {
                                struct snd_kcontrol *kctl;
-                               err = snd_ctl_add(ice->card, (kctl = snd_ctl_new1(&cs8415_controls[i], ice)));
-                               if (err < 0)
-                                       return err;
+                               kctl = snd_ctl_new1(&cs8415_controls[i], ice);
                                if (i > 1)
                                        kctl->id.device = ice->pcm->device;
+                               err = snd_ctl_add(ice->card, kctl);
+                               if (err < 0)
+                                       return err;
                        }
                }
        }
index a5241a2..3b0c3e7 100644 (file)
@@ -2371,22 +2371,26 @@ int snd_ice1712_spdif_build_controls(struct snd_ice1712 *ice)
 
        if (snd_BUG_ON(!ice->pcm_pro))
                return -EIO;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice));
+       kctl = snd_ctl_new1(&snd_ice1712_spdif_default, ice);
+       kctl->id.device = ice->pcm_pro->device;
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
+       kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice);
        kctl->id.device = ice->pcm_pro->device;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskc, ice));
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
+       kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice);
        kctl->id.device = ice->pcm_pro->device;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_maskp, ice));
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
+       kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice);
        kctl->id.device = ice->pcm_pro->device;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_ice1712_spdif_stream, ice));
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = ice->pcm_pro->device;
        ice->spdif.stream_ctl = kctl;
        return 0;
 }
index 6fab2ad..1dc776a 100644 (file)
@@ -2392,23 +2392,27 @@ static int snd_vt1724_spdif_build_controls(struct snd_ice1712 *ice)
        if (err < 0)
                return err;
 
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice));
+       kctl = snd_ctl_new1(&snd_vt1724_spdif_default, ice);
+       kctl->id.device = ice->pcm->device;
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
+       kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice);
        kctl->id.device = ice->pcm->device;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskc, ice));
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
+       kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice);
        kctl->id.device = ice->pcm->device;
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_maskp, ice));
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = ice->pcm->device;
 #if 0 /* use default only */
-       err = snd_ctl_add(ice->card, kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice));
+       kctl = snd_ctl_new1(&snd_vt1724_spdif_stream, ice);
+       kctl->id.device = ice->pcm->device;
+       err = snd_ctl_add(ice->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = ice->pcm->device;
        ice->spdif.stream_ctl = kctl;
 #endif
        return 0;
index 6971eec..6b8d869 100644 (file)
@@ -1822,20 +1822,20 @@ int snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
        if (snd_BUG_ON(!chip->pcm_spdif))
                return -ENXIO;
        kctl = snd_ctl_new1(&snd_ymfpci_spdif_default, chip);
+       kctl->id.device = chip->pcm_spdif->device;
        err = snd_ctl_add(chip->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = chip->pcm_spdif->device;
        kctl = snd_ctl_new1(&snd_ymfpci_spdif_mask, chip);
+       kctl->id.device = chip->pcm_spdif->device;
        err = snd_ctl_add(chip->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = chip->pcm_spdif->device;
        kctl = snd_ctl_new1(&snd_ymfpci_spdif_stream, chip);
+       kctl->id.device = chip->pcm_spdif->device;
        err = snd_ctl_add(chip->card, kctl);
        if (err < 0)
                return err;
-       kctl->id.device = chip->pcm_spdif->device;
        chip->spdif_pcm_ctl = kctl;
 
        /* direct recording source */
index afddb9a..b1337b9 100644 (file)
@@ -211,8 +211,7 @@ static int create_acp63_platform_devs(struct pci_dev *pci, struct acp63_dev_data
        case ACP63_PDM_DEV_MASK:
                adata->pdm_dev_index  = 0;
                acp63_fill_platform_dev_info(&pdevinfo[0], parent, NULL, "acp_ps_pdm_dma",
-                                            0, adata->res, 1, &adata->acp_lock,
-                                            sizeof(adata->acp_lock));
+                                            0, adata->res, 1, NULL, 0);
                acp63_fill_platform_dev_info(&pdevinfo[1], parent, NULL, "dmic-codec",
                                             0, NULL, 0, NULL, 0);
                acp63_fill_platform_dev_info(&pdevinfo[2], parent, NULL, "acp_ps_mach",
index 46b9132..3a83dc1 100644 (file)
@@ -361,12 +361,12 @@ static int acp63_pdm_audio_probe(struct platform_device *pdev)
 {
        struct resource *res;
        struct pdm_dev_data *adata;
+       struct acp63_dev_data *acp_data;
+       struct device *parent;
        int status;
 
-       if (!pdev->dev.platform_data) {
-               dev_err(&pdev->dev, "platform_data not retrieved\n");
-               return -ENODEV;
-       }
+       parent = pdev->dev.parent;
+       acp_data = dev_get_drvdata(parent);
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "IORESOURCE_MEM FAILED\n");
@@ -382,7 +382,7 @@ static int acp63_pdm_audio_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        adata->capture_stream = NULL;
-       adata->acp_lock = pdev->dev.platform_data;
+       adata->acp_lock = &acp_data->acp_lock;
        dev_set_drvdata(&pdev->dev, adata);
        status = devm_snd_soc_register_component(&pdev->dev,
                                                 &acp63_pdm_component,
index 0bc6e40..246299a 100644 (file)
@@ -175,6 +175,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                .driver_data = &acp6x_card,
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "21EF"),
+               }
+       },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "21EM"),
                }
        },
@@ -311,6 +318,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "8A22"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "System76"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "pang12"),
+               }
+       },
        {}
 };
 
index 8538e28..1e42052 100644 (file)
@@ -46,7 +46,7 @@ static const struct reg_default cs35l41_reg[] = {
        { CS35L41_DSP1_RX5_SRC,                 0x00000020 },
        { CS35L41_DSP1_RX6_SRC,                 0x00000021 },
        { CS35L41_DSP1_RX7_SRC,                 0x0000003A },
-       { CS35L41_DSP1_RX8_SRC,                 0x00000001 },
+       { CS35L41_DSP1_RX8_SRC,                 0x0000003B },
        { CS35L41_NGATE1_SRC,                   0x00000008 },
        { CS35L41_NGATE2_SRC,                   0x00000009 },
        { CS35L41_AMP_DIG_VOL_CTRL,             0x00008000 },
@@ -58,8 +58,8 @@ static const struct reg_default cs35l41_reg[] = {
        { CS35L41_IRQ1_MASK2,                   0xFFFFFFFF },
        { CS35L41_IRQ1_MASK3,                   0xFFFF87FF },
        { CS35L41_IRQ1_MASK4,                   0xFEFFFFFF },
-       { CS35L41_GPIO1_CTRL1,                  0xE1000001 },
-       { CS35L41_GPIO2_CTRL1,                  0xE1000001 },
+       { CS35L41_GPIO1_CTRL1,                  0x81000001 },
+       { CS35L41_GPIO2_CTRL1,                  0x81000001 },
        { CS35L41_MIXER_NGATE_CFG,              0x00000000 },
        { CS35L41_MIXER_NGATE_CH1_CFG,          0x00000303 },
        { CS35L41_MIXER_NGATE_CH2_CFG,          0x00000303 },
index 46762f7..e0d2b9b 100644 (file)
@@ -704,9 +704,6 @@ static int cs35l56_sdw_dai_hw_free(struct snd_pcm_substream *substream,
 static int cs35l56_sdw_dai_set_stream(struct snd_soc_dai *dai,
                                      void *sdw_stream, int direction)
 {
-       if (!sdw_stream)
-               return 0;
-
        snd_soc_dai_dma_data_set(dai, direction, sdw_stream);
 
        return 0;
@@ -852,10 +849,11 @@ static void cs35l56_dsp_work(struct work_struct *work)
         */
        if (cs35l56->sdw_peripheral) {
                cs35l56->sdw_irq_no_unmask = true;
-               cancel_work_sync(&cs35l56->sdw_irq_work);
+               flush_work(&cs35l56->sdw_irq_work);
                sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_MASK_1, 0);
                sdw_read_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_STAT_1);
                sdw_write_no_pm(cs35l56->sdw_peripheral, CS35L56_SDW_GEN_INT_STAT_1, 0xFF);
+               flush_work(&cs35l56->sdw_irq_work);
        }
 
        ret = cs35l56_mbox_send(cs35l56, CS35L56_MBOX_CMD_SHUTDOWN);
index da6fcf7..de978c3 100644 (file)
@@ -746,6 +746,8 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
        struct tx_macro *tx = snd_soc_component_get_drvdata(component);
 
        val = ucontrol->value.enumerated.item[0];
+       if (val >= e->items)
+               return -EINVAL;
 
        switch (e->reg) {
        case CDC_TX_INP_MUX_ADC_MUX0_CFG0:
@@ -772,6 +774,9 @@ static int tx_macro_put_dec_enum(struct snd_kcontrol *kcontrol,
        case CDC_TX_INP_MUX_ADC_MUX7_CFG0:
                mic_sel_reg = CDC_TX7_TX_PATH_CFG0;
                break;
+       default:
+               dev_err(component->dev, "Error in configuration!!\n");
+               return -EINVAL;
        }
 
        if (val != 0) {
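
Editorial note: the two additions make the put() handler defensive: ucontrol comes from userspace, so the enum index is validated against e->items, and a default case rejects unexpected mux registers instead of using mic_sel_reg uninitialized. The validation shape in general (my_enum_put() is hypothetical):

	static int my_enum_put(struct snd_kcontrol *kcontrol,
			       struct snd_ctl_elem_value *ucontrol)
	{
		struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
		unsigned int val = ucontrol->value.enumerated.item[0];

		if (val >= e->items)	/* untrusted index from userspace */
			return -EINVAL;
		/* ... apply val knowing it is in range ... */
		return 0;
	}
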
index dcce06b..e6b84e2 100644 (file)
@@ -211,7 +211,7 @@ static int max98363_io_init(struct sdw_slave *slave)
 }
 
 #define MAX98363_RATES SNDRV_PCM_RATE_8000_192000
-#define MAX98363_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)
+#define MAX98363_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
 
 static int max98363_sdw_dai_hw_params(struct snd_pcm_substream *substream,
                                      struct snd_pcm_hw_params *params,
@@ -246,7 +246,7 @@ static int max98363_sdw_dai_hw_params(struct snd_pcm_substream *substream,
        stream_config.frame_rate = params_rate(params);
        stream_config.bps = snd_pcm_format_width(params_format(params));
        stream_config.direction = direction;
-       stream_config.ch_count = params_channels(params);
+       stream_config.ch_count = 1;
 
        if (stream_config.ch_count > runtime->hw.channels_max) {
                stream_config.ch_count = runtime->hw.channels_max;
index 4f19fd9..5a4db89 100644 (file)
@@ -1903,6 +1903,30 @@ static const struct dmi_system_id nau8824_quirk_table[] = {
                },
                .driver_data = (void *)(NAU8824_MONO_SPEAKER),
        },
+       {
+               /* Positivo CW14Q01P */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+                       DMI_MATCH(DMI_BOARD_NAME, "CW14Q01P"),
+               },
+               .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+       },
+       {
+               /* Positivo K1424G */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+                       DMI_MATCH(DMI_BOARD_NAME, "K1424G"),
+               },
+               .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+       },
+       {
+               /* Positivo N14ZP74G */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Positivo Tecnologia SA"),
+                       DMI_MATCH(DMI_BOARD_NAME, "N14ZP74G"),
+               },
+               .driver_data = (void *)(NAU8824_JD_ACTIVE_HIGH),
+       },
        {}
 };
 
index 2935c1b..5bc46b0 100644 (file)
@@ -267,7 +267,9 @@ static int rt5682_i2c_probe(struct i2c_client *i2c)
                ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
                        rt5682_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
                        | IRQF_ONESHOT, "rt5682", rt5682);
-               if (ret)
+               if (!ret)
+                       rt5682->irq = i2c->irq;
+               else
                        dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
        }
 
index f6c798b..5d99254 100644 (file)
@@ -2959,6 +2959,9 @@ static int rt5682_suspend(struct snd_soc_component *component)
        if (rt5682->is_sdw)
                return 0;
 
+       if (rt5682->irq)
+               disable_irq(rt5682->irq);
+
        cancel_delayed_work_sync(&rt5682->jack_detect_work);
        cancel_delayed_work_sync(&rt5682->jd_check_work);
        if (rt5682->hs_jack && (rt5682->jack_type & SND_JACK_HEADSET) == SND_JACK_HEADSET) {
@@ -3027,6 +3030,9 @@ static int rt5682_resume(struct snd_soc_component *component)
        mod_delayed_work(system_power_efficient_wq,
                &rt5682->jack_detect_work, msecs_to_jiffies(0));
 
+       if (rt5682->irq)
+               enable_irq(rt5682->irq);
+
        return 0;
 }
 #else
index d568c69..e8efd8a 100644 (file)
@@ -1462,6 +1462,7 @@ struct rt5682_priv {
        int pll_out[RT5682_PLLS];
 
        int jack_type;
+       int irq;
        int irq_work_delay_time;
 };
 
index 00b6036..c293244 100644 (file)
@@ -53,6 +53,18 @@ static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
        { .reg = 0x09, .def = 0x0000 }
 };
 
+/*
+ * ssm2602 register patch
+ * Workaround for playback distortions after power up: activates digital
+ * core, and then powers on output, DAC, and whole chip at the same time
+ */
+
+static const struct reg_sequence ssm2602_patch[] = {
+       { SSM2602_ACTIVE, 0x01 },
+       { SSM2602_PWR,    0x07 },
+       { SSM2602_RESET,  0x00 },
+};
+
 
 /*Appending several "None"s just for OSS mixer use*/
 static const char *ssm2602_input_select[] = {
@@ -598,6 +610,9 @@ static int ssm260x_component_probe(struct snd_soc_component *component)
                return ret;
        }
 
+       regmap_register_patch(ssm2602->regmap, ssm2602_patch,
+                             ARRAY_SIZE(ssm2602_patch));
+
        /* set the update bits */
        regmap_update_bits(ssm2602->regmap, SSM2602_LINVOL,
                            LINVOL_LRIN_BOTH, LINVOL_LRIN_BOTH);
index 402286d..9c10200 100644 (file)
@@ -1190,7 +1190,6 @@ static const struct regmap_config wcd938x_regmap_config = {
        .readable_reg = wcd938x_readable_register,
        .writeable_reg = wcd938x_writeable_register,
        .volatile_reg = wcd938x_volatile_register,
-       .can_multi_write = true,
 };
 
 static const struct sdw_slave_ops wcd9380_slave_ops = {
index f709231..97f6873 100644 (file)
@@ -645,7 +645,6 @@ static struct regmap_config wsa881x_regmap_config = {
        .readable_reg = wsa881x_readable_register,
        .reg_format_endian = REGMAP_ENDIAN_NATIVE,
        .val_format_endian = REGMAP_ENDIAN_NATIVE,
-       .can_multi_write = true,
 };
 
 enum {
index c609cb6..e80b531 100644 (file)
@@ -946,7 +946,6 @@ static struct regmap_config wsa883x_regmap_config = {
        .writeable_reg = wsa883x_writeable_register,
        .reg_format_endian = REGMAP_ENDIAN_NATIVE,
        .val_format_endian = REGMAP_ENDIAN_NATIVE,
-       .can_multi_write = true,
        .use_single_read = true,
 };
 
index acdf98b..399a489 100644 (file)
@@ -132,13 +132,13 @@ static irqreturn_t i2s_irq_handler(int irq, void *dev_id)
 
                /* Error Handling: TX */
                if (isr[i] & ISR_TXFO) {
-                       dev_err(dev->dev, "TX overrun (ch_id=%d)\n", i);
+                       dev_err_ratelimited(dev->dev, "TX overrun (ch_id=%d)\n", i);
                        irq_valid = true;
                }
 
                /* Error Handling: TX */
                if (isr[i] & ISR_RXFO) {
-                       dev_err(dev->dev, "RX overrun (ch_id=%d)\n", i);
+                       dev_err_ratelimited(dev->dev, "RX overrun (ch_id=%d)\n", i);
                        irq_valid = true;
                }
        }
@@ -183,30 +183,6 @@ static void i2s_stop(struct dw_i2s_dev *dev,
        }
 }
 
-static int dw_i2s_startup(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *cpu_dai)
-{
-       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
-       union dw_i2s_snd_dma_data *dma_data = NULL;
-
-       if (!(dev->capability & DWC_I2S_RECORD) &&
-                       (substream->stream == SNDRV_PCM_STREAM_CAPTURE))
-               return -EINVAL;
-
-       if (!(dev->capability & DWC_I2S_PLAY) &&
-                       (substream->stream == SNDRV_PCM_STREAM_PLAYBACK))
-               return -EINVAL;
-
-       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               dma_data = &dev->play_dma_data;
-       else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-               dma_data = &dev->capture_dma_data;
-
-       snd_soc_dai_set_dma_data(cpu_dai, substream, (void *)dma_data);
-
-       return 0;
-}
-
 static void dw_i2s_config(struct dw_i2s_dev *dev, int stream)
 {
        u32 ch_reg;
@@ -305,12 +281,6 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
        return 0;
 }
 
-static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
-               struct snd_soc_dai *dai)
-{
-       snd_soc_dai_set_dma_data(dai, substream, NULL);
-}
-
 static int dw_i2s_prepare(struct snd_pcm_substream *substream,
                          struct snd_soc_dai *dai)
 {
@@ -382,8 +352,6 @@ static int dw_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt)
 }
 
 static const struct snd_soc_dai_ops dw_i2s_dai_ops = {
-       .startup        = dw_i2s_startup,
-       .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
        .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
@@ -625,6 +593,14 @@ static int dw_configure_dai_by_dt(struct dw_i2s_dev *dev,
 
 }
 
+static int dw_i2s_dai_probe(struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       snd_soc_dai_init_dma_data(dai, &dev->play_dma_data, &dev->capture_dma_data);
+       return 0;
+}
+
 static int dw_i2s_probe(struct platform_device *pdev)
 {
        const struct i2s_platform_data *pdata = pdev->dev.platform_data;
@@ -643,6 +619,7 @@ static int dw_i2s_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        dw_i2s_dai->ops = &dw_i2s_dai_ops;
+       dw_i2s_dai->probe = dw_i2s_dai_probe;
 
        dev->i2s_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
        if (IS_ERR(dev->i2s_base))
index 94341e4..3f08082 100644 (file)
@@ -1159,7 +1159,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
        if (ret) {
                dev_err(&pdev->dev, "failed to pcm register\n");
-               return ret;
+               goto err_pm_disable;
        }
 
        fsl_micfil_dai.capture.formats = micfil->soc->formats;
@@ -1169,9 +1169,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "failed to register component %s\n",
                        fsl_micfil_component.name);
+               goto err_pm_disable;
        }
 
        return ret;
+
+err_pm_disable:
+       pm_runtime_disable(&pdev->dev);
+
+       return ret;
+}
+
+static void fsl_micfil_remove(struct platform_device *pdev)
+{
+       pm_runtime_disable(&pdev->dev);
 }
 
 static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
@@ -1232,6 +1243,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {
 
 static struct platform_driver fsl_micfil_driver = {
        .probe = fsl_micfil_probe,
+       .remove_new = fsl_micfil_remove,
        .driver = {
                .name = "fsl-micfil-dai",
                .pm = &fsl_micfil_pm_ops,
index abdaffb..e3105d4 100644 (file)
@@ -491,14 +491,21 @@ static int fsl_sai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq)
        regmap_update_bits(sai->regmap, reg, FSL_SAI_CR2_MSEL_MASK,
                           FSL_SAI_CR2_MSEL(sai->mclk_id[tx]));
 
-       if (savediv == 1)
+       if (savediv == 1) {
                regmap_update_bits(sai->regmap, reg,
                                   FSL_SAI_CR2_DIV_MASK | FSL_SAI_CR2_BYP,
                                   FSL_SAI_CR2_BYP);
-       else
+               if (fsl_sai_dir_is_synced(sai, adir))
+                       regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx, ofs),
+                                          FSL_SAI_CR2_BCI, FSL_SAI_CR2_BCI);
+               else
+                       regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx, ofs),
+                                          FSL_SAI_CR2_BCI, 0);
+       } else {
                regmap_update_bits(sai->regmap, reg,
                                   FSL_SAI_CR2_DIV_MASK | FSL_SAI_CR2_BYP,
                                   savediv / 2 - 1);
+       }
 
        if (sai->soc_data->max_register >= FSL_SAI_MCTL) {
                /* SAI is in master mode at this point, so enable MCLK */
index 197748a..a53c4f0 100644 (file)
 
 /* SAI Transmit and Receive Configuration 2 Register */
 #define FSL_SAI_CR2_SYNC       BIT(30)
+#define FSL_SAI_CR2_BCI                BIT(28)
 #define FSL_SAI_CR2_MSEL_MASK  (0x3 << 26)
 #define FSL_SAI_CR2_MSEL_BUS   0
 #define FSL_SAI_CR2_MSEL_MCLK1 BIT(26)
index 467edd9..e5ff61c 100644 (file)
@@ -314,7 +314,7 @@ int asoc_simple_startup(struct snd_pcm_substream *substream)
                }
                ret = snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_RATE,
                        fixed_rate, fixed_rate);
-               if (ret)
+               if (ret < 0)
                        goto codec_err;
        }
 
index 6f044cc..5a5e4ec 100644 (file)
@@ -416,6 +416,7 @@ static int __simple_for_each_link(struct asoc_simple_priv *priv,
 
                        if (ret < 0) {
                                of_node_put(codec);
+                               of_node_put(plat);
                                of_node_put(np);
                                goto error;
                        }
index 02683dc..1860099 100644 (file)
@@ -169,6 +169,7 @@ static bool apl_lp_streaming(struct avs_dev *adev)
 {
        struct avs_path *path;
 
+       spin_lock(&adev->path_list_lock);
        /* Any gateway without buffer allocated in LP area disqualifies D0IX. */
        list_for_each_entry(path, &adev->path_list, node) {
                struct avs_path_pipeline *ppl;
@@ -188,11 +189,14 @@ static bool apl_lp_streaming(struct avs_dev *adev)
                                if (cfg->copier.dma_type == INVALID_OBJECT_ID)
                                        continue;
 
-                               if (!mod->gtw_attrs.lp_buffer_alloc)
+                               if (!mod->gtw_attrs.lp_buffer_alloc) {
+                                       spin_unlock(&adev->path_list_lock);
                                        return false;
+                               }
                        }
                }
        }
+       spin_unlock(&adev->path_list_lock);
 
        return true;
 }
index d7fccdc..0cf38c9 100644 (file)
@@ -283,8 +283,8 @@ void avs_release_firmwares(struct avs_dev *adev);
 
 int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
                        u8 core_id, u8 domain, void *param, u32 param_size,
-                       u16 *instance_id);
-void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+                       u8 *instance_id);
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
                           u8 ppl_instance_id, u8 core_id);
 int avs_dsp_create_pipeline(struct avs_dev *adev, u16 req_size, u8 priority,
                            bool lp, u16 attributes, u8 *instance_id);
index b2823c2..60f8fb0 100644 (file)
@@ -443,7 +443,7 @@ static int avs_register_i2s_boards(struct avs_dev *adev)
        }
 
        for (mach = boards->machs; mach->id[0]; mach++) {
-               if (!acpi_dev_present(mach->id, NULL, -1))
+               if (!acpi_dev_present(mach->id, mach->uid, -1))
                        continue;
 
                if (mach->machine_quirk)
index a8b14b7..3dfa2e9 100644 (file)
@@ -21,17 +21,25 @@ static struct avs_dev *avs_get_kcontrol_adev(struct snd_kcontrol *kcontrol)
        return to_avs_dev(w->dapm->component->dev);
 }
 
-static struct avs_path_module *avs_get_kcontrol_module(struct avs_dev *adev, u32 id)
+static struct avs_path_module *avs_get_volume_module(struct avs_dev *adev, u32 id)
 {
        struct avs_path *path;
        struct avs_path_pipeline *ppl;
        struct avs_path_module *mod;
 
-       list_for_each_entry(path, &adev->path_list, node)
-               list_for_each_entry(ppl, &path->ppl_list, node)
-                       list_for_each_entry(mod, &ppl->mod_list, node)
-                               if (mod->template->ctl_id && mod->template->ctl_id == id)
+       spin_lock(&adev->path_list_lock);
+       list_for_each_entry(path, &adev->path_list, node) {
+               list_for_each_entry(ppl, &path->ppl_list, node) {
+                       list_for_each_entry(mod, &ppl->mod_list, node) {
+                               if (guid_equal(&mod->template->cfg_ext->type, &AVS_PEAKVOL_MOD_UUID)
+                                   && mod->template->ctl_id == id) {
+                                       spin_unlock(&adev->path_list_lock);
                                        return mod;
+                               }
+                       }
+               }
+       }
+       spin_unlock(&adev->path_list_lock);
 
        return NULL;
 }
@@ -49,7 +57,7 @@ int avs_control_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_va
        /* prevent access to modules while path is being constructed */
        mutex_lock(&adev->path_mutex);
 
-       active_module = avs_get_kcontrol_module(adev, ctl_data->id);
+       active_module = avs_get_volume_module(adev, ctl_data->id);
        if (active_module) {
                ret = avs_ipc_peakvol_get_volume(adev, active_module->module_id,
                                                 active_module->instance_id, &dspvols,
@@ -89,7 +97,7 @@ int avs_control_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_va
                changed = 1;
        }
 
-       active_module = avs_get_kcontrol_module(adev, ctl_data->id);
+       active_module = avs_get_volume_module(adev, ctl_data->id);
        if (active_module) {
                dspvol.channel_id = AVS_ALL_CHANNELS_MASK;
                dspvol.target_volume = *volume;
index b881100..aa03af4 100644 (file)
@@ -225,7 +225,7 @@ err:
 
 int avs_dsp_init_module(struct avs_dev *adev, u16 module_id, u8 ppl_instance_id,
                        u8 core_id, u8 domain, void *param, u32 param_size,
-                       u16 *instance_id)
+                       u8 *instance_id)
 {
        struct avs_module_entry mentry;
        bool was_loaded = false;
@@ -272,7 +272,7 @@ err_mod_entry:
        return ret;
 }
 
-void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u16 instance_id,
+void avs_dsp_delete_module(struct avs_dev *adev, u16 module_id, u8 instance_id,
                           u8 ppl_instance_id, u8 core_id)
 {
        struct avs_module_entry mentry;
index d3b60ae..7f23a30 100644 (file)
@@ -619,7 +619,7 @@ enum avs_channel_config {
        AVS_CHANNEL_CONFIG_DUAL_MONO = 9,
        AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_0 = 10,
        AVS_CHANNEL_CONFIG_I2S_DUAL_STEREO_1 = 11,
-       AVS_CHANNEL_CONFIG_4_CHANNEL = 12,
+       AVS_CHANNEL_CONFIG_7_1 = 12,
        AVS_CHANNEL_CONFIG_INVALID
 };
 
index 197222c..657f7b0 100644 (file)
@@ -37,7 +37,7 @@ struct avs_path_pipeline {
 
 struct avs_path_module {
        u16 module_id;
-       u16 instance_id;
+       u8 instance_id;
        union avs_gtw_attributes gtw_attrs;
 
        struct avs_tplg_module *template;
index 31c032a..1fbb2c2 100644 (file)
@@ -468,21 +468,34 @@ static int avs_dai_fe_startup(struct snd_pcm_substream *substream, struct snd_so
 
        host_stream = snd_hdac_ext_stream_assign(bus, substream, HDAC_EXT_STREAM_TYPE_HOST);
        if (!host_stream) {
-               kfree(data);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto err;
        }
 
        data->host_stream = host_stream;
-       snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+       ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
+       if (ret < 0)
+               goto err;
+
        /* avoid wrap-around with wall-clock */
-       snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
-       snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
+       ret = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_TIME, 20, 178000000);
+       if (ret < 0)
+               goto err;
+
+       ret = snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_rates);
+       if (ret < 0)
+               goto err;
+
        snd_pcm_set_sync(substream);
 
        dev_dbg(dai->dev, "%s fe STARTUP tag %d str %p",
                __func__, hdac_stream(host_stream)->stream_tag, substream);
 
        return 0;
+
+err:
+       kfree(data);
+       return ret;
 }
 
 static void avs_dai_fe_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
index 70a9420..2759282 100644 (file)
@@ -18,7 +18,7 @@ static int avs_dsp_init_probe(struct avs_dev *adev, union avs_connector_node_id
 {
        struct avs_probe_cfg cfg = {{0}};
        struct avs_module_entry mentry;
-       u16 dummy;
+       u8 dummy;
 
        avs_get_module_entry(adev, &AVS_PROBE_MOD_UUID, &mentry);
 
index 6d9cfe0..d0f6c94 100644 (file)
@@ -218,18 +218,48 @@ static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        return 0;
 }
 
+static int jz4740_i2s_get_i2sdiv(unsigned long mclk, unsigned long rate,
+                                unsigned long i2sdiv_max)
+{
+       unsigned long div, rate1, rate2, err1, err2;
+
+       div = mclk / (64 * rate);
+       if (div == 0)
+               div = 1;
+
+       rate1 = mclk / (64 * div);
+       rate2 = mclk / (64 * (div + 1));
+
+       err1 = abs(rate1 - rate);
+       err2 = abs(rate2 - rate);
+
+       /*
+        * Choose the divider that produces the smallest error in the
+        * output rate and reject dividers with a 5% or higher error.
+        * In the event that both dividers are outside the acceptable
+        * error margin, reject the rate to prevent distorted audio.
+        * (The number 5% is arbitrary.)
+        */
+       if (div <= i2sdiv_max && err1 <= err2 && err1 < rate/20)
+               return div;
+       if (div < i2sdiv_max && err2 < rate/20)
+               return div + 1;
+
+       return -EINVAL;
+}
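
To make the 5% rule above concrete, here is a stand-alone sketch of the same
selection logic (the mclk/rate/div_max values in main() are illustrative
assumptions, not taken from any particular board):

    /* Hedged re-implementation of the divider pick, for experimentation. */
    #include <stdio.h>
    #include <stdlib.h>

    static long get_i2sdiv(long mclk, long rate, long div_max)
    {
            long div = mclk / (64 * rate);
            long rate1, rate2, err1, err2;

            if (div == 0)
                    div = 1;
            rate1 = mclk / (64 * div);
            rate2 = mclk / (64 * (div + 1));
            err1 = labs(rate1 - rate);
            err2 = labs(rate2 - rate);
            if (div <= div_max && err1 <= err2 && err1 < rate / 20)
                    return div;
            if (div < div_max && err2 < rate / 20)
                    return div + 1;
            return -1; /* stand-in for -EINVAL */
    }

    int main(void)
    {
            /* 12.288 MHz MCLK, 48 kHz target: 12288000 / (64 * 48000) = 4,
             * rate1 = 48000, err1 = 0, so 4 is returned. */
            printf("i2sdiv = %ld\n", get_i2sdiv(12288000, 48000, 15));
            return 0;
    }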
+
 static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
        struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
 {
        struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
        struct regmap_field *div_field;
+       unsigned long i2sdiv_max;
        unsigned int sample_size;
-       uint32_t ctrl;
-       int div;
+       uint32_t ctrl, conf;
+       int div = 1;
 
        regmap_read(i2s->regmap, JZ_REG_AIC_CTRL, &ctrl);
-
-       div = clk_get_rate(i2s->clk_i2s) / (64 * params_rate(params));
+       regmap_read(i2s->regmap, JZ_REG_AIC_CONF, &conf);
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S8:
@@ -258,11 +288,27 @@ static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
                        ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;
 
                div_field = i2s->field_i2sdiv_playback;
+               i2sdiv_max = GENMASK(i2s->soc_info->field_i2sdiv_playback.msb,
+                                    i2s->soc_info->field_i2sdiv_playback.lsb);
        } else {
                ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE;
                ctrl |= FIELD_PREP(JZ_AIC_CTRL_INPUT_SAMPLE_SIZE, sample_size);
 
                div_field = i2s->field_i2sdiv_capture;
+               i2sdiv_max = GENMASK(i2s->soc_info->field_i2sdiv_capture.msb,
+                                    i2s->soc_info->field_i2sdiv_capture.lsb);
+       }
+
+       /*
+        * Only calculate I2SDIV if we're supplying the bit or frame clock.
+        * If the codec is supplying both clocks then the divider output is
+        * unused, and we don't want it to limit the allowed sample rates.
+        */
+       if (conf & (JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER)) {
+               div = jz4740_i2s_get_i2sdiv(clk_get_rate(i2s->clk_i2s),
+                                           params_rate(params), i2sdiv_max);
+               if (div < 0)
+                       return div;
        }
 
        regmap_write(i2s->regmap, JZ_REG_AIC_CTRL, ctrl);
index a6b4f29..539e3a0 100644 (file)
@@ -644,9 +644,3 @@ int mt8186_init_clock(struct mtk_base_afe *afe)
 
        return 0;
 }
-
-void mt8186_deinit_clock(void *priv)
-{
-       struct mtk_base_afe *afe = priv;
-       mt8186_audsys_clk_unregister(afe);
-}
index d598871..a9d59e5 100644 (file)
@@ -81,7 +81,6 @@ enum {
 struct mtk_base_afe;
 int mt8186_set_audio_int_bus_parent(struct mtk_base_afe *afe, int clk_id);
 int mt8186_init_clock(struct mtk_base_afe *afe);
-void mt8186_deinit_clock(void *priv);
 int mt8186_afe_enable_cgs(struct mtk_base_afe *afe);
 void mt8186_afe_disable_cgs(struct mtk_base_afe *afe);
 int mt8186_afe_enable_clock(struct mtk_base_afe *afe);
index 41172a8..a868a04 100644 (file)
@@ -2848,10 +2848,6 @@ static int mt8186_afe_pcm_dev_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = devm_add_action_or_reset(dev, mt8186_deinit_clock, (void *)afe);
-       if (ret)
-               return ret;
-
        /* init memif */
        afe->memif_32bit_supported = 0;
        afe->memif_size = MT8186_MEMIF_NUM;
index 578969c..5666be6 100644 (file)
@@ -84,6 +84,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
        GATE_AUD2(CLK_AUD_ETDM_OUT1_BCLK, "aud_etdm_out1_bclk", "top_audio", 24),
 };
 
+static void mt8186_audsys_clk_unregister(void *data)
+{
+       struct mtk_base_afe *afe = data;
+       struct mt8186_afe_private *afe_priv = afe->platform_priv;
+       struct clk *clk;
+       struct clk_lookup *cl;
+       int i;
+
+       if (!afe_priv)
+               return;
+
+       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+               cl = afe_priv->lookup[i];
+               if (!cl)
+                       continue;
+
+               clk = cl->clk;
+               clk_unregister_gate(clk);
+
+               clkdev_drop(cl);
+       }
+}
+
 int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
 {
        struct mt8186_afe_private *afe_priv = afe->platform_priv;
@@ -124,27 +147,6 @@ int mt8186_audsys_clk_register(struct mtk_base_afe *afe)
                afe_priv->lookup[i] = cl;
        }
 
-       return 0;
+       return devm_add_action_or_reset(afe->dev, mt8186_audsys_clk_unregister, afe);
 }
 
-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe)
-{
-       struct mt8186_afe_private *afe_priv = afe->platform_priv;
-       struct clk *clk;
-       struct clk_lookup *cl;
-       int i;
-
-       if (!afe_priv)
-               return;
-
-       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
-               cl = afe_priv->lookup[i];
-               if (!cl)
-                       continue;
-
-               clk = cl->clk;
-               clk_unregister_gate(clk);
-
-               clkdev_drop(cl);
-       }
-}
index b8d6a06..897a291 100644 (file)
@@ -10,6 +10,5 @@
 #define _MT8186_AUDSYS_CLK_H_
 
 int mt8186_audsys_clk_register(struct mtk_base_afe *afe);
-void mt8186_audsys_clk_unregister(struct mtk_base_afe *afe);
 
 #endif
index 743d6a1..0fb9751 100644 (file)
@@ -418,13 +418,6 @@ int mt8188_afe_init_clock(struct mtk_base_afe *afe)
        return 0;
 }
 
-void mt8188_afe_deinit_clock(void *priv)
-{
-       struct mtk_base_afe *afe = priv;
-
-       mt8188_audsys_clk_unregister(afe);
-}
-
 int mt8188_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk)
 {
        int ret;
index 084fdfb..a4203a8 100644 (file)
@@ -100,7 +100,6 @@ int mt8188_afe_get_mclk_source_clk_id(int sel);
 int mt8188_afe_get_mclk_source_rate(struct mtk_base_afe *afe, int apll);
 int mt8188_afe_get_default_mclk_source_by_rate(int rate);
 int mt8188_afe_init_clock(struct mtk_base_afe *afe);
-void mt8188_afe_deinit_clock(void *priv);
 int mt8188_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk);
 void mt8188_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk);
 int mt8188_afe_set_clk_rate(struct mtk_base_afe *afe, struct clk *clk,
index e5f9373..bcf7025 100644 (file)
@@ -3185,10 +3185,6 @@ static int mt8188_afe_pcm_dev_probe(struct platform_device *pdev)
        if (ret)
                return dev_err_probe(dev, ret, "init clock error");
 
-       ret = devm_add_action_or_reset(dev, mt8188_afe_deinit_clock, (void *)afe);
-       if (ret)
-               return ret;
-
        spin_lock_init(&afe_priv->afe_ctrl_lock);
 
        mutex_init(&afe->irq_alloc_lock);
index be1c53b..c796ad8 100644 (file)
@@ -138,6 +138,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
        GATE_AUD6(CLK_AUD_GASRC11, "aud_gasrc11", "top_asm_h", 11),
 };
 
+static void mt8188_audsys_clk_unregister(void *data)
+{
+       struct mtk_base_afe *afe = data;
+       struct mt8188_afe_private *afe_priv = afe->platform_priv;
+       struct clk *clk;
+       struct clk_lookup *cl;
+       int i;
+
+       if (!afe_priv)
+               return;
+
+       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+               cl = afe_priv->lookup[i];
+               if (!cl)
+                       continue;
+
+               clk = cl->clk;
+               clk_unregister_gate(clk);
+
+               clkdev_drop(cl);
+       }
+}
+
 int mt8188_audsys_clk_register(struct mtk_base_afe *afe)
 {
        struct mt8188_afe_private *afe_priv = afe->platform_priv;
@@ -179,27 +202,5 @@ int mt8188_audsys_clk_register(struct mtk_base_afe *afe)
                afe_priv->lookup[i] = cl;
        }
 
-       return 0;
-}
-
-void mt8188_audsys_clk_unregister(struct mtk_base_afe *afe)
-{
-       struct mt8188_afe_private *afe_priv = afe->platform_priv;
-       struct clk *clk;
-       struct clk_lookup *cl;
-       int i;
-
-       if (!afe_priv)
-               return;
-
-       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
-               cl = afe_priv->lookup[i];
-               if (!cl)
-                       continue;
-
-               clk = cl->clk;
-               clk_unregister_gate(clk);
-
-               clkdev_drop(cl);
-       }
+       return devm_add_action_or_reset(afe->dev, mt8188_audsys_clk_unregister, afe);
 }
index 6c5f463..45b0948 100644 (file)
@@ -10,6 +10,5 @@
 #define _MT8188_AUDSYS_CLK_H_
 
 int mt8188_audsys_clk_register(struct mtk_base_afe *afe);
-void mt8188_audsys_clk_unregister(struct mtk_base_afe *afe);
 
 #endif
index 9ca2cb8..f35318a 100644 (file)
@@ -410,11 +410,6 @@ int mt8195_afe_init_clock(struct mtk_base_afe *afe)
        return 0;
 }
 
-void mt8195_afe_deinit_clock(struct mtk_base_afe *afe)
-{
-       mt8195_audsys_clk_unregister(afe);
-}
-
 int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk)
 {
        int ret;
index 40663e3..a08c0ee 100644 (file)
@@ -101,7 +101,6 @@ int mt8195_afe_get_mclk_source_clk_id(int sel);
 int mt8195_afe_get_mclk_source_rate(struct mtk_base_afe *afe, int apll);
 int mt8195_afe_get_default_mclk_source_by_rate(int rate);
 int mt8195_afe_init_clock(struct mtk_base_afe *afe);
-void mt8195_afe_deinit_clock(struct mtk_base_afe *afe);
 int mt8195_afe_enable_clk(struct mtk_base_afe *afe, struct clk *clk);
 void mt8195_afe_disable_clk(struct mtk_base_afe *afe, struct clk *clk);
 int mt8195_afe_prepare_clk(struct mtk_base_afe *afe, struct clk *clk);
index 9e45efe..03dabc0 100644 (file)
@@ -3255,15 +3255,11 @@ err_pm_put:
 
 static void mt8195_afe_pcm_dev_remove(struct platform_device *pdev)
 {
-       struct mtk_base_afe *afe = platform_get_drvdata(pdev);
-
        snd_soc_unregister_component(&pdev->dev);
 
        pm_runtime_disable(&pdev->dev);
        if (!pm_runtime_status_suspended(&pdev->dev))
                mt8195_afe_runtime_suspend(&pdev->dev);
-
-       mt8195_afe_deinit_clock(afe);
 }
 
 static const struct of_device_id mt8195_afe_pcm_dt_match[] = {
index e0670e0..38594bc 100644 (file)
@@ -148,6 +148,29 @@ static const struct afe_gate aud_clks[CLK_AUD_NR_CLK] = {
        GATE_AUD6(CLK_AUD_GASRC19, "aud_gasrc19", "top_asm_h", 19),
 };
 
+static void mt8195_audsys_clk_unregister(void *data)
+{
+       struct mtk_base_afe *afe = data;
+       struct mt8195_afe_private *afe_priv = afe->platform_priv;
+       struct clk *clk;
+       struct clk_lookup *cl;
+       int i;
+
+       if (!afe_priv)
+               return;
+
+       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
+               cl = afe_priv->lookup[i];
+               if (!cl)
+                       continue;
+
+               clk = cl->clk;
+               clk_unregister_gate(clk);
+
+               clkdev_drop(cl);
+       }
+}
+
 int mt8195_audsys_clk_register(struct mtk_base_afe *afe)
 {
        struct mt8195_afe_private *afe_priv = afe->platform_priv;
@@ -188,27 +211,5 @@ int mt8195_audsys_clk_register(struct mtk_base_afe *afe)
                afe_priv->lookup[i] = cl;
        }
 
-       return 0;
-}
-
-void mt8195_audsys_clk_unregister(struct mtk_base_afe *afe)
-{
-       struct mt8195_afe_private *afe_priv = afe->platform_priv;
-       struct clk *clk;
-       struct clk_lookup *cl;
-       int i;
-
-       if (!afe_priv)
-               return;
-
-       for (i = 0; i < CLK_AUD_NR_CLK; i++) {
-               cl = afe_priv->lookup[i];
-               if (!cl)
-                       continue;
-
-               clk = cl->clk;
-               clk_unregister_gate(clk);
-
-               clkdev_drop(cl);
-       }
+       return devm_add_action_or_reset(afe->dev, mt8195_audsys_clk_unregister, afe);
 }
index 239d310..69db2dd 100644 (file)
@@ -10,6 +10,5 @@
 #define _MT8195_AUDSYS_CLK_H_
 
 int mt8195_audsys_clk_register(struct mtk_base_afe *afe);
-void mt8195_audsys_clk_unregister(struct mtk_base_afe *afe);
 
 #endif
index adb69d7..4fb1ac8 100644 (file)
@@ -2405,6 +2405,9 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
                if (!snd_soc_dpcm_be_can_update(fe, be, stream))
                        continue;
 
+               if (!snd_soc_dpcm_can_be_prepared(fe, be, stream))
+                       continue;
+
                if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
@@ -3042,3 +3045,20 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
        return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
 }
 EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_params);
+
+/*
+ * We can only prepare a BE DAI if none of its FEs are currently
+ * prepared, running or paused for the specified stream direction.
+ */
+int snd_soc_dpcm_can_be_prepared(struct snd_soc_pcm_runtime *fe,
+                                struct snd_soc_pcm_runtime *be, int stream)
+{
+       const enum snd_soc_dpcm_state state[] = {
+               SND_SOC_DPCM_STATE_START,
+               SND_SOC_DPCM_STATE_PAUSED,
+               SND_SOC_DPCM_STATE_PREPARE,
+       };
+
+       return snd_soc_dpcm_check_state(fe, be, stream, state, ARRAY_SIZE(state));
+}
+EXPORT_SYMBOL_GPL(snd_soc_dpcm_can_be_prepared);
index 4e0c48a..749e856 100644 (file)
@@ -209,7 +209,12 @@ int acp_sof_ipc_msg_data(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sp
                acp_mailbox_read(sdev, offset, p, sz);
        } else {
                struct snd_pcm_substream *substream = sps->substream;
-               struct acp_dsp_stream *stream = substream->runtime->private_data;
+               struct acp_dsp_stream *stream;
+
+               if (!substream || !substream->runtime)
+                       return -ESTRPIPE;
+
+               stream = substream->runtime->private_data;
 
                if (!stream)
                        return -ESTRPIPE;
index b42b598..d547318 100644 (file)
@@ -438,8 +438,8 @@ void snd_sof_handle_fw_exception(struct snd_sof_dev *sdev, const char *msg)
                /* should we prevent DSP entering D3 ? */
                if (!sdev->ipc_dump_printed)
                        dev_info(sdev->dev,
-                                "preventing DSP entering D3 state to preserve context\n");
-               pm_runtime_get_noresume(sdev->dev);
+                                "Attempting to prevent DSP from entering D3 state to preserve context\n");
+               pm_runtime_get_if_in_use(sdev->dev);
        }
 
        /* dump vital information to the logs */
index 775582a..b7cbf66 100644 (file)
@@ -19,6 +19,9 @@
 
 #if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA_MLINK)
 
+/* worst-case number of sublinks is used for sublink refcount array allocation only */
+#define HDAML_MAX_SUBLINKS (AZX_ML_LCTL_CPA_SHIFT - AZX_ML_LCTL_SPA_SHIFT)
+
 /**
  * struct hdac_ext2_link - HDAudio extended+alternate link
  *
@@ -33,6 +36,7 @@
  * @leptr:             extended link pointer
  * @eml_lock:          mutual exclusion to access shared registers e.g. CPA/SPA bits
  * in LCTL register
+ * @sublink_ref_count: array of refcounts, required to power-manage sublinks independently
  * @base_ptr:          pointer to shim/ip/shim_vs space
  * @instance_offset:   offset between each of @slcount instances managed by link
  * @shim_offset:       offset to SHIM register base
@@ -53,6 +57,7 @@ struct hdac_ext2_link {
        u32 leptr;
 
        struct mutex eml_lock; /* prevent concurrent access to e.g. CPA/SPA */
+       int sublink_ref_count[HDAML_MAX_SUBLINKS];
 
        /* internal values computed from LCAP contents */
        void __iomem *base_ptr;
@@ -68,6 +73,7 @@ struct hdac_ext2_link {
 #define AZX_REG_SDW_SHIM_OFFSET                                0x0
 #define AZX_REG_SDW_IP_OFFSET                          0x100
 #define AZX_REG_SDW_VS_SHIM_OFFSET                     0x6000
+#define AZX_REG_SDW_SHIM_PCMSyCM(y)                    (0x16 + 0x4 * (y))
 
 /* only one instance supported */
 #define AZX_REG_INTEL_DMIC_SHIM_OFFSET                 0x0
@@ -91,7 +97,7 @@ struct hdac_ext2_link {
  */
 
 static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
-                         void __iomem *ml_addr, int link_idx)
+                         void __iomem *remap_addr, void __iomem *ml_addr, int link_idx)
 {
        struct hdac_ext_link *hlink = &h2link->hext_link;
        u32 base_offset;
@@ -126,15 +132,16 @@ static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
                link_idx, h2link->slcount);
 
        /* find IP ID and offsets */
-       h2link->leptr = readl(hlink->ml_addr + AZX_REG_ML_LEPTR);
+       h2link->leptr = readl(ml_addr + AZX_REG_ML_LEPTR);
 
        h2link->elid = FIELD_GET(AZX_REG_ML_LEPTR_ID, h2link->leptr);
 
        base_offset = FIELD_GET(AZX_REG_ML_LEPTR_PTR, h2link->leptr);
-       h2link->base_ptr = hlink->ml_addr + base_offset;
+       h2link->base_ptr = remap_addr + base_offset;
 
        switch (h2link->elid) {
        case AZX_REG_ML_LEPTR_ID_SDW:
+               h2link->instance_offset = AZX_REG_SDW_INSTANCE_OFFSET;
                h2link->shim_offset = AZX_REG_SDW_SHIM_OFFSET;
                h2link->ip_offset = AZX_REG_SDW_IP_OFFSET;
                h2link->shim_vs_offset = AZX_REG_SDW_VS_SHIM_OFFSET;
@@ -149,6 +156,7 @@ static int hdaml_lnk_enum(struct device *dev, struct hdac_ext2_link *h2link,
                        link_idx, base_offset);
                break;
        case AZX_REG_ML_LEPTR_ID_INTEL_SSP:
+               h2link->instance_offset = AZX_REG_INTEL_SSP_INSTANCE_OFFSET;
                h2link->shim_offset = AZX_REG_INTEL_SSP_SHIM_OFFSET;
                h2link->ip_offset = AZX_REG_INTEL_SSP_IP_OFFSET;
                h2link->shim_vs_offset = AZX_REG_INTEL_SSP_VS_SHIM_OFFSET;
@@ -333,6 +341,21 @@ static void hdaml_link_set_lsdiid(u32 __iomem *lsdiid, int dev_num)
        writel(val, lsdiid);
 }
 
+static void hdaml_shim_map_stream_ch(u16 __iomem *pcmsycm, int lchan, int hchan,
+                                    int stream_id, int dir)
+{
+       u16 val;
+
+       val = readw(pcmsycm);
+
+       u16p_replace_bits(&val, lchan, GENMASK(3, 0));
+       u16p_replace_bits(&val, hchan, GENMASK(7, 4));
+       u16p_replace_bits(&val, stream_id, GENMASK(13, 8));
+       u16p_replace_bits(&val, dir, BIT(15));
+
+       writew(val, pcmsycm);
+}
+
 static void hdaml_lctl_offload_enable(u32 __iomem *lctl, bool enable)
 {
        u32 val = readl(lctl);
@@ -364,7 +387,7 @@ static int hda_ml_alloc_h2link(struct hdac_bus *bus, int index)
        hlink->bus = bus;
        hlink->ml_addr = bus->mlcap + AZX_ML_BASE + (AZX_ML_INTERVAL * index);
 
-       ret = hdaml_lnk_enum(bus->dev, h2link, hlink->ml_addr, index);
+       ret = hdaml_lnk_enum(bus->dev, h2link, bus->remap_addr, hlink->ml_addr, index);
        if (ret < 0) {
                kfree(h2link);
                return ret;
@@ -641,8 +664,13 @@ static int hdac_bus_eml_power_up_base(struct hdac_bus *bus, bool alt, int elid,
        if (eml_lock)
                mutex_lock(&h2link->eml_lock);
 
-       if (++hlink->ref_count > 1)
-               goto skip_init;
+       if (!alt) {
+               if (++hlink->ref_count > 1)
+                       goto skip_init;
+       } else {
+               if (++h2link->sublink_ref_count[sublink] > 1)
+                       goto skip_init;
+       }
 
        ret = hdaml_link_init(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
 
@@ -684,9 +712,13 @@ static int hdac_bus_eml_power_down_base(struct hdac_bus *bus, bool alt, int elid
        if (eml_lock)
                mutex_lock(&h2link->eml_lock);
 
-       if (--hlink->ref_count > 0)
-               goto skip_shutdown;
-
+       if (!alt) {
+               if (--hlink->ref_count > 0)
+                       goto skip_shutdown;
+       } else {
+               if (--h2link->sublink_ref_count[sublink] > 0)
+                       goto skip_shutdown;
+       }
        ret = hdaml_link_shutdown(hlink->ml_addr + AZX_REG_ML_LCTL, sublink);
 
 skip_shutdown:
@@ -740,6 +772,40 @@ int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num)
        return 0;
 } EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_set_lsdiid, SND_SOC_SOF_HDA_MLINK);
 
+/*
+ * the 'y' parameter comes from the PCMSyCM hardware register naming. 'y' refers to the
+ * PDI index, i.e. the FIFO used for RX or TX
+ */
+int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
+                                  int channel_mask, int stream_id, int dir)
+{
+       struct hdac_ext2_link *h2link;
+       u16 __iomem *pcmsycm;
+       u16 val;
+
+       h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+       if (!h2link)
+               return -ENODEV;
+
+       pcmsycm = h2link->base_ptr + h2link->shim_offset +
+               h2link->instance_offset * sublink +
+               AZX_REG_SDW_SHIM_PCMSyCM(y);
+
+       mutex_lock(&h2link->eml_lock);
+
+       hdaml_shim_map_stream_ch(pcmsycm, 0, hweight32(channel_mask),
+                                stream_id, dir);
+
+       mutex_unlock(&h2link->eml_lock);
+
+       val = readw(pcmsycm);
+
+       dev_dbg(bus->dev, "channel_mask %#x stream_id %d dir %d pcmsycm %#x\n",
+               channel_mask, stream_id, dir, val);
+
+       return 0;
+} EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_map_stream_ch, SND_SOC_SOF_HDA_MLINK);
+
 void hda_bus_ml_put_all(struct hdac_bus *bus)
 {
        struct hdac_ext_link *hlink;
@@ -836,6 +902,18 @@ struct hdac_ext_link *hdac_bus_eml_dmic_get_hlink(struct hdac_bus *bus)
 }
 EXPORT_SYMBOL_NS(hdac_bus_eml_dmic_get_hlink, SND_SOC_SOF_HDA_MLINK);
 
+struct hdac_ext_link *hdac_bus_eml_sdw_get_hlink(struct hdac_bus *bus)
+{
+       struct hdac_ext2_link *h2link;
+
+       h2link = find_ext2_link(bus, true, AZX_REG_ML_LEPTR_ID_SDW);
+       if (!h2link)
+               return NULL;
+
+       return &h2link->hext_link;
+}
+EXPORT_SYMBOL_NS(hdac_bus_eml_sdw_get_hlink, SND_SOC_SOF_HDA_MLINK);
+
 int hdac_bus_eml_enable_offload(struct hdac_bus *bus, bool alt, int elid, bool enable)
 {
        struct hdac_ext2_link *h2link;
index fc1eb8e..ba4ef29 100644 (file)
@@ -2103,10 +2103,13 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget *
         * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is.
         */
 
-       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS)
+       if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) {
+               /* Clear stale command */
+               config->flags &= ~SOF_DAI_CONFIG_FLAGS_CMD_MASK;
                config->flags |= flags;
-       else
+       } else {
                config->flags = flags;
+       }
 
        /* only send the IPC if the widget is set up in the DSP */
        if (swidget->use_count > 0) {
index 059eebf..5abe616 100644 (file)
@@ -59,7 +59,7 @@ static const struct sof_topology_token ipc4_in_audio_format_tokens[] = {
                audio_fmt.interleaving_style)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_IN_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
-       {SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+       {SOF_TKN_CAVS_AUDIO_FORMAT_INPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, pin_index)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_IBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, buffer_size)},
@@ -79,7 +79,7 @@ static const struct sof_topology_token ipc4_out_audio_format_tokens[] = {
                audio_fmt.interleaving_style)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_OUT_FMT_CFG, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, audio_fmt.fmt_cfg)},
-       {SOF_TKN_CAVS_AUDIO_FORMAT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
+       {SOF_TKN_CAVS_AUDIO_FORMAT_OUTPUT_PIN_INDEX, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, pin_index)},
        {SOF_TKN_CAVS_AUDIO_FORMAT_OBS, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32,
                offsetof(struct sof_ipc4_pin_format, buffer_size)},
index 567db32..d0ab6f3 100644 (file)
@@ -643,16 +643,17 @@ static int sof_pcm_probe(struct snd_soc_component *component)
                                       "%s/%s",
                                       plat_data->tplg_filename_prefix,
                                       plat_data->tplg_filename);
-       if (!tplg_filename)
-               return -ENOMEM;
+       if (!tplg_filename) {
+               ret = -ENOMEM;
+               goto pm_error;
+       }
 
        ret = snd_sof_load_topology(component, tplg_filename);
-       if (ret < 0) {
+       if (ret < 0)
                dev_err(component->dev, "error: failed to load DSP topology %d\n",
                        ret);
-               return ret;
-       }
 
+pm_error:
        pm_runtime_mark_last_busy(component->dev);
        pm_runtime_put_autosuspend(component->dev);
 
index 2fdbc53..2b23244 100644 (file)
@@ -164,7 +164,7 @@ static int sof_resume(struct device *dev, bool runtime_resume)
                ret = tplg_ops->set_up_all_pipelines(sdev, false);
                if (ret < 0) {
                        dev_err(sdev->dev, "Failed to restore pipeline after resume %d\n", ret);
-                       return ret;
+                       goto setup_fail;
                }
        }
 
@@ -178,6 +178,18 @@ static int sof_resume(struct device *dev, bool runtime_resume)
                        dev_err(sdev->dev, "ctx_restore IPC error during resume: %d\n", ret);
        }
 
+setup_fail:
+#if IS_ENABLED(CONFIG_SND_SOC_SOF_DEBUG_ENABLE_DEBUGFS_CACHE)
+       if (ret < 0) {
+               /*
+                * Debugfs cannot be read in runtime suspend, so cache
+                * the contents upon failure. This allows capturing
+                * possible DSP coredump information.
+                */
+               sof_cache_debugfs(sdev);
+       }
+#endif
+
        return ret;
 }
 
index fff1268..8d9e9d5 100644 (file)
@@ -218,12 +218,7 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
 
        ret = ipc->points_info(cdev, &desc, &num_desc);
        if (ret < 0)
-               goto exit;
-
-       pm_runtime_mark_last_busy(dev);
-       err = pm_runtime_put_autosuspend(dev);
-       if (err < 0)
-               dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
+               goto pm_error;
 
        for (i = 0; i < num_desc; i++) {
                offset = strlen(buf);
@@ -241,6 +236,13 @@ static ssize_t sof_probes_dfs_points_read(struct file *file, char __user *to,
        ret = simple_read_from_buffer(to, count, ppos, buf, strlen(buf));
 
        kfree(desc);
+
+pm_error:
+       pm_runtime_mark_last_busy(dev);
+       err = pm_runtime_put_autosuspend(dev);
+       if (err < 0)
+               dev_err_ratelimited(dev, "debugfs read failed to idle %d\n", err);
+
 exit:
        kfree(buf);
        return ret;
index d3d536b..f160dc4 100644 (file)
@@ -586,6 +586,10 @@ static int sof_copy_tuples(struct snd_sof_dev *sdev, struct snd_soc_tplg_vendor_
                                if (*num_copied_tuples == tuples_size)
                                        return 0;
                        }
+
+                       /* stop when we've found the required token instances */
+                       if (found == num_tokens * token_instance_num)
+                               return 0;
                }
 
                /* next array */
@@ -1261,7 +1265,7 @@ static int sof_widget_parse_tokens(struct snd_soc_component *scomp, struct snd_s
                if (num_sets > 1) {
                        struct snd_sof_tuple *new_tuples;
 
-                       num_tuples += token_list[object_token_list[i]].count * num_sets;
+                       num_tuples += token_list[object_token_list[i]].count * (num_sets - 1);
                        new_tuples = krealloc(swidget->tuples,
                                              sizeof(*new_tuples) * num_tuples, GFP_KERNEL);
                        if (!new_tuples) {
index 4b1c5ba..ab5fed9 100644 (file)
@@ -423,6 +423,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
        case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
        case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
        case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
+       case USB_ID(0x0e41, 0x424b): /* Line6 Pod Go */
        case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
                return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
        }
index f8129c6..f7ddd73 100644 (file)
@@ -198,6 +198,15 @@ struct kvm_arm_copy_mte_tags {
        __u64 reserved[2];
 };
 
+/*
+ * Counter/Timer offset structure. Describes the virtual/physical offset.
+ * To be used with KVM_ARM_SET_COUNTER_OFFSET.
+ */
+struct kvm_arm_counter_offset {
+       __u64 counter_offset;
+       __u64 reserved;
+};
+
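
A hedged user-space sketch of how this structure is consumed;
KVM_ARM_SET_COUNTER_OFFSET is taken to be a vm ioctl as in the 6.4 API
documentation, and the offset value is an arbitrary assumption (arm64 only):

    /* Illustrative only: apply a virtual counter offset to a new VM. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm = ioctl(kvm, KVM_CREATE_VM, 0);
            struct kvm_arm_counter_offset off = {
                    .counter_offset = 0x10000, /* assumed: ticks to subtract */
            };

            if (ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &off))
                    perror("KVM_ARM_SET_COUNTER_OFFSET");
            return 0;
    }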
 #define KVM_ARM_TAGS_TO_GUEST          0
 #define KVM_ARM_TAGS_FROM_GUEST                1
 
@@ -372,6 +381,10 @@ enum {
 #endif
 };
 
+/* Device Control API on vm fd */
+#define KVM_ARM_VM_SMCCC_CTRL          0
+#define   KVM_ARM_VM_SMCCC_FILTER      0
+
 /* Device Control API: ARM VGIC */
 #define KVM_DEV_ARM_VGIC_GRP_ADDR      0
 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
@@ -411,6 +424,8 @@ enum {
 #define KVM_ARM_VCPU_TIMER_CTRL                1
 #define   KVM_ARM_VCPU_TIMER_IRQ_VTIMER                0
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
+#define   KVM_ARM_VCPU_TIMER_IRQ_HVTIMER       2
+#define   KVM_ARM_VCPU_TIMER_IRQ_HPTIMER       3
 #define KVM_ARM_VCPU_PVTIME_CTRL       2
 #define   KVM_ARM_VCPU_PVTIME_IPA      0
 
@@ -469,6 +484,27 @@ enum {
 /* run->fail_entry.hardware_entry_failure_reason codes. */
 #define KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED    (1ULL << 0)
 
+enum kvm_smccc_filter_action {
+       KVM_SMCCC_FILTER_HANDLE = 0,
+       KVM_SMCCC_FILTER_DENY,
+       KVM_SMCCC_FILTER_FWD_TO_USER,
+
+#ifdef __KERNEL__
+       NR_SMCCC_FILTER_ACTIONS
+#endif
+};
+
+struct kvm_smccc_filter {
+       __u32 base;
+       __u32 nr_functions;
+       __u8 action;
+       __u8 pad[15];
+};
+
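Together with the KVM_ARM_VM_SMCCC_CTRL attribute group defined earlier in
this hunk, the expected consumption is roughly the following hedged user-space
fragment (the function-ID range is an arbitrary assumption; the vm fd comes
from the usual KVM_CREATE_VM plumbing):

    /* Illustrative: deny a hypothetical 4-function SMCCC range on a VM. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int deny_smccc_range(int vm_fd)
    {
            struct kvm_smccc_filter filter = {
                    .base = 0x86000000,      /* assumed vendor range */
                    .nr_functions = 4,
                    .action = KVM_SMCCC_FILTER_DENY,
            };
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VM_SMCCC_CTRL,
                    .attr = KVM_ARM_VM_SMCCC_FILTER,
                    .addr = (uint64_t)(uintptr_t)&filter,
            };

            return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
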
+/* arm64-specific KVM_EXIT_HYPERCALL flags */
+#define KVM_HYPERCALL_EXIT_SMC         (1U << 0)
+#define KVM_HYPERCALL_EXIT_16BIT       (1U << 1)
+
 #endif
 
 #endif /* __ARM_KVM_H__ */
index b890058..cb8ca46 100644 (file)
@@ -97,7 +97,7 @@
 #define X86_FEATURE_SYSENTER32         ( 3*32+15) /* "" sysenter in IA32 userspace */
 #define X86_FEATURE_REP_GOOD           ( 3*32+16) /* REP microcode works well */
 #define X86_FEATURE_AMD_LBR_V2         ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_LFENCE_RDTSC       ( 3*32+18) /* "" LFENCE synchronizes RDTSC */
+/* FREE, was #define X86_FEATURE_LFENCE_RDTSC          ( 3*32+18) "" LFENCE synchronizes RDTSC */
 #define X86_FEATURE_ACC_POWER          ( 3*32+19) /* AMD Accumulated Power Mechanism */
 #define X86_FEATURE_NOPL               ( 3*32+20) /* The NOPL (0F 1F) instructions */
 #define X86_FEATURE_ALWAYS             ( 3*32+21) /* "" Always-present feature */
 
 /* Virtualization flags: Linux defined, word 8 */
 #define X86_FEATURE_TPR_SHADOW         ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI               ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT                        ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID               ( 8*32+ 4) /* Intel Virtual Processor ID */
+#define X86_FEATURE_FLEXPRIORITY       ( 8*32+ 1) /* Intel FlexPriority */
+#define X86_FEATURE_EPT                        ( 8*32+ 2) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID               ( 8*32+ 3) /* Intel Virtual Processor ID */
 
 #define X86_FEATURE_VMMCALL            ( 8*32+15) /* Prefer VMMCALL to VMCALL */
 #define X86_FEATURE_XENPV              ( 8*32+16) /* "" Xen paravirtual guest */
 #define X86_FEATURE_SGX_EDECCSSA       (11*32+18) /* "" SGX EDECCSSA user leaf function */
 #define X86_FEATURE_CALL_DEPTH         (11*32+19) /* "" Call depth tracking for RSB stuffing */
 #define X86_FEATURE_MSR_TSX_CTRL       (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
+#define X86_FEATURE_SMBA               (11*32+21) /* "" Slow Memory Bandwidth Allocation */
+#define X86_FEATURE_BMEC               (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
 #define X86_FEATURE_AVX_VNNI           (12*32+ 4) /* AVX VNNI instructions */
 #define X86_FEATURE_AVX512_BF16                (12*32+ 5) /* AVX512 BFLOAT16 instructions */
 #define X86_FEATURE_CMPCCXADD           (12*32+ 7) /* "" CMPccXADD instructions */
+#define X86_FEATURE_ARCH_PERFMON_EXT   (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
+#define X86_FEATURE_FZRM               (12*32+10) /* "" Fast zero-length REP MOVSB */
+#define X86_FEATURE_FSRS               (12*32+11) /* "" Fast short REP STOSB */
+#define X86_FEATURE_FSRC               (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
 #define X86_FEATURE_LKGS               (12*32+18) /* "" Load "kernel" (userspace) GS */
 #define X86_FEATURE_AMX_FP16           (12*32+21) /* "" AMX fp16 Support */
 #define X86_FEATURE_AVX_IFMA            (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
+#define X86_FEATURE_LAM                        (12*32+26) /* Linear Address Masking */
 
 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
 #define X86_FEATURE_CLZERO             (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_VIRT_SSBD          (13*32+25) /* Virtualized Speculative Store Bypass Disable */
 #define X86_FEATURE_AMD_SSB_NO         (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
 #define X86_FEATURE_CPPC               (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_AMD_PSFD            (13*32+28) /* "" Predictive Store Forwarding Disable */
 #define X86_FEATURE_BTC_NO             (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
 #define X86_FEATURE_BRS                        (13*32+31) /* Branch Sampling available */
 
 #define X86_FEATURE_VGIF               (15*32+16) /* Virtual GIF */
 #define X86_FEATURE_X2AVIC             (15*32+18) /* Virtual x2apic */
 #define X86_FEATURE_V_SPEC_CTRL                (15*32+20) /* Virtual SPEC_CTRL */
+#define X86_FEATURE_VNMI               (15*32+25) /* Virtual NMI */
 #define X86_FEATURE_SVME_ADDR_CHK      (15*32+28) /* "" SVME addr check */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
 #define X86_FEATURE_V_TSC_AUX          (19*32+ 9) /* "" Virtual TSC_AUX */
 #define X86_FEATURE_SME_COHERENT       (19*32+10) /* "" AMD hardware-enforced cache coherency */
 
+/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
+#define X86_FEATURE_NO_NESTED_DATA_BP  (20*32+ 0) /* "" No Nested Data Breakpoints */
+#define X86_FEATURE_LFENCE_RDTSC       (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_NULL_SEL_CLR_BASE  (20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_AUTOIBRS           (20*32+ 8) /* "" Automatic IBRS */
+#define X86_FEATURE_NO_SMM_CTL_MSR     (20*32+ 9) /* "" SMM_CTL MSR is not present */
+
 /*
  * BUG word(s)
  */
 #define X86_BUG_MMIO_UNKNOWN           X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
 #define X86_BUG_RETBLEED               X86_BUG(27) /* CPU is affected by RETBleed */
 #define X86_BUG_EIBRS_PBRSB            X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_SMT_RSB                        X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
index 5dfa4fb..fafe9be 100644 (file)
 # define DISABLE_CALL_DEPTH_TRACKING   (1 << (X86_FEATURE_CALL_DEPTH & 31))
 #endif
 
+#ifdef CONFIG_ADDRESS_MASKING
+# define DISABLE_LAM           0
+#else
+# define DISABLE_LAM           (1 << (X86_FEATURE_LAM & 31))
+#endif
+
 #ifdef CONFIG_INTEL_IOMMU_SVM
 # define DISABLE_ENQCMD                0
 #else
 #define DISABLED_MASK10        0
 #define DISABLED_MASK11        (DISABLE_RETPOLINE|DISABLE_RETHUNK|DISABLE_UNRET| \
                         DISABLE_CALL_DEPTH_TRACKING)
-#define DISABLED_MASK12        0
+#define DISABLED_MASK12        (DISABLE_LAM)
 #define DISABLED_MASK13        0
 #define DISABLED_MASK14        0
 #define DISABLED_MASK15        0
index ad35355..3aedae6 100644 (file)
 
 /* Abbreviated from Intel SDM name IA32_INTEGRITY_CAPABILITIES */
 #define MSR_INTEGRITY_CAPS                     0x000002d9
+#define MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT      2
+#define MSR_INTEGRITY_CAPS_ARRAY_BIST          BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT)
 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT   4
 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST       BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT)
 
index 7f467fe..1a6a1f9 100644 (file)
@@ -559,4 +559,7 @@ struct kvm_pmu_event_filter {
 #define KVM_VCPU_TSC_CTRL 0 /* control group for the timestamp counter (TSC) */
 #define   KVM_VCPU_TSC_OFFSET 0 /* attribute for the TSC offset */
 
+/* x86-specific KVM_EXIT_HYPERCALL flags. */
+#define KVM_EXIT_HYPERCALL_LONG_MODE   BIT(0)
+
 #endif /* _ASM_X86_KVM_H */
index 500b96e..e8d7ebb 100644 (file)
 #define ARCH_GET_XCOMP_GUEST_PERM      0x1024
 #define ARCH_REQ_XCOMP_GUEST_PERM      0x1025
 
+#define ARCH_XCOMP_TILECFG             17
+#define ARCH_XCOMP_TILEDATA            18
+
 #define ARCH_MAP_VDSO_X32              0x2001
 #define ARCH_MAP_VDSO_32               0x2002
 #define ARCH_MAP_VDSO_64               0x2003
 
+#define ARCH_GET_UNTAG_MASK            0x4001
+#define ARCH_ENABLE_TAGGED_ADDR                0x4002
+#define ARCH_GET_MAX_TAG_BITS          0x4003
+#define ARCH_FORCE_TAGGED_SVA          0x4004
+
 #endif /* _ASM_X86_PRCTL_H */
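
A hedged sketch of how the new ARCH_* codes gate x86 Linear Address Masking
from user space; passing 6 tag bits (the LAM_U57 layout) is an assumption
about the ABI, not something this header states:

    /* Illustrative: enable tagged addressing, then read back the mask. */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <asm/prctl.h>

    int main(void)
    {
            unsigned long untag_mask = 0;

            if (syscall(SYS_arch_prctl, ARCH_ENABLE_TAGGED_ADDR, 6UL))
                    perror("ARCH_ENABLE_TAGGED_ADDR");
            if (syscall(SYS_arch_prctl, ARCH_GET_UNTAG_MASK, &untag_mask) == 0)
                    printf("untag mask: %#lx\n", untag_mask);
            return 0;
    }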
index b8ddfc4..bc48a4d 100644 (file)
@@ -2,6 +2,9 @@
 #ifndef __NR_fork
 #define __NR_fork 2
 #endif
+#ifndef __NR_execve
+#define __NR_execve 11
+#endif
 #ifndef __NR_getppid
 #define __NR_getppid 64
 #endif
index a91ac66..d055b82 100644 (file)
 .section .noinstr.text, "ax"
 
 /*
- * We build a jump to memcpy_orig by default which gets NOPped out on
- * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
- * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
- * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
- */
-
-/*
  * memcpy - Copy a memory block.
  *
  * Input:
  *
  * Output:
  * rax original destination
+ *
+ * The FSRM alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep movsb' itself is small enough to replace the call, but the
+ * two register moves blow up the code. And one of them is "needed"
+ * only for the return value that is the same as the source input,
+ * which the compiler could/should do much better anyway.
  */
 SYM_TYPED_FUNC_START(__memcpy)
-       ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
-                     "jmp memcpy_erms", X86_FEATURE_ERMS
+       ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM
 
        movq %rdi, %rax
        movq %rdx, %rcx
-       shrq $3, %rcx
-       andl $7, %edx
-       rep movsq
-       movl %edx, %ecx
        rep movsb
        RET
 SYM_FUNC_END(__memcpy)
@@ -45,17 +42,6 @@ EXPORT_SYMBOL(__memcpy)
 SYM_FUNC_ALIAS(memcpy, __memcpy)
 EXPORT_SYMBOL(memcpy)
 
-/*
- * memcpy_erms() - enhanced fast string memcpy. This is faster and
- * simpler than memcpy. Use memcpy_erms when possible.
- */
-SYM_FUNC_START_LOCAL(memcpy_erms)
-       movq %rdi, %rax
-       movq %rdx, %rcx
-       rep movsb
-       RET
-SYM_FUNC_END(memcpy_erms)
-
 SYM_FUNC_START_LOCAL(memcpy_orig)
        movq %rdi, %rax
 
index 6143b1a..7c59a70 100644 (file)
  * rdx   count (bytes)
  *
  * rax   original destination
+ *
+ * The FSRS alternative should be done inline (avoiding the call and
+ * the disgusting return handling), but that would require some help
+ * from the compiler for better calling conventions.
+ *
+ * The 'rep stosb' itself is small enough to replace the call, but all
+ * the register moves blow up the code. And two of them are "needed"
+ * only for the return value that is the same as the source input,
+ * which the compiler could/should do much better anyway.
  */
 SYM_FUNC_START(__memset)
-       /*
-        * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
-        * to use it when possible. If not available, use fast string instructions.
-        *
-        * Otherwise, use original memset function.
-        */
-       ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
-                     "jmp memset_erms", X86_FEATURE_ERMS
+       ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
 
        movq %rdi,%r9
+       movb %sil,%al
        movq %rdx,%rcx
-       andl $7,%edx
-       shrq $3,%rcx
-       /* expand byte value  */
-       movzbl %sil,%esi
-       movabs $0x0101010101010101,%rax
-       imulq %rsi,%rax
-       rep stosq
-       movl %edx,%ecx
        rep stosb
        movq %r9,%rax
        RET
@@ -48,26 +43,6 @@ EXPORT_SYMBOL(__memset)
 SYM_FUNC_ALIAS(memset, __memset)
 EXPORT_SYMBOL(memset)
 
-/*
- * ISO C memset - set a memory block to a byte value. This function uses
- * enhanced rep stosb to override the fast string function.
- * The code is simpler and shorter than the fast string function as well.
- *
- * rdi   destination
- * rsi   value (char)
- * rdx   count (bytes)
- *
- * rax   original destination
- */
-SYM_FUNC_START_LOCAL(memset_erms)
-       movq %rdi,%r9
-       movb %sil,%al
-       movq %rdx,%rcx
-       rep stosb
-       movq %r9,%rax
-       RET
-SYM_FUNC_END(memset_erms)
-
 SYM_FUNC_START_LOCAL(memset_orig)
        movq %rdi,%r10
 
index c61d061..52a0be4 100644 (file)
@@ -94,7 +94,7 @@ static void print_attributes(struct gpio_v2_line_info *info)
        for (i = 0; i < info->num_attrs; i++) {
                if (info->attrs[i].id == GPIO_V2_LINE_ATTR_ID_DEBOUNCE)
                        fprintf(stdout, ", debounce_period=%dusec",
-                               info->attrs[0].debounce_period_us);
+                               info->attrs[i].debounce_period_us);
        }
 }
 
index b54bd86..7ce02a2 100644 (file)
@@ -4,7 +4,6 @@
 
 /* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
 
-#define altinstruction_entry #
-#define ALTERNATIVE_2 #
+#define ALTERNATIVE #
 
 #endif
index cef3b1c..51ac441 100644 (file)
  */
 #define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu)  (0x10 + (cpu * 2))
 
-/* CoreSight trace ID is currently the bottom 7 bits of the value */
-#define CORESIGHT_TRACE_ID_VAL_MASK    GENMASK(6, 0)
-
-/*
- * perf record will set the legacy meta data values as unused initially.
- * This allows perf report to manage the decoders created when dynamic
- * allocation is in operation.
- */
-#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31)
-
-/* Value to set for unused trace ID values */
-#define CORESIGHT_TRACE_ID_UNUSED_VAL  0x7F
-
 /*
  * Below are the definition of bit offsets for perf option, and works as
  * arbitrary values for all ETM versions.
index 6428085..a87bbbb 100644 (file)
@@ -972,6 +972,19 @@ extern "C" {
 #define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
 #define DRM_IOCTL_SET_VERSION          DRM_IOWR(0x07, struct drm_set_version)
 #define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns fresh new GEM handles even if an existing GEM handle
+ * already refers to the same memory object before the IOCTL is performed.
+ */
 #define DRM_IOCTL_GEM_CLOSE            DRM_IOW (0x09, struct drm_gem_close)
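
A hedged sketch of the single-close rule described above (drm_fd and handle
are assumed to come from earlier, driver-specific calls):

    /* Close a GEM handle exactly once, per the lifetime rule above. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static void gem_close_once(int drm_fd, uint32_t handle)
    {
            struct drm_gem_close args;

            memset(&args, 0, sizeof(args));
            args.handle = handle;
            if (ioctl(drm_fd, DRM_IOCTL_GEM_CLOSE, &args))
                    perror("DRM_IOCTL_GEM_CLOSE");
    }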
 #define DRM_IOCTL_GEM_FLINK            DRM_IOWR(0x0a, struct drm_gem_flink)
 #define DRM_IOCTL_GEM_OPEN             DRM_IOWR(0x0b, struct drm_gem_open)
@@ -1012,7 +1025,37 @@ extern "C" {
 #define DRM_IOCTL_UNLOCK               DRM_IOW( 0x2b, struct drm_lock)
 #define DRM_IOCTL_FINISH               DRM_IOW( 0x2c, struct drm_lock)
 
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
 #define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
 #define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
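
A hedged sketch of the export direction documented above; the import path is
symmetric, filling in .fd and reading back .handle instead:

    /* Export a GEM handle as a close-on-exec DMA-BUF file descriptor. */
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/drm.h>

    static int gem_to_dmabuf(int drm_fd, uint32_t handle)
    {
            struct drm_prime_handle args;

            memset(&args, 0, sizeof(args));
            args.handle = handle;
            args.flags = DRM_CLOEXEC;
            if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
                    return -1;
            return args.fd; /* caller owns the DMA-BUF fd */
    }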
 
 #define DRM_IOCTL_AGP_ACQUIRE          DRM_IO(  0x30)
@@ -1104,8 +1147,13 @@ extern "C" {
  * struct as the output.
  *
  * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
- * will be filled with GEM buffer handles. Planes are valid until one has a
- * zero handle -- this can be used to compute the number of planes.
+ * will be filled with GEM buffer handles. Fresh new GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes in case they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
  *
  * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
  * until one has a zero &drm_mode_fb_cmd2.pitches.
@@ -1113,6 +1161,11 @@ extern "C" {
  * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
  * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
  * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
+ * double-close handles which are specified multiple times in the array.
  */
 #define DRM_IOCTL_MODE_GETFB2          DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
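
Putting the GETFB2 comment above into practice, a sketch of the leak-free export pattern; fb_id names an existing framebuffer object and error handling is trimmed:

    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>

    static void export_fb_planes(int fd, uint32_t fb_id, int dmabuf_fds[4])
    {
        struct drm_mode_fb_cmd2 fb;
        int i, j;

        memset(&fb, 0, sizeof(fb));
        fb.fb_id = fb_id;
        if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &fb))
            return;

        for (i = 0; i < 4 && fb.handles[i]; i++) {
            struct drm_prime_handle prime = { .handle = fb.handles[i] };

            drmIoctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
            dmabuf_fds[i] = prime.fd;
        }

        /* Close each unique handle exactly once: several planes may
         * carry the same handle. */
        for (i = 0; i < 4 && fb.handles[i]; i++) {
            int seen = 0;

            for (j = 0; j < i; j++)
                seen |= (fb.handles[j] == fb.handles[i]);
            if (!seen) {
                struct drm_gem_close args = { .handle = fb.handles[i] };

                drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
            }
        }
    }
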
 
index 8df261c..dba7c5a 100644 (file)
@@ -2491,7 +2491,7 @@ struct i915_context_param_engines {
 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
 #define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
 #define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
-       struct i915_engine_class_instance engines[0];
+       struct i915_engine_class_instance engines[];
 } __attribute__((packed));
 
 #define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
@@ -2676,6 +2676,10 @@ enum drm_i915_oa_format {
        I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
        I915_OA_FORMAT_A24u40_A14u32_B8_C8,
 
+       /* MTL OAM */
+       I915_OAM_FORMAT_MPEC8u64_B8_C8,
+       I915_OAM_FORMAT_MPEC8u32_B8_C8,
+
        I915_OA_FORMAT_MAX          /* non-ABI */
 };
 
@@ -2758,6 +2762,25 @@ enum drm_i915_perf_property_id {
         */
        DRM_I915_PERF_PROP_POLL_OA_PERIOD,
 
+       /**
+        * Multiple engines may be mapped to the same OA unit. The OA unit is
+        * identified by class:instance of any engine mapped to it.
+        *
+        * This parameter specifies the engine class and must be passed along
+        * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
+        *
+        * This property is available in perf revision 6.
+        */
+       DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
+
+       /**
+        * This parameter specifies the engine instance and must be passed along
+        * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
+        *
+        * This property is available in perf revision 6.
+        */
+       DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
+
        DRM_I915_PERF_PROP_MAX /* non-ABI */
 };
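
For illustration, a sketch of how the two new properties might be passed together in the key/value property list of an i915 perf open (assuming perf revision 6; a real open also needs the usual OA properties such as the metrics set and report format):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    static int perf_open_on_engine(int drm_fd, uint64_t klass, uint64_t instance)
    {
        uint64_t props[] = {
            DRM_I915_PERF_PROP_OA_ENGINE_CLASS,    klass,
            DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE, instance,
            /* ... plus metrics set, OA format, exponent, ... */
        };
        struct drm_i915_perf_open_param param = {
            .num_properties = sizeof(props) / sizeof(props[0]) / 2,
            .properties_ptr = (uintptr_t)props,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
    }
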
 
index 1bb11a6..c994ff5 100644 (file)
@@ -1035,6 +1035,7 @@ enum bpf_attach_type {
        BPF_TRACE_KPROBE_MULTI,
        BPF_LSM_CGROUP,
        BPF_STRUCT_OPS,
+       BPF_NETFILTER,
        __MAX_BPF_ATTACH_TYPE
 };
 
index af2a44c..a429381 100644 (file)
@@ -28,7 +28,7 @@
 #define _BITUL(x)      (_UL(1) << (x))
 #define _BITULL(x)     (_ULL(1) << (x))
 
-#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
+#define __ALIGN_KERNEL(x, a)           __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
 #define __ALIGN_KERNEL_MASK(x, mask)   (((x) + (mask)) & ~(mask))
 
 #define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
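
A quick worked check of the rounding (the alignment must be a power of two; assumes <linux/const.h> exposes these macros to user space, as the uapi header does):

    #include <linux/const.h>

    _Static_assert(__ALIGN_KERNEL(13UL, 8) == 16UL, "13 rounds up to 16");
    _Static_assert(__ALIGN_KERNEL(16UL, 8) == 16UL, "aligned values unchanged");
    _Static_assert(__KERNEL_DIV_ROUND_UP(13, 8) == 2, "13/8 rounds up to 2");

The __typeof__ cast keeps the mask in the same type as x, so a 64-bit x is not truncated by an int-typed alignment constant, while avoiding the non-ISO typeof keyword in a uapi header.
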
index 07a4cb1..e682ab6 100644 (file)
@@ -162,6 +162,8 @@ struct in_addr {
 #define MCAST_MSFILTER                 48
 #define IP_MULTICAST_ALL               49
 #define IP_UNICAST_IF                  50
+#define IP_LOCAL_PORT_RANGE            51
+#define IP_PROTOCOL                    52
 
 #define MCAST_EXCLUDE  0
 #define MCAST_INCLUDE  1
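
A sketch of the first new option; the encoding below ((high << 16) | low in a u32, 0 to clear) follows the kernel commit that introduced IP_LOCAL_PORT_RANGE and should be checked against the final documentation:

    #include <stdint.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IP_LOCAL_PORT_RANGE
    #define IP_LOCAL_PORT_RANGE 51      /* from the hunk above */
    #endif

    static int set_local_port_range(int sock, uint16_t lo, uint16_t hi)
    {
        uint32_t range = ((uint32_t)hi << 16) | lo;

        return setsockopt(sock, IPPROTO_IP, IP_LOCAL_PORT_RANGE,
                          &range, sizeof(range));
    }
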
index 4003a16..737318b 100644 (file)
@@ -341,8 +341,13 @@ struct kvm_run {
                        __u64 nr;
                        __u64 args[6];
                        __u64 ret;
-                       __u32 longmode;
-                       __u32 pad;
+
+                       union {
+#ifndef __KERNEL__
+                               __u32 longmode;
+#endif
+                               __u64 flags;
+                       };
                } hypercall;
                /* KVM_EXIT_TPR_ACCESS */
                struct {
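
The union keeps the storage layout compatible: existing user-space still sees the 32-bit longmode field, while new code reads the 64-bit flags word starting at the same offset. A compile-time sketch of that aliasing:

    #include <stddef.h>
    #include <linux/kvm.h>

    /* Both views of the hypercall exit payload start at the same offset;
     * longmode simply aliases the low bits of flags on little-endian. */
    _Static_assert(offsetof(struct kvm_run, hypercall.longmode) ==
                   offsetof(struct kvm_run, hypercall.flags),
                   "old and new fields alias the same storage");
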
@@ -1184,6 +1189,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_S390_PROTECTED_ASYNC_DISABLE 224
 #define KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP 225
 #define KVM_CAP_PMU_EVENT_MASKED_EVENTS 226
+#define KVM_CAP_COUNTER_OFFSET 227
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1543,6 +1549,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_PMU_EVENT_FILTER  _IOW(KVMIO,  0xb2, struct kvm_pmu_event_filter)
 #define KVM_PPC_SVM_OFF                  _IO(KVMIO,  0xb3)
 #define KVM_ARM_MTE_COPY_TAGS    _IOR(KVMIO,  0xb4, struct kvm_arm_copy_mte_tags)
+/* Available with KVM_CAP_COUNTER_OFFSET */
+#define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO,  0xb5, struct kvm_arm_counter_offset)
 
 /* ioctl for vm fd */
 #define KVM_CREATE_DEVICE        _IOWR(KVMIO,  0xe0, struct kvm_create_device)
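
A hedged sketch of the new VM ioctl; the two-field layout of struct kvm_arm_counter_offset is assumed from the 6.4 counter-offset series, and the call requires KVM_CAP_COUNTER_OFFSET to be advertised:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_guest_counter_offset(int vm_fd, uint64_t offset)
    {
        struct kvm_arm_counter_offset off = {
            .counter_offset = offset,   /* subtracted from the physical counter */
            .reserved = 0,
        };

        return ioctl(vm_fd, KVM_ARM_SET_COUNTER_OFFSET, &off);
    }
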
index 759b3f5..f23d9a1 100644 (file)
@@ -290,6 +290,8 @@ struct prctl_mm_map {
 #define PR_SET_VMA             0x53564d41
 # define PR_SET_VMA_ANON_NAME          0
 
+#define PR_GET_AUXV                    0x41555856
+
 #define PR_SET_MEMORY_MERGE            67
 #define PR_GET_MEMORY_MERGE            68
 #endif /* _LINUX_PRCTL_H */
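
A sketch of the new prctl. The return-value semantics assumed here (the full auxv size is returned, so a short buffer can be detected) come from the patch adding PR_GET_AUXV:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_GET_AUXV
    #define PR_GET_AUXV 0x41555856      /* ASCII "AUXV", from the hunk above */
    #endif

    int main(void)
    {
        unsigned long buf[64];
        long full = prctl(PR_GET_AUXV, (unsigned long)buf, sizeof(buf), 0, 0);

        if (full < 0)
            return 1;
        printf("auxv is %ld bytes (buffer holds %zu)\n", full, sizeof(buf));
        return 0;
    }
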
index de6810e..0aa955a 100644 (file)
@@ -429,9 +429,14 @@ struct snd_pcm_sw_params {
        snd_pcm_uframes_t avail_min;            /* min avail frames for wakeup */
        snd_pcm_uframes_t xfer_align;           /* obsolete: xfer size need to be a multiple */
        snd_pcm_uframes_t start_threshold;      /* min hw_avail frames for automatic start */
-       snd_pcm_uframes_t stop_threshold;       /* min avail frames for automatic stop */
-       snd_pcm_uframes_t silence_threshold;    /* min distance from noise for silence filling */
-       snd_pcm_uframes_t silence_size;         /* silence block size */
+       /*
+        * The following two thresholds alleviate playback buffer underruns; when
+        * hw_avail drops below the threshold, the respective action is triggered:
+        */
+       snd_pcm_uframes_t stop_threshold;       /* - stop playback */
+       snd_pcm_uframes_t silence_threshold;    /* - pre-fill buffer with silence */
+       snd_pcm_uframes_t silence_size;         /* max size of silence pre-fill; when >= boundary,
+                                                * fill played area with silence immediately */
        snd_pcm_uframes_t boundary;             /* pointers wrap point */
        unsigned int proto;                     /* protocol version */
        unsigned int tstamp_type;               /* timestamp type (req. proto >= 2.0.12) */
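
For reference, these fields are usually tuned through alsa-lib's sw_params wrappers rather than the raw struct; a sketch (pcm is an open, hw-configured playback handle):

    #include <alsa/asoundlib.h>

    static int tune_sw_params(snd_pcm_t *pcm, snd_pcm_uframes_t buffer_size)
    {
        snd_pcm_sw_params_t *sw;
        int err;

        snd_pcm_sw_params_alloca(&sw);
        if ((err = snd_pcm_sw_params_current(pcm, sw)) < 0)
            return err;
        /* Stop automatically once the whole buffer has drained (underrun)... */
        if ((err = snd_pcm_sw_params_set_stop_threshold(pcm, sw,
                                                        buffer_size)) < 0)
            return err;
        /* ...and do not pre-fill with silence at all. */
        if ((err = snd_pcm_sw_params_set_silence_threshold(pcm, sw, 0)) < 0)
            return err;
        if ((err = snd_pcm_sw_params_set_silence_size(pcm, sw, 0)) < 0)
            return err;
        return snd_pcm_sw_params(pcm, sw);
    }
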
@@ -570,7 +575,8 @@ struct __snd_pcm_mmap_status64 {
 struct __snd_pcm_mmap_control64 {
        __pad_before_uframe __pad1;
        snd_pcm_uframes_t appl_ptr;      /* RW: appl ptr (0...boundary-1) */
-       __pad_before_uframe __pad2;
+       __pad_before_uframe __pad2;      // This should be __pad_after_uframe, but binary
+                                        // backwards compatibility constraints prevent a fix.
 
        __pad_before_uframe __pad3;
        snd_pcm_uframes_t  avail_min;    /* RW: min available frames for wakeup */
index ad1ec89..a27f6e9 100644 (file)
@@ -117,6 +117,7 @@ static const char * const attach_type_name[] = {
        [BPF_PERF_EVENT]                = "perf_event",
        [BPF_TRACE_KPROBE_MULTI]        = "trace_kprobe_multi",
        [BPF_STRUCT_OPS]                = "struct_ops",
+       [BPF_NETFILTER]                 = "netfilter",
 };
 
 static const char * const link_type_name[] = {
@@ -8712,7 +8713,7 @@ static const struct bpf_sec_def section_defs[] = {
        SEC_DEF("struct_ops+",          STRUCT_OPS, 0, SEC_NONE),
        SEC_DEF("struct_ops.s+",        STRUCT_OPS, 0, SEC_SLEEPABLE),
        SEC_DEF("sk_lookup",            SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
-       SEC_DEF("netfilter",            NETFILTER, 0, SEC_NONE),
+       SEC_DEF("netfilter",            NETFILTER, BPF_NETFILTER, SEC_NONE),
 };
 
 static size_t custom_sec_def_cnt;
index 6065f40..b7d4431 100644 (file)
@@ -180,7 +180,9 @@ static int probe_prog_load(enum bpf_prog_type prog_type,
        case BPF_PROG_TYPE_SK_REUSEPORT:
        case BPF_PROG_TYPE_FLOW_DISSECTOR:
        case BPF_PROG_TYPE_CGROUP_SYSCTL:
+               break;
        case BPF_PROG_TYPE_NETFILTER:
+               opts.expected_attach_type = BPF_NETFILTER;
                break;
        default:
                return -EOPNOTSUPP;
index aa77bca..3144f33 100644 (file)
@@ -591,8 +591,9 @@ class YnlFamily(SpecFamily):
                         print('Unexpected message: ' + repr(gm))
                         continue
 
-                rsp.append(self._decode(gm.raw_attrs, op.attr_set.name)
-                           | gm.fixed_header_attrs)
+                rsp_msg = self._decode(gm.raw_attrs, op.attr_set.name)
+                rsp_msg.update(gm.fixed_header_attrs)
+                rsp.append(rsp_msg)
 
         if not rsp:
             return None
index 4884520..a794d9e 100644 (file)
@@ -216,6 +216,12 @@ ifeq ($(call get-executable,$(BISON)),)
   dummy := $(error Error: $(BISON) is missing on this system, please install it)
 endif
 
+ifeq ($(BUILD_BPF_SKEL),1)
+  ifeq ($(call get-executable,$(CLANG)),)
+    dummy := $(error $(CLANG) is missing on this system, please install it to be able to build with BUILD_BPF_SKEL=1)
+  endif
+endif
+
 ifneq ($(OUTPUT),)
   ifeq ($(shell expr $(shell $(BISON) --version | grep bison | sed -e 's/.\+ \([0-9]\+\).\([0-9]\+\).\([0-9]\+\)/\1\2\3/g') \>\= 371), 1)
     BISON_FILE_PREFIX_MAP := --file-prefix-map=$(OUTPUT)=
@@ -921,6 +927,7 @@ ifndef NO_DEMANGLE
     EXTLIBS += -lstdc++
     CFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT
     CXXFLAGS += -DHAVE_CXA_DEMANGLE_SUPPORT
+    $(call detected,CONFIG_CXX_DEMANGLE)
   endif
   ifdef BUILD_NONDISTRO
     ifeq ($(filter -liberty,$(EXTLIBS)),)
index a42a6a9..f487948 100644 (file)
@@ -181,7 +181,6 @@ HOSTCC  ?= gcc
 HOSTLD  ?= ld
 HOSTAR  ?= ar
 CLANG   ?= clang
-LLVM_STRIP ?= llvm-strip
 
 PKG_CONFIG = $(CROSS_COMPILE)pkg-config
 
@@ -1057,15 +1056,33 @@ $(SKEL_TMP_OUT) $(LIBAPI_OUTPUT) $(LIBBPF_OUTPUT) $(LIBPERF_OUTPUT) $(LIBSUBCMD_
 
 ifdef BUILD_BPF_SKEL
 BPFTOOL := $(SKEL_TMP_OUT)/bootstrap/bpftool
-BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE)
+# Get Clang's default includes on this system, as opposed to those seen by
+# '-target bpf'. This fixes "missing" files on some architectures/distros,
+# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc.
+#
+# Use '-idirafter': Don't interfere with include mechanics except where the
+# build would have failed anyway.
+define get_sys_includes
+$(shell $(1) $(2) -v -E - </dev/null 2>&1 \
+       | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
+$(shell $(1) $(2) -dM -E - </dev/null | grep '__riscv_xlen ' | awk '{printf("-D__riscv_xlen=%d -D__BITS_PER_LONG=%d", $$3, $$3)}')
+endef
+
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET_ARCH = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG),$(CLANG_TARGET_ARCH))
+BPF_INCLUDE := -I$(SKEL_TMP_OUT)/.. -I$(LIBBPF_INCLUDE) $(CLANG_SYS_INCLUDES)
+TOOLS_UAPI_INCLUDE := -I$(srctree)/tools/include/uapi
 
 $(BPFTOOL): | $(SKEL_TMP_OUT)
        $(Q)CFLAGS= $(MAKE) -C ../bpf/bpftool \
                OUTPUT=$(SKEL_TMP_OUT)/ bootstrap
 
 $(SKEL_TMP_OUT)/%.bpf.o: util/bpf_skel/%.bpf.c $(LIBBPF) | $(SKEL_TMP_OUT)
-       $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) \
-         -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@ && $(LLVM_STRIP) -g $@
+       $(QUIET_CLANG)$(CLANG) -g -O2 -target bpf -Wall -Werror $(BPF_INCLUDE) $(TOOLS_UAPI_INCLUDE) \
+         -c $(filter util/bpf_skel/%.bpf.c,$^) -o $@
 
 $(SKEL_OUT)/%.skel.h: $(SKEL_TMP_OUT)/%.bpf.o | $(BPFTOOL)
        $(QUIET_GENSKEL)$(BPFTOOL) gen skeleton $< > $@
index 77cb03e..9ca040b 100644 (file)
@@ -78,9 +78,9 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
        char path[PATH_MAX];
        int err;
        u32 val;
-       u64 contextid =
-               evsel->core.attr.config &
-               (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
+       u64 contextid = evsel->core.attr.config &
+               (perf_pmu__format_bits(&cs_etm_pmu->format, "contextid") |
+                perf_pmu__format_bits(&cs_etm_pmu->format, "contextid1") |
                 perf_pmu__format_bits(&cs_etm_pmu->format, "contextid2"));
 
        if (!contextid)
@@ -114,8 +114,7 @@ static int cs_etm_validate_context_id(struct auxtrace_record *itr,
                 *  0b00100 Maximum of 32-bit Context ID size.
                 *  All other values are reserved.
                 */
-               val = BMVAL(val, 5, 9);
-               if (!val || val != 0x4) {
+               if (BMVAL(val, 5, 9) != 0x4) {
                        pr_err("%s: CONTEXTIDR_EL1 isn't supported, disable with %s/contextid1=0/\n",
                               CORESIGHT_ETM_PMU_NAME, CORESIGHT_ETM_PMU_NAME);
                        return -EINVAL;
index 860a8b4..a9623b1 100644 (file)
@@ -12,7 +12,7 @@
 #include "arm-spe.h"
 #include "hisi-ptt.h"
 #include "../../../util/pmu.h"
-#include "../cs-etm.h"
+#include "../../../util/cs-etm.h"
 
 struct perf_event_attr
 *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
index d730666..80b9f62 100644 (file)
@@ -29,8 +29,8 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
                char path[PATH_MAX];
                FILE *file;
 
-               scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
-                               sysfs, cpus->map[cpu]);
+               scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d" MIDR,
+                         sysfs, RC_CHK_ACCESS(cpus)->map[cpu].cpu);
 
                file = fopen(path, "r");
                if (!file) {
index fa143ac..ef1ed64 100644 (file)
@@ -18,7 +18,7 @@ static struct perf_pmu *pmu__find_core_pmu(void)
                 * The cpumap should cover all CPUs. Otherwise, some CPUs may
                 * not support some events or have different event IDs.
                 */
-               if (pmu->cpus->nr != cpu__max_cpu().cpu)
+               if (RC_CHK_ACCESS(pmu->cpus)->nr != cpu__max_cpu().cpu)
                        return NULL;
 
                return pmu;
index 7991476..b68f475 100644 (file)
 444  common    landlock_create_ruleset sys_landlock_create_ruleset     sys_landlock_create_ruleset
 445  common    landlock_add_rule       sys_landlock_add_rule           sys_landlock_add_rule
 446  common    landlock_restrict_self  sys_landlock_restrict_self      sys_landlock_restrict_self
-# 447 reserved for memfd_secret
+447  common    memfd_secret            sys_memfd_secret                sys_memfd_secret
 448  common    process_mrelease        sys_process_mrelease            sys_process_mrelease
 449  common    futex_waitv             sys_futex_waitv                 sys_futex_waitv
 450  common    set_mempolicy_home_node sys_set_mempolicy_home_node     sys_set_mempolicy_home_node
index 50ae8bd..6188e19 100644 (file)
@@ -7,7 +7,3 @@ MEMCPY_FN(memcpy_orig,
 MEMCPY_FN(__memcpy,
        "x86-64-movsq",
        "movsq-based memcpy() in arch/x86/lib/memcpy_64.S")
-
-MEMCPY_FN(memcpy_erms,
-       "x86-64-movsb",
-       "movsb-based memcpy() in arch/x86/lib/memcpy_64.S")
index 6eb45a2..1b9fef7 100644 (file)
@@ -2,7 +2,7 @@
 
 /* Various wrappers to make the kernel .S file build in user-space: */
 
-// memcpy_orig and memcpy_erms are being defined as SYM_L_LOCAL but we need it
+// memcpy_orig is being defined as SYM_L_LOCAL but we need it to be global
 #define SYM_FUNC_START_LOCAL(name)                      \
         SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
 #define memcpy MEMCPY /* don't hide glibc's memcpy() */
index dac6d2b..247c72f 100644 (file)
@@ -7,7 +7,3 @@ MEMSET_FN(memset_orig,
 MEMSET_FN(__memset,
        "x86-64-stosq",
        "movsq-based memset() in arch/x86/lib/memset_64.S")
-
-MEMSET_FN(memset_erms,
-       "x86-64-stosb",
-       "movsb-based memset() in arch/x86/lib/memset_64.S")
index 6f093c4..abd26c9 100644 (file)
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-// memset_orig and memset_erms are being defined as SYM_L_LOCAL but we need it
+// memset_orig is being defined as SYM_L_LOCAL but we need it to be global
 #define SYM_FUNC_START_LOCAL(name)                      \
         SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
 #define memset MEMSET /* don't hide glibc's memset() */
index 810e337..f9906f5 100644 (file)
@@ -1175,7 +1175,7 @@ int cmd_ftrace(int argc, const char **argv)
        OPT_BOOLEAN('b', "use-bpf", &ftrace.target.use_bpf,
                    "Use BPF to measure function latency"),
 #endif
-       OPT_BOOLEAN('n', "--use-nsec", &ftrace.use_nsec,
+       OPT_BOOLEAN('n', "use-nsec", &ftrace.use_nsec,
                    "Use nano-second histogram"),
        OPT_PARENT(common_options),
        };
index 006f522..c57be48 100644 (file)
@@ -3647,6 +3647,13 @@ static int process_stat_config_event(struct perf_session *session __maybe_unused
                                     union perf_event *event)
 {
        perf_event__read_stat_config(&stat_config, &event->stat_config);
+
+       /*
+        * Aggregation modes are not used since post-processing scripts are
+        * supposed to take care of such requirements
+        */
+       stat_config.aggr_mode = AGGR_NONE;
+
        return 0;
 }
 
index cc9fa48..b9ad32f 100644 (file)
@@ -667,6 +667,13 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
                        evsel_list->core.threads->err_thread = -1;
                        return COUNTER_RETRY;
                }
+       } else if (counter->skippable) {
+               if (verbose > 0)
+                       ui__warning("skipping event %s that kernel failed to open .\n",
+                                   evsel__name(counter));
+               counter->supported = false;
+               counter->errored = true;
+               return COUNTER_SKIP;
        }
 
        evsel__open_strerror(counter, &target, errno, msg, sizeof(msg));
@@ -1890,15 +1897,28 @@ static int add_default_attributes(void)
                 * caused by exposing latent bugs. This is fixed properly in:
                 * https://lore.kernel.org/lkml/bff481ba-e60a-763f-0aa0-3ee53302c480@linux.intel.com/
                 */
-               if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid() &&
-                   metricgroup__parse_groups(evsel_list, "TopdownL1",
-                                           /*metric_no_group=*/false,
-                                           /*metric_no_merge=*/false,
-                                           /*metric_no_threshold=*/true,
-                                           stat_config.user_requested_cpu_list,
-                                           stat_config.system_wide,
-                                           &stat_config.metric_events) < 0)
-                       return -1;
+               if (metricgroup__has_metric("TopdownL1") && !perf_pmu__has_hybrid()) {
+                       struct evlist *metric_evlist = evlist__new();
+                       struct evsel *metric_evsel;
+
+                       if (!metric_evlist)
+                               return -1;
+
+                       if (metricgroup__parse_groups(metric_evlist, "TopdownL1",
+                                                       /*metric_no_group=*/false,
+                                                       /*metric_no_merge=*/false,
+                                                       /*metric_no_threshold=*/true,
+                                                       stat_config.user_requested_cpu_list,
+                                                       stat_config.system_wide,
+                                                       &stat_config.metric_events) < 0)
+                               return -1;
+
+                       evlist__for_each_entry(metric_evlist, metric_evsel) {
+                               metric_evsel->skippable = true;
+                       }
+                       evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
+                       evlist__delete(metric_evlist);
+               }
 
                /* Platform specific attrs */
                if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
index 75d80e7..1f90475 100644 (file)
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.   The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound.   The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound_aux",
         "MetricThreshold": "tma_backend_bound_aux > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that UOPS must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.  All of these subevents count backend stalls, in slots, due to a resource limitation.   These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based.  These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_base",
         "MetricThreshold": "tma_base > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_ms_uops",
         "MetricThreshold": "tma_ms_uops > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS).  This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
         "MetricName": "tma_resource_bound",
         "MetricThreshold": "tma_resource_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.75",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%",
         "Unit": "cpu_atom"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%",
         "Unit": "cpu_core"
index 1a85d93..0402adb 100644 (file)
@@ -98,6 +98,7 @@
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.   The rest of these subevents count backend stalls, in cycles, due to an outstanding request which is memory bound vs core bound.   The subevents are not slot based events and therefore can not be precisely added or subtracted from the Backend_Bound_Aux subevents which are slot based.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound_aux",
         "MetricThreshold": "tma_backend_bound_aux > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that UOPS must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.  All of these subevents count backend stalls, in slots, due to a resource limitation.   These are not cycle based events and therefore can not be precisely added or subtracted from the Backend_Bound subevents which are cycle based.  These subevents are supplementary to Backend_Bound and can be used to analyze results from a resource perspective at allocation.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "Counts the total number of issue slots that were not consumed by the backend because allocation is stalled due to a mispredicted jump or a machine clear. Only issue slots wasted due to fast nukes such as memory ordering nukes are counted. Other nukes are not accounted for. Counts all issue slots blocked during this recovery window including relevant microcode flows and while uops are not yet available in the instruction queue (IQ). Also includes the issue slots that were consumed by the backend but were thrown away because they were younger than the mispredict or machine clear.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_base",
         "MetricThreshold": "tma_base > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_bad_speculation_group",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "ScaleUnit": "100%"
     },
     {
         "MetricGroup": "TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_ms_uops",
         "MetricThreshold": "tma_ms_uops > 0.05",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the number of uops that are from the complex flows issued by the micro-sequencer (MS).  This includes uops from flows due to complex instructions, faults, assists, and inserted flows.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL2;tma_L2_group;tma_backend_bound_aux_group",
         "MetricName": "tma_resource_bound",
         "MetricThreshold": "tma_resource_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "Counts the total number of issue slots  that were not consumed by the backend due to backend stalls.  Note that uops must be available for consumption in order for this event to count.  If a uop is not available (IQ is empty), this event will not count.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.75",
+        "MetricgroupNoGroup": "TopdownL1",
         "ScaleUnit": "100%"
     },
     {
index 51cf856..f9e2316 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index fb57c73..e9c46d3 100644 (file)
@@ -97,6 +97,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
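The MetricThreshold strings above are small boolean expressions over other metrics, written in perf's expression syntax where '&' and '|' are logical and/or (compare tma_retiring's "tma_retiring > 0.7 | tma_heavy_operations > 0.1"). A toy Python evaluator, purely illustrative and not perf's real parser (the function name and sample values are made up):

    def threshold_fires(expr: str, values: dict) -> bool:
        # Map the MetricThreshold '&'/'|' operators to Python's and/or,
        # then evaluate against a dict of computed metric values.
        py = expr.replace("&", " and ").replace("|", " or ")
        return bool(eval(py, {"__builtins__": {}}, dict(values)))  # toy only

    print(threshold_fires(
        "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
        {"tma_fetch_bandwidth": 0.2, "tma_frontend_bound": 0.3, "tma_info_ipc": 2.0},
    ))  # -> True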
index 65ec0c9..437b986 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
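A note on units: "ScaleUnit": "100%" means each of these fractions is reported as a percentage, and the four TopdownL1 categories (frontend bound, bad speculation, backend bound, retiring) partition all issue slots by construction, so they should sum to roughly 100%. A quick sanity check with illustrative numbers (not measured data):

    # Illustrative values only; on real hardware these come from perf.
    l1 = {
        "tma_frontend_bound": 0.22,
        "tma_bad_speculation": 0.07,
        "tma_backend_bound": 0.41,
        "tma_retiring": 0.30,
    }
    assert abs(sum(l1.values()) - 1.0) < 1e-6  # L1 categories partition all slots
    for name, frac in l1.items():
        print(f"{name:22} {frac * 100:5.1f}%")  # ScaleUnit "100%": report as percent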
index 8f7dc72..875c766 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
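The MetricGroup tags also encode the TMA hierarchy: every TopdownL2 metric carries a "tma_<parent>_group" tag naming its TopdownL1 parent (tma_branch_mispredicts and tma_machine_clears both carry tma_bad_speculation_group, for instance). A sketch that recovers the tree from a parsed metrics list, relying only on that naming convention:

    from collections import defaultdict

    def tma_tree(metrics):
        """Map each L1 parent to its L2 children via the tag convention above."""
        tree = defaultdict(list)
        for m in metrics:
            groups = m.get("MetricGroup", "").split(";")
            if "TopdownL2" not in groups:
                continue
            for g in groups:
                if g.startswith("tma_") and g.endswith("_group") and g != "tma_L2_group":
                    tree[g[4:-6]].append(m["MetricName"])  # strip "tma_" and "_group"
        return dict(tree)

    # e.g. tree["bad_speculation"] == ["tma_branch_mispredicts", "tma_machine_clears"]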
index 2528418..9570a88 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 11f152c..a522202 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
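Consuming these definitions from the command line: perf stat's -M/--metrics option accepts both individual MetricName values and group names such as TopdownL1 or TopdownL2. A hedged wrapper sketch (it assumes a perf binary built from a tree containing these JSON files; the output format varies by perf version):

    import subprocess

    # perf stat prints its metric table on stderr, not stdout.
    res = subprocess.run(
        ["perf", "stat", "-M", "TopdownL1", "-a", "--", "sleep", "1"],
        capture_output=True, text=True,
    )
    print(res.stderr)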
index f45ae34..1a2154f 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
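The invariant this diff establishes is uniform: every metric tagged TopdownL1 or TopdownL2 gains a matching MetricgroupNoGroup value. A small checker over a kernel checkout (the glob pattern is an assumption about the tree layout) that flags any metric missing the new key:

    import glob
    import json

    for path in glob.glob("tools/perf/pmu-events/arch/x86/*/*-metrics.json"):
        with open(path) as f:
            metrics = json.load(f)
        for m in metrics:
            groups = m.get("MetricGroup", "").split(";")
            for level in ("TopdownL1", "TopdownL2"):
                if level in groups and m.get("MetricgroupNoGroup") != level:
                    print(f'{path}: {m.get("MetricName")} lacks MetricgroupNoGroup={level}')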
index 0f9b174..1ef772b 100644 (file)
@@ -80,6 +80,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
@@ -89,6 +90,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
index 5247f69..11080cc 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 89469b1..65a46d6 100644
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index e8f4e5c..66a6f65 100644
@@ -76,6 +76,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
@@ -85,6 +86,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
@@ -95,6 +97,7 @@
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 4a99fe5..4b8bc19 100644
@@ -76,6 +76,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
@@ -85,6 +86,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
@@ -95,6 +97,7 @@
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: RS_EVENTS.EMPTY_END",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 126300b..620fc5b 100644
@@ -87,6 +87,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
@@ -96,6 +97,7 @@
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: TOPDOWN.BR_MISPREDICT_SLOTS. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 6 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences. Sample with: UOPS_RETIRED.HEAVY",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
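The `MetricgroupNoGroup` additions above (and in the sibling JSON files that follow) tag each top-down metric with the level -- "TopdownL1" or "TopdownL2" -- whose events should not be forced into a single perf event group when that metric group is requested. A rough sketch of how a consumer could honor the tag is below; the helper name and surrounding logic are illustrative, not the actual perf metricgroup code, and only the two fields shown are assumed.

```c
/* Rough sketch, not the real perf metricgroup code: decide whether to skip
 * event grouping for a metric whose MetricgroupNoGroup matches the group
 * the user asked for. Struct members mirror the later pmu-events hunks. */
#include <stdbool.h>
#include <string.h>

struct pmu_metric_sketch {
	const char *metric_name;
	const char *metricgroup_no_group;	/* e.g. "TopdownL1" or "TopdownL2" */
};

static bool skip_grouping(const struct pmu_metric_sketch *pm, const char *requested_group)
{
	return pm->metricgroup_no_group &&
	       strcmp(pm->metricgroup_no_group, requested_group) == 0;
}
```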
index a6d212b..21ef6c9 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index fa2f7f1..eb6f12c 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 4 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.RETIRE_SLOTS",
         "ScaleUnit": "100%"
     },
index 4c80d6b..b442ed4 100644 (file)
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_backend_bound",
         "MetricThreshold": "tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where no uops are being delivered due to a lack of required resources for accepting new uops in the Backend. Backend is the portion of the processor core where the out-of-order scheduler dispatches ready uops into their respective execution units; and once completed these uops get retired according to program order. For example; stalls due to data-cache misses or stalls due to the divider unit being overloaded are both categorized under Backend Bound. Backend Bound is further divided into two main categories: Memory Bound and Core Bound. Sample with: TOPDOWN.BACKEND_BOUND_SLOTS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_bad_speculation",
         "MetricThreshold": "tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots wasted due to incorrect speculations. This include slots used to issue uops that do not eventually get retired and slots for which the issue-pipeline was blocked due to recovery from earlier incorrect speculation. For example; wasted work due to miss-predicted branches are categorized under Bad Speculation category. Incorrect data speculation followed by Memory Ordering Nukes is another example.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;BrMispredicts;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueBM",
         "MetricName": "tma_branch_mispredicts",
         "MetricThreshold": "tma_branch_mispredicts > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Branch Misprediction.  These slots are either wasted by uops fetched from an incorrectly speculated program path; or stalls when the out-of-order part of the machine needs to recover its state from a speculative path. Sample with: BR_MISP_RETIRED.ALL_BRANCHES. Related metrics: tma_info_branch_misprediction_cost, tma_info_mispredictions, tma_mispredicts_resteers",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;Compute;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_core_bound",
         "MetricThreshold": "tma_core_bound > 0.1 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where Core non-memory issues were of a bottleneck.  Shortage in hardware compute resources; or dependencies in software's instructions are both categorized under Core Bound. Hence it may indicate the machine ran out of an out-of-order resource; certain execution units are overloaded or dependencies in program's data- or instruction-flow are limiting the performance (e.g. FP-chained long-latency arithmetic operations).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "FetchBW;Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group;tma_issueFB",
         "MetricName": "tma_fetch_bandwidth",
         "MetricThreshold": "tma_fetch_bandwidth > 0.1 & tma_frontend_bound > 0.15 & tma_info_ipc / 5 > 0.35",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend bandwidth issues.  For example; inefficiencies at the instruction decoders; or restrictions for caching in the DSB (decoded uops cache) are categorized under Fetch Bandwidth. In such cases; the Frontend typically delivers suboptimal amount of uops to the Backend. Sample with: FRONTEND_RETIRED.LATENCY_GE_2_BUBBLES_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_1_PS;FRONTEND_RETIRED.LATENCY_GE_2_PS. Related metrics: tma_dsb_switches, tma_info_dsb_coverage, tma_info_dsb_misses, tma_info_iptb, tma_lcp",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Frontend;TmaL2;TopdownL2;tma_L2_group;tma_frontend_bound_group",
         "MetricName": "tma_fetch_latency",
         "MetricThreshold": "tma_fetch_latency > 0.1 & tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU was stalled due to Frontend latency issues.  For example; instruction-cache misses; iTLB misses or fetch stalls after a branch misprediction are categorized under Frontend Latency. In such cases; the Frontend eventually delivers no uops for some period. Sample with: FRONTEND_RETIRED.LATENCY_GE_16_PS;FRONTEND_RETIRED.LATENCY_GE_8_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "PGO;TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_frontend_bound",
         "MetricThreshold": "tma_frontend_bound > 0.15",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots where the processor's Frontend undersupplies its Backend. Frontend denotes the first part of the processor core responsible to fetch operations that are executed later on by the Backend part. Within the Frontend; a branch predictor predicts the next address to fetch; cache-lines are fetched from the memory subsystem; parsed into instructions; and lastly decoded into micro-operations (uops). Ideally the Frontend can issue Pipeline_Width uops every cycle to the Backend. Frontend Bound denotes unutilized issue-slots when there is no Backend stall; i.e. bubbles where Frontend delivered no uops while Backend could have accepted them. For example; stalls due to instruction-cache misses would be categorized under Frontend Bound. Sample with: FRONTEND_RETIRED.LATENCY_GE_4_PS",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_heavy_operations",
         "MetricThreshold": "tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring heavy-weight operations -- instructions that require two or more uops or micro-coded sequences. This highly-correlates with the uop length of these instructions/sequences.",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Retire;TmaL2;TopdownL2;tma_L2_group;tma_retiring_group",
         "MetricName": "tma_light_operations",
         "MetricThreshold": "tma_light_operations > 0.6",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots where the CPU was retiring light-weight operations -- instructions that require no more than one uop (micro-operation). This correlates with total number of instructions used by the program. A uops-per-instruction (see UopPI metric) ratio of 1 or less should be expected for decently optimized software running on Intel Core/Xeon products. While this often indicates efficient X86 instructions were executed; high value does not necessarily mean better performance cannot be achieved. Sample with: INST_RETIRED.PREC_DIST",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "BadSpec;MachineClears;TmaL2;TopdownL2;tma_L2_group;tma_bad_speculation_group;tma_issueMC;tma_issueSyncxn",
         "MetricName": "tma_machine_clears",
         "MetricThreshold": "tma_machine_clears > 0.1 & tma_bad_speculation > 0.15",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the CPU has wasted due to Machine Clears.  These slots are either wasted by uops fetched prior to the clear; or stalls the out-of-order portion of the machine needs to recover its state after the clear. For example; this can happen due to memory ordering Nukes (e.g. Memory Disambiguation) or Self-Modifying-Code (SMC) nukes. Sample with: MACHINE_CLEARS.COUNT. Related metrics: tma_clears_resteers, tma_contested_accesses, tma_data_sharing, tma_false_sharing, tma_l1_bound, tma_microcode_sequencer, tma_ms_switches, tma_remote_cache",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "Backend;TmaL2;TopdownL2;tma_L2_group;tma_backend_bound_group",
         "MetricName": "tma_memory_bound",
         "MetricThreshold": "tma_memory_bound > 0.2 & tma_backend_bound > 0.2",
+        "MetricgroupNoGroup": "TopdownL2",
         "PublicDescription": "This metric represents fraction of slots the Memory subsystem within the Backend was a bottleneck.  Memory Bound estimates fraction of slots where pipeline is likely stalled due to demand load or store instructions. This accounts mainly for (1) non-completed in-flight memory demand loads which coincides with execution units starvation; in addition to (2) cases where stores could impose backpressure on the pipeline when many of them get buffered at the same time (less common out of the two).",
         "ScaleUnit": "100%"
     },
         "MetricGroup": "TmaL1;TopdownL1;tma_L1_group",
         "MetricName": "tma_retiring",
         "MetricThreshold": "tma_retiring > 0.7 | tma_heavy_operations > 0.1",
+        "MetricgroupNoGroup": "TopdownL1",
         "PublicDescription": "This category represents fraction of slots utilized by useful work i.e. issued uops that eventually get retired. Ideally; all pipeline slots would be attributed to the Retiring category.  Retiring of 100% would indicate the maximum Pipeline_Width throughput was achieved.  Maximizing Retiring typically increases the Instructions-per-cycle (see IPC metric). Note that a high Retiring value does not necessary mean there is no room for more performance.  For example; Heavy-operations or Microcode Assists are categorized under Retiring. They often indicate suboptimal performance and can often be optimized or avoided. Sample with: UOPS_RETIRED.SLOTS",
         "ScaleUnit": "100%"
     },
index ca99b9c..f57a8f2 100755 (executable)
@@ -52,7 +52,8 @@ _json_event_attributes = [
 # Attributes that are in pmu_metric rather than pmu_event.
 _json_metric_attributes = [
     'metric_name', 'metric_group', 'metric_expr', 'metric_threshold', 'desc',
-    'long_desc', 'unit', 'compat', 'aggr_mode', 'event_grouping'
+    'long_desc', 'unit', 'compat', 'metricgroup_no_group', 'aggr_mode',
+    'event_grouping'
 ]
 # Attributes that are bools or enum int values, encoded as '0', '1',...
 _json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
@@ -303,6 +304,7 @@ class JsonEvent:
     self.deprecated = jd.get('Deprecated')
     self.metric_name = jd.get('MetricName')
     self.metric_group = jd.get('MetricGroup')
+    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
     self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
     self.metric_expr = None
     if 'MetricExpr' in jd:
index b7dff8f..8034968 100644 (file)
@@ -59,6 +59,7 @@ struct pmu_metric {
        const char *compat;
        const char *desc;
        const char *long_desc;
+       const char *metricgroup_no_group;
        enum aggr_mode_class aggr_mode;
        enum metric_event_groups event_grouping;
 };
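Together with the jevents.py hunk just before it, this shows the plumbing: the JSON key `MetricgroupNoGroup` is read into a new `metricgroup_no_group` attribute (listed under `_json_metric_attributes` because it is a free-form string, unlike the `_json_enum_attributes` encoded as small integers) and surfaces to C as the new `const char *metricgroup_no_group` member of `struct pmu_metric`. The sketch below only illustrates the logical content of one entry; the real generator packs entries into compact string tables rather than C initializers.

```c
/* Illustration only: the logical content of one generated metric entry.
 * jevents.py actually serializes these into offset/string tables, not literals. */
static const struct pmu_metric example_entry = {
	.metric_name          = "tma_frontend_bound",
	.metric_group         = "PGO;TmaL1;TopdownL1;tma_L1_group",
	.metricgroup_no_group = "TopdownL1",	/* the new field added above */
};
```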
index ccfef86..e890c26 100644 (file)
@@ -152,7 +152,7 @@ def parse_version(version):
 #   - expected values assignments
 class Test(object):
     def __init__(self, path, options):
-        parser = configparser.SafeConfigParser()
+        parser = configparser.ConfigParser()
         parser.read(path)
 
         log.warning("running '%s'" % path)
@@ -247,7 +247,7 @@ class Test(object):
         return True
 
     def load_events(self, path, events):
-        parser_event = configparser.SafeConfigParser()
+        parser_event = configparser.ConfigParser()
         parser_event.read(path)
 
         # The event record section header contains 'event' word,
@@ -261,7 +261,7 @@ class Test(object):
             # Read parent event if there's any
             if (':' in section):
                 base = section[section.index(':') + 1:]
-                parser_base = configparser.SafeConfigParser()
+                parser_base = configparser.ConfigParser()
                 parser_base.read(self.test_dir + '/' + base)
                 base_items = parser_base.items('event')
 
index a21fb65..fccd8ec 100644 (file)
@@ -16,7 +16,7 @@ pinned=0
 exclusive=0
 exclude_user=0
 exclude_kernel=0|1
-exclude_hv=0
+exclude_hv=0|1
 exclude_idle=0
 mmap=0
 comm=0
index d8ea6a8..a1e2da0 100644 (file)
@@ -40,7 +40,6 @@ fd=6
 type=0
 config=7
 optional=1
-
 # PERF_TYPE_HARDWARE / PERF_COUNT_HW_STALLED_CYCLES_BACKEND
 [event7:base-stat]
 fd=7
@@ -89,79 +88,98 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
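In these expected-attr files, `type=4` is PERF_TYPE_RAW and `config` is written in decimal, while the comments give the same encoding in hex: topdown-fe-bound (0x8200) is 33280, topdown-be-bound (0x8300) is 33536 and topdown-bad-spec (0x8100) is 33024. A tiny standalone check of that correspondence, assuming nothing beyond the uapi header:

```c
/* Decimal "config=" values above are just the hex encodings from the comments. */
#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_RAW,	/* "type=4" in the expected-attr files */
		.config = 0x8200,		/* topdown-fe-bound */
	};

	/* Prints "config=33280", matching event13 above. */
	printf("config=%llu\n", (unsigned long long)attr.config);
	return 0;
}
```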
index b656ab9..1c52cb0 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
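The renumbered `type=3` events use the PERF_TYPE_HW_CACHE encoding, where `config = cache_id | (op_id << 8) | (result_id << 16)`; that is why an LL read access is 2, an LL read miss is 65538 and an L1D read miss is 65536. A short check of the rule with the uapi enums:

```c
/* PERF_TYPE_HW_CACHE encoding check for the config values listed above. */
#include <linux/perf_event.h>
#include <stdio.h>

int main(void)
{
	unsigned long long ll_read_miss =
		PERF_COUNT_HW_CACHE_LL |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16);

	/* Prints "65538", the config of [event28:base-stat] above. */
	printf("%llu\n", ll_read_miss);
	return 0;
}
```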
index 9762509..7e961d2 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
@@ -211,8 +230,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event24:base-stat]
-fd=24
+[event29:base-stat]
+fd=29
 type=3
 config=1
 optional=1
@@ -221,8 +240,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event25:base-stat]
-fd=25
+[event30:base-stat]
+fd=30
 type=3
 config=65537
 optional=1
@@ -231,8 +250,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event26:base-stat]
-fd=26
+[event31:base-stat]
+fd=31
 type=3
 config=3
 optional=1
@@ -241,8 +260,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event27:base-stat]
-fd=27
+[event32:base-stat]
+fd=32
 type=3
 config=65539
 optional=1
@@ -251,8 +270,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event28:base-stat]
-fd=28
+[event33:base-stat]
+fd=33
 type=3
 config=4
 optional=1
@@ -261,8 +280,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event29:base-stat]
-fd=29
+[event34:base-stat]
+fd=34
 type=3
 config=65540
 optional=1
index d555042..e50535f 100644 (file)
@@ -90,89 +90,108 @@ enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
+# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
 [event13:base-stat]
 fd=13
 group_fd=11
 type=4
-config=33024
+config=33280
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-fe-bound (0x8200)
+# PERF_TYPE_RAW / topdown-be-bound (0x8300)
 [event14:base-stat]
 fd=14
 group_fd=11
 type=4
-config=33280
+config=33536
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-be-bound (0x8300)
+# PERF_TYPE_RAW / topdown-bad-spec (0x8100)
 [event15:base-stat]
 fd=15
 group_fd=11
 type=4
-config=33536
+config=33024
 disabled=0
 enable_on_exec=0
 read_format=15
 optional=1
 
-# PERF_TYPE_RAW / topdown-heavy-ops (0x8400)
+# PERF_TYPE_RAW / INT_MISC.UOP_DROPPING
 [event16:base-stat]
 fd=16
-group_fd=11
 type=4
-config=33792
-disabled=0
-enable_on_exec=0
-read_format=15
+config=4109
 optional=1
 
-# PERF_TYPE_RAW / topdown-br-mispredict (0x8500)
+# PERF_TYPE_RAW / cpu/INT_MISC.RECOVERY_CYCLES,cmask=1,edge/
 [event17:base-stat]
 fd=17
-group_fd=11
 type=4
-config=34048
-disabled=0
-enable_on_exec=0
-read_format=15
+config=17039629
 optional=1
 
-# PERF_TYPE_RAW / topdown-fetch-lat (0x8600)
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.THREAD
 [event18:base-stat]
 fd=18
-group_fd=11
 type=4
-config=34304
-disabled=0
-enable_on_exec=0
-read_format=15
+config=60
 optional=1
 
-# PERF_TYPE_RAW / topdown-mem-bound (0x8700)
+# PERF_TYPE_RAW / INT_MISC.RECOVERY_CYCLES_ANY
 [event19:base-stat]
 fd=19
-group_fd=11
 type=4
-config=34560
-disabled=0
-enable_on_exec=0
-read_format=15
+config=2097421
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.REF_XCLK
+[event20:base-stat]
+fd=20
+type=4
+config=316
+optional=1
+
+# PERF_TYPE_RAW / IDQ_UOPS_NOT_DELIVERED.CORE
+[event21:base-stat]
+fd=21
+type=4
+config=412
+optional=1
+
+# PERF_TYPE_RAW / CPU_CLK_UNHALTED.ONE_THREAD_ACTIVE
+[event22:base-stat]
+fd=22
+type=4
+config=572
+optional=1
+
+# PERF_TYPE_RAW / UOPS_RETIRED.RETIRE_SLOTS
+[event23:base-stat]
+fd=23
+type=4
+config=706
+optional=1
+
+# PERF_TYPE_RAW / UOPS_ISSUED.ANY
+[event24:base-stat]
+fd=24
+type=4
+config=270
 optional=1
 
 # PERF_TYPE_HW_CACHE /
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event20:base-stat]
-fd=20
+[event25:base-stat]
+fd=25
 type=3
 config=0
 optional=1
@@ -181,8 +200,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event21:base-stat]
-fd=21
+[event26:base-stat]
+fd=26
 type=3
 config=65536
 optional=1
@@ -191,8 +210,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event22:base-stat]
-fd=22
+[event27:base-stat]
+fd=27
 type=3
 config=2
 optional=1
@@ -201,8 +220,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_LL                 <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event23:base-stat]
-fd=23
+[event28:base-stat]
+fd=28
 type=3
 config=65538
 optional=1
@@ -211,8 +230,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event24:base-stat]
-fd=24
+[event29:base-stat]
+fd=29
 type=3
 config=1
 optional=1
@@ -221,8 +240,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1I                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event25:base-stat]
-fd=25
+[event30:base-stat]
+fd=30
 type=3
 config=65537
 optional=1
@@ -231,8 +250,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event26:base-stat]
-fd=26
+[event31:base-stat]
+fd=31
 type=3
 config=3
 optional=1
@@ -241,8 +260,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_DTLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event27:base-stat]
-fd=27
+[event32:base-stat]
+fd=32
 type=3
 config=65539
 optional=1
@@ -251,8 +270,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event28:base-stat]
-fd=28
+[event33:base-stat]
+fd=33
 type=3
 config=4
 optional=1
@@ -261,8 +280,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_ITLB               <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_READ            <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event29:base-stat]
-fd=29
+[event34:base-stat]
+fd=34
 type=3
 config=65540
 optional=1
@@ -271,8 +290,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_ACCESS      << 16)
-[event30:base-stat]
-fd=30
+[event35:base-stat]
+fd=35
 type=3
 config=512
 optional=1
@@ -281,8 +300,8 @@ optional=1
 #  PERF_COUNT_HW_CACHE_L1D                <<  0  |
 # (PERF_COUNT_HW_CACHE_OP_PREFETCH        <<  8) |
 # (PERF_COUNT_HW_CACHE_RESULT_MISS        << 16)
-[event31:base-stat]
-fd=31
+[event36:base-stat]
+fd=36
 type=3
 config=66048
 optional=1
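The raw events that replace the fixed topdown-* slots counters throughout these expected-attr files -- CPU_CLK_UNHALTED.THREAD, IDQ_UOPS_NOT_DELIVERED.CORE, UOPS_ISSUED.ANY, UOPS_RETIRED.RETIRE_SLOTS and INT_MISC.RECOVERY_CYCLES -- are the classic inputs to the original Topdown level-1 ratios. As a reminder, these are the textbook TMA definitions, not formulas taken from this diff, and a pipeline width of 4 is assumed:

```c
/* Textbook Topdown level-1 ratios (not from this diff); width of 4 assumed. */
struct topdown_l1 {
	double fe_bound, bad_spec, retiring, be_bound;
};

static struct topdown_l1 topdown_l1_ratios(double clks, double idq_uops_not_delivered,
					   double uops_issued, double uops_retired,
					   double recovery_cycles)
{
	double slots = 4.0 * clks;
	struct topdown_l1 r;

	r.fe_bound = idq_uops_not_delivered / slots;
	r.bad_spec = (uops_issued - uops_retired + 4.0 * recovery_cycles) / slots;
	r.retiring = uops_retired / slots;
	r.be_bound = 1.0 - r.fe_bound - r.bad_spec - r.retiring;
	return r;
}
```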
index cbf0e0c..733ead1 100644 (file)
@@ -120,7 +120,8 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u
 
        p = "FOO/0";
        ret = expr__parse(&val, ctx, p);
-       TEST_ASSERT_VAL("division by zero", ret == -1);
+       TEST_ASSERT_VAL("division by zero", ret == 0);
+       TEST_ASSERT_VAL("division by zero", isnan(val));
 
        p = "BAR/";
        ret = expr__parse(&val, ctx, p);
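The updated assertions pin down the new contract: `expr__parse()` now reports success (return 0) for a division by zero and leaves NaN in the result, instead of failing with -1, so callers must inspect the value rather than the return code. A minimal caller sketch under that contract -- the context setup and message are illustrative, and it assumes perf's util/expr.h, util/debug.h and <math.h>:

```c
/* Caller sketch for the NaN-on-division-by-zero behaviour asserted above. */
static void report_foo_ratio(struct expr_parse_ctx *ctx)
{
	double val;

	if (expr__parse(&val, ctx, "FOO/0") == 0 && isnan(val))
		pr_debug("expression evaluated to NaN (division by zero)\n");
}
```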
index 1185b79..c05148e 100644 (file)
@@ -38,6 +38,7 @@ static void load_runtime_stat(struct evlist *evlist, struct value *vals)
        evlist__alloc_aggr_stats(evlist, 1);
        evlist__for_each_entry(evlist, evsel) {
                count = find_value(evsel->name, vals);
+               evsel->supported = true;
                evsel->stats->aggr->counts.val = count;
                if (evsel__name_is(evsel, "duration_time"))
                        update_stats(&walltime_nsecs_stats, count);
index 2c1d3f7..b154fbb 100755 (executable)
@@ -28,6 +28,18 @@ test_stat_record_report() {
   echo "stat record and report test [Success]"
 }
 
+test_stat_record_script() {
+  echo "stat record and script test"
+  if ! perf stat record -o - true | perf script -i - 2>&1 | \
+    grep -E -q "CPU[[:space:]]+THREAD[[:space:]]+VAL[[:space:]]+ENA[[:space:]]+RUN[[:space:]]+TIME[[:space:]]+EVENT"
+  then
+    echo "stat record and script test [Failed]"
+    err=1
+    return
+  fi
+  echo "stat record and script test [Success]"
+}
+
 test_stat_repeat_weak_groups() {
   echo "stat repeat weak groups test"
   if ! perf stat -e '{cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles,cycles}' \
@@ -93,6 +105,7 @@ test_topdown_weak_groups() {
 
 test_default_stat
 test_stat_record_report
+test_stat_record_script
 test_stat_repeat_weak_groups
 test_topdown_groups
 test_topdown_weak_groups
index 4ddb17c..3a8b9bf 100755 (executable)
@@ -506,6 +506,13 @@ test_sample()
                echo "perf record failed with --aux-sample"
                return 1
        fi
+       # Check with event with PMU name
+       if perf_record_no_decode -o "${perfdatafile}" -e br_misp_retired.all_branches:u uname ; then
+               if ! perf_record_no_decode -o "${perfdatafile}" -e '{intel_pt//,br_misp_retired.all_branches/aux-sample-size=8192/}:u' uname ; then
+                       echo "perf record failed with --aux-sample-size"
+                       return 1
+               fi
+       fi
        echo OK
        return 0
 }
index 90cea88..499539d 100755 (executable)
@@ -56,7 +56,7 @@ if [ $? -ne 0 ]; then
        exit 1
 fi
 
-if ! perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
+if ! DEBUGINFOD_URLS='' perf inject -i $PERF_DATA -o $PERF_INJ_DATA -j; then
        echo "Fail to inject samples"
        exit 1
 fi
index fe022ca..a211348 100644 (file)
 
 static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_1, "ARCH_", x86_arch_prctl_codes_1_offset);
 static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_2, "ARCH_", x86_arch_prctl_codes_2_offset);
+static DEFINE_STRARRAY_OFFSET(x86_arch_prctl_codes_3, "ARCH_", x86_arch_prctl_codes_3_offset);
 
 static struct strarray *x86_arch_prctl_codes[] = {
        &strarray__x86_arch_prctl_codes_1,
        &strarray__x86_arch_prctl_codes_2,
+       &strarray__x86_arch_prctl_codes_3,
 };
 
 static DEFINE_STRARRAYS(x86_arch_prctl_codes);
index 57fa6aa..fd5c740 100755 (executable)
@@ -24,3 +24,4 @@ print_range () {
 
 print_range 1 0x1 0x1001
 print_range 2 0x2 0x2001
+print_range 3 0x4 0x4001
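The generator gains a third range, so the beautifier table added in the previous hunk covers arch_prctl codes numbered from 0x4001 upward, alongside the existing 0x1001- and 0x2001-based tables. Conceptually, an offset table like this resolves a code to a name by simple subtraction; the struct and helper below are only a sketch of that idea, not perf's real strarray API.

```c
/* Conceptual sketch of an offset-based code->name table (illustrative types). */
struct prctl_code_table {
	unsigned int  offset;		/* first code covered: 0x1001, 0x2001 or 0x4001 */
	unsigned int  nr_entries;
	const char  **entries;		/* names with the "ARCH_" prefix stripped */
};

static const char *prctl_code_name(const struct prctl_code_table *t, unsigned long code)
{
	unsigned long idx = code - t->offset;

	return idx < t->nr_entries ? t->entries[idx] : NULL;
}
```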
index bd18fe5..f9df1df 100644 (file)
@@ -214,7 +214,7 @@ perf-$(CONFIG_ZSTD) += zstd.o
 
 perf-$(CONFIG_LIBCAP) += cap.o
 
-perf-y += demangle-cxx.o
+perf-$(CONFIG_CXX_DEMANGLE) += demangle-cxx.o
 perf-y += demangle-ocaml.o
 perf-y += demangle-java.o
 perf-y += demangle-rust.o
index 8d3cfbb..1d48226 100644 (file)
@@ -416,6 +416,8 @@ int contention_end(u64 *ctx)
        return 0;
 }
 
+struct rq {};
+
 extern struct rq runqueues __ksym;
 
 struct rq___old {
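The empty `struct rq {}` exists only so the `__ksym` extern below it compiles now that perf's minimal vmlinux.h replacement no longer provides the full type; fields are never read through it, because accesses go via CO-RE "flavor" types (such as the `struct rq___old` shown in context) marked with `preserve_access_index`. Restated as a sketch:

```c
/* Pattern sketch: an empty local definition is enough for a __ksym extern;
 * actual field reads use CO-RE flavor structs, never 'struct rq' itself. */
struct rq {};
extern struct rq runqueues __ksym;	/* kernel per-CPU symbol, resolved by libbpf */
```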
index cffe493..fb94f52 100644 (file)
@@ -25,7 +25,7 @@ struct perf_sample_data___new {
 } __attribute__((preserve_access_index));
 
 /* new kernel perf_mem_data_src definition */
-union perf_mem_data_src__new {
+union perf_mem_data_src___new {
        __u64 val;
        struct {
                __u64   mem_op:5,       /* type of opcode */
@@ -108,7 +108,7 @@ static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
                if (entry->part == 7)
                        return kctx->data->data_src.mem_blk;
                if (entry->part == 8) {
-                       union perf_mem_data_src__new *data = (void *)&kctx->data->data_src;
+                       union perf_mem_data_src___new *data = (void *)&kctx->data->data_src;
 
                        if (bpf_core_field_exists(data->mem_hops))
                                return data->mem_hops;
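The extra underscore is what makes this work: libbpf's CO-RE matching treats a local type named `foo___suffix` as a "flavor" of the kernel type `foo`, stripping everything from the triple underscore when resolving against kernel BTF, whereas the old two-underscore name was matched literally. With the flavor name, `bpf_core_field_exists(data->mem_hops)` relocates against the kernel's real `union perf_mem_data_src`. A minimal sketch of the convention -- the bitfield layout is abbreviated, since offsets come from kernel BTF rather than this local definition:

```c
/* CO-RE flavor-naming sketch; "___new" is stripped when matching kernel BTF. */
union perf_mem_data_src___new {
	__u64 val;
	struct {
		__u64 mem_op:5;
		__u64 mem_hops:3;	/* only present on newer kernels */
	};
} __attribute__((preserve_access_index));
```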
index 449b1ea..c7ed51b 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __VMLINUX_H
 #define __VMLINUX_H
 
+#include <linux/stddef.h> // for define __always_inline
 #include <linux/bpf.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
index 70cac03..ecca407 100644 (file)
@@ -227,6 +227,19 @@ struct cs_etm_packet_queue {
 #define INFO_HEADER_SIZE (sizeof(((struct perf_record_auxtrace_info *)0)->type) + \
                          sizeof(((struct perf_record_auxtrace_info *)0)->reserved__))
 
+/* CoreSight trace ID is currently the bottom 7 bits of the value */
+#define CORESIGHT_TRACE_ID_VAL_MASK    GENMASK(6, 0)
+
+/*
+ * perf record will set the legacy meta data values as unused initially.
+ * This allows perf report to manage the decoders created when dynamic
+ * allocation in operation.
+ */
+#define CORESIGHT_TRACE_ID_UNUSED_FLAG BIT(31)
+
+/* Value to set for unused trace ID values */
+#define CORESIGHT_TRACE_ID_UNUSED_VAL  0x7F
+
 int cs_etm__process_auxtrace_info(union perf_event *event,
                                  struct perf_session *session);
 struct perf_event_attr *cs_etm_get_default_config(struct perf_pmu *pmu);
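The new macros carve a metadata word into a 7-bit trace ID (`GENMASK(6, 0)`, i.e. 0x7f, matching the unused value) and a bit-31 flag that marks the slot as not yet assigned by the dynamic allocator. Helpers of the kind a decoder might layer on top of them -- the helper names are illustrative, not from the diff:

```c
/* Illustrative accessors built on the macros above. */
static inline bool cs_etm__trace_id_is_unused(u64 metadata)
{
	return metadata & CORESIGHT_TRACE_ID_UNUSED_FLAG;
}

static inline u8 cs_etm__metadata_trace_id(u64 metadata)
{
	return metadata & CORESIGHT_TRACE_ID_VAL_MASK;	/* bottom 7 bits */
}
```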
index 356c07f..c2dbb56 100644 (file)
@@ -282,6 +282,7 @@ void evsel__init(struct evsel *evsel,
        evsel->bpf_fd      = -1;
        INIT_LIST_HEAD(&evsel->config_terms);
        INIT_LIST_HEAD(&evsel->bpf_counter_list);
+       INIT_LIST_HEAD(&evsel->bpf_filters);
        perf_evsel__object.init(evsel);
        evsel->sample_size = __evsel__sample_size(attr->sample_type);
        evsel__calc_id_pos(evsel);
@@ -290,6 +291,7 @@ void evsel__init(struct evsel *evsel,
        evsel->per_pkg_mask  = NULL;
        evsel->collect_stat  = false;
        evsel->pmu_name      = NULL;
+       evsel->skippable     = false;
 }
 
 struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -828,26 +830,26 @@ bool evsel__name_is(struct evsel *evsel, const char *name)
 
 const char *evsel__group_pmu_name(const struct evsel *evsel)
 {
-       const struct evsel *leader;
+       struct evsel *leader = evsel__leader(evsel);
+       struct evsel *pos;
 
-       /* If the pmu_name is set use it. pmu_name isn't set for CPU and software events. */
-       if (evsel->pmu_name)
-               return evsel->pmu_name;
        /*
         * Software events may be in a group with other uncore PMU events. Use
-        * the pmu_name of the group leader to avoid breaking the software event
-        * out of the group.
+        * the pmu_name of the first non-software event to avoid breaking the
+        * software event out of the group.
         *
         * Aux event leaders, like intel_pt, expect a group with events from
         * other PMUs, so substitute the AUX event's PMU in this case.
         */
-       leader  = evsel__leader(evsel);
-       if ((evsel->core.attr.type == PERF_TYPE_SOFTWARE || evsel__is_aux_event(leader)) &&
-           leader->pmu_name) {
-               return leader->pmu_name;
+       if (evsel->core.attr.type == PERF_TYPE_SOFTWARE || evsel__is_aux_event(leader)) {
+               /* Starting with the leader, find the first event with a named PMU. */
+               for_each_group_evsel(pos, leader) {
+                       if (pos->pmu_name)
+                               return pos->pmu_name;
+               }
        }
 
-       return "cpu";
+       return evsel->pmu_name ?: "cpu";
 }
 
 const char *evsel__metric_id(const struct evsel *evsel)
@@ -1725,9 +1727,13 @@ static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
                return -1;
 
        fd = FD(leader, cpu_map_idx, thread);
-       BUG_ON(fd == -1);
+       BUG_ON(fd == -1 && !leader->skippable);
 
-       return fd;
+       /*
+        * When the leader has been skipped, return -2 to distinguish it from
+        * the no-group-leader case.
+        */
+       return fd == -1 ? -2 : fd;
 }
 
 static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
@@ -2109,6 +2115,12 @@ retry_open:
 
                        group_fd = get_group_fd(evsel, idx, thread);
 
+                       if (group_fd == -2) {
+                               pr_debug("broken group leader for %s\n", evsel->name);
+                               err = -EINVAL;
+                               goto out_close;
+                       }
+
                        test_attr__ready();
 
                        /* Debug message used by test scripts */
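
Taken together, the two hunks above define a small return contract for get_group_fd(). A hypothetical caller-side sketch (names as in the patch and perf's perf-sys.h, error handling condensed):

    int group_fd = get_group_fd(evsel, idx, thread);

    if (group_fd == -2)             /* skippable leader failed to open */
            return -EINVAL;         /* give up on this evsel, as in the hunk above */

    /* -1 means no group leader; >= 0 is a real leader fd to group against */
    fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
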
index d575390..0f54f28 100644 (file)
@@ -95,6 +95,7 @@ struct evsel {
                bool                    weak_group;
                bool                    bpf_counter;
                bool                    use_config_name;
+               bool                    skippable;
                int                     bpf_fd;
                struct bpf_object       *bpf_obj;
                struct list_head        config_terms;
@@ -150,10 +151,8 @@ struct evsel {
         */
        struct bpf_counter_ops  *bpf_counter_ops;
 
-       union {
-               struct list_head        bpf_counter_list; /* for perf-stat -b */
-               struct list_head        bpf_filters; /* for perf-record --filter */
-       };
+       struct list_head        bpf_counter_list; /* for perf-stat -b */
+       struct list_head        bpf_filters; /* for perf-record --filter */
 
        /* for perf-stat --use-bpf */
        int                     bperf_leader_prog_fd;
index 250e444..4ce931c 100644 (file)
@@ -225,7 +225,11 @@ expr: NUMBER
 {
        if (fpclassify($3.val) == FP_ZERO) {
                pr_debug("division by zero\n");
-               YYABORT;
+               assert($3.ids == NULL);
+               if (compute_ids)
+                       ids__free($1.ids);
+               $$.val = NAN;
+               $$.ids = NULL;
        } else if (!compute_ids || (is_const($1.val) && is_const($3.val))) {
                assert($1.ids == NULL);
                assert($3.ids == NULL);
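
Rather than aborting the whole parse with YYABORT, a division by zero now evaluates to NaN, which IEEE 754 arithmetic propagates through the remainder of the metric expression so the failure can be reported per metric. A standalone sketch of the propagation:

    #include <assert.h>
    #include <math.h>

    int main(void)
    {
            double val = NAN;               /* what x / 0 now yields in the grammar */

            val = val * 2.0 + 1.0;          /* NaN survives further arithmetic */
            assert(isnan(val));             /* callers can detect and report it */
            return 0;
    }
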
index c566c68..5e9c657 100644 (file)
@@ -1144,12 +1144,12 @@ static int metricgroup__add_metric_callback(const struct pmu_metric *pm,
        struct metricgroup__add_metric_data *data = vdata;
        int ret = 0;
 
-       if (pm->metric_expr &&
-               (match_metric(pm->metric_group, data->metric_name) ||
-                match_metric(pm->metric_name, data->metric_name))) {
+       if (pm->metric_expr && match_pm_metric(pm, data->metric_name)) {
+               bool metric_no_group = data->metric_no_group ||
+                       match_metric(data->metric_name, pm->metricgroup_no_group);
 
                data->has_match = true;
-               ret = add_metric(data->list, pm, data->modifier, data->metric_no_group,
+               ret = add_metric(data->list, pm, data->modifier, metric_no_group,
                                 data->metric_no_threshold, data->user_requested_cpu_list,
                                 data->system_wide, /*root_metric=*/NULL,
                                 /*visited_metrics=*/NULL, table);
@@ -1672,7 +1672,7 @@ static int metricgroup__topdown_max_level_callback(const struct pmu_metric *pm,
 {
        unsigned int *max_level = data;
        unsigned int level;
-       const char *p = strstr(pm->metric_group, "TopdownL");
+       const char *p = strstr(pm->metric_group ?: "", "TopdownL");
 
        if (!p || p[8] == '\0')
                return 0;
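
The `?: ""` guard relies on the GNU elvis operator: `a ?: b` is shorthand for `a ? a : b` with `a` evaluated only once, so a NULL metric_group degrades to an empty search string instead of crashing strstr(). An illustrative snippet:

    #include <string.h>

    const char *group = NULL;                               /* pm->metric_group may be NULL */
    const char *p = strstr(group ?: "", "TopdownL");        /* NULL becomes "" -> no match, no crash */
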
index d71019d..34ba840 100644 (file)
@@ -2140,25 +2140,32 @@ static int evlist__cmp(void *state, const struct list_head *l, const struct list
        int *leader_idx = state;
        int lhs_leader_idx = *leader_idx, rhs_leader_idx = *leader_idx, ret;
        const char *lhs_pmu_name, *rhs_pmu_name;
+       bool lhs_has_group = false, rhs_has_group = false;
 
        /*
         * First sort by grouping/leader. Read the leader idx only if the evsel
         * is part of a group, as -1 indicates no group.
         */
-       if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1)
+       if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
+               lhs_has_group = true;
                lhs_leader_idx = lhs_core->leader->idx;
-       if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1)
+       }
+       if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
+               rhs_has_group = true;
                rhs_leader_idx = rhs_core->leader->idx;
+       }
 
        if (lhs_leader_idx != rhs_leader_idx)
                return lhs_leader_idx - rhs_leader_idx;
 
-       /* Group by PMU. Groups can't span PMUs. */
-       lhs_pmu_name = evsel__group_pmu_name(lhs);
-       rhs_pmu_name = evsel__group_pmu_name(rhs);
-       ret = strcmp(lhs_pmu_name, rhs_pmu_name);
-       if (ret)
-               return ret;
+       /* Group by PMU if there is a group. Groups can't span PMUs. */
+       if (lhs_has_group && rhs_has_group) {
+               lhs_pmu_name = evsel__group_pmu_name(lhs);
+               rhs_pmu_name = evsel__group_pmu_name(rhs);
+               ret = strcmp(lhs_pmu_name, rhs_pmu_name);
+               if (ret)
+                       return ret;
+       }
 
        /* Architecture specific sorting. */
        return arch_evlist__cmp(lhs, rhs);
index 73b2ff2..bf5a6c1 100644 (file)
@@ -431,7 +431,7 @@ static void print_metric_json(struct perf_stat_config *config __maybe_unused,
        struct outstate *os = ctx;
        FILE *out = os->fh;
 
-       fprintf(out, "\"metric-value\" : %f, ", val);
+       fprintf(out, "\"metric-value\" : \"%f\", ", val);
        fprintf(out, "\"metric-unit\" : \"%s\"", unit);
        if (!config->metric_only)
                fprintf(out, "}");
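
Quoting the value matters because metric values can be NaN or infinity, which printf renders as text that is not a legal JSON number; as a string the document stays parseable. A small demonstration:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            printf("{\"metric-value\" : %f}\n", NAN);       /* {"metric-value" : nan}   -> invalid JSON */
            printf("{\"metric-value\" : \"%f\"}\n", NAN);   /* {"metric-value" : "nan"} -> valid string */
            return 0;
    }
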
index eeccab6..1566a20 100644 (file)
@@ -403,12 +403,25 @@ static int prepare_metric(struct evsel **metric_events,
                        if (!aggr)
                                break;
 
-                       /*
-                        * If an event was scaled during stat gathering, reverse
-                        * the scale before computing the metric.
-                        */
-                       val = aggr->counts.val * (1.0 / metric_events[i]->scale);
-                       source_count = evsel__source_count(metric_events[i]);
+                       if (!metric_events[i]->supported) {
+                               /*
+                                * Not supported events will have a count of 0,
+                                * which can be confusing in a
+                                * metric. Explicitly set the value to NAN. Not
+                                * counted events (enable time of 0) are read as
+                                * 0.
+                                */
+                               val = NAN;
+                               source_count = 0;
+                       } else {
+                               /*
+                                * If an event was scaled during stat gathering,
+                                * reverse the scale before computing the
+                                * metric.
+                                */
+                               val = aggr->counts.val * (1.0 / metric_events[i]->scale);
+                               source_count = evsel__source_count(metric_events[i]);
+                       }
                }
                n = strdup(evsel__metric_id(metric_events[i]));
                if (!n)
index b2ed9cc..63882a4 100644 (file)
 #include <bfd.h>
 #endif
 
+#if defined(HAVE_LIBBFD_SUPPORT) || defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
+#ifndef DMGL_PARAMS
+#define DMGL_PARAMS     (1 << 0)  /* Include function args */
+#define DMGL_ANSI       (1 << 1)  /* Include const, volatile, etc */
+#endif
+#endif
+
 #ifndef EM_AARCH64
 #define EM_AARCH64     183  /* ARM 64 bit */
 #endif
@@ -271,6 +278,26 @@ static bool want_demangle(bool is_kernel_sym)
        return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
 }
 
+/*
+ * Demangle a C++ function signature. This weak default is typically
+ * overridden by the demangle-cxx.cpp version.
+ */
+__weak char *cxx_demangle_sym(const char *str __maybe_unused, bool params __maybe_unused,
+                             bool modifiers __maybe_unused)
+{
+#ifdef HAVE_LIBBFD_SUPPORT
+       int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+       return bfd_demangle(NULL, str, flags);
+#elif defined(HAVE_CPLUS_DEMANGLE_SUPPORT)
+       int flags = (params ? DMGL_PARAMS : 0) | (modifiers ? DMGL_ANSI : 0);
+
+       return cplus_demangle(str, flags);
+#else
+       return NULL;
+#endif
+}
+
 static char *demangle_sym(struct dso *dso, int kmodule, const char *elf_name)
 {
        char *demangled = NULL;
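
The __weak annotation makes this the link-time fallback: when the Makefile change earlier in this series builds demangle-cxx.o (CONFIG_CXX_DEMANGLE), its strong definition of cxx_demangle_sym() wins; otherwise this bfd/cplus_demangle path is used. A generic sketch of the mechanism, with hypothetical names:

    /* default.c: weak fallback, chosen only if nothing stronger is linked */
    __attribute__((weak)) int backend(void)
    {
            return 0;
    }

    /* override.c: built only when the optional feature is enabled */
    int backend(void)
    {
            return 1;       /* a strong definition always wins at link time */
    }
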
index 0ce29ee..a7a59c6 100644 (file)
@@ -40,25 +40,34 @@ static int sysfs_get_enabled(char *path, int *mode)
 {
        int fd;
        char yes_no;
+       int ret = 0;
 
        *mode = 0;
 
        fd = open(path, O_RDONLY);
-       if (fd == -1)
-               return -1;
+       if (fd == -1) {
+               ret = -1;
+               goto out;
+       }
 
        if (read(fd, &yes_no, 1) != 1) {
-               close(fd);
-               return -1;
+               ret = -1;
+               goto out_close;
        }
 
        if (yes_no == '1') {
                *mode = 1;
-               return 0;
+               goto out_close;
        } else if (yes_no == '0') {
-               return 0;
+               goto out_close;
+       } else {
+               ret = -1;
+               goto out_close;
        }
-       return -1;
+out_close:
+       close(fd);
+out:
+       return ret;
 }
 
 int powercap_get_enabled(int *mode)
index e7d48cb..ae6af35 100644 (file)
@@ -70,8 +70,8 @@ static int max_freq_mode;
  */
 static unsigned long max_frequency;
 
-static unsigned long long tsc_at_measure_start;
-static unsigned long long tsc_at_measure_end;
+static unsigned long long *tsc_at_measure_start;
+static unsigned long long *tsc_at_measure_end;
 static unsigned long long *mperf_previous_count;
 static unsigned long long *aperf_previous_count;
 static unsigned long long *mperf_current_count;
@@ -169,7 +169,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
        aperf_diff = aperf_current_count[cpu] - aperf_previous_count[cpu];
 
        if (max_freq_mode == MAX_FREQ_TSC_REF) {
-               tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+               tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
                *percent = 100.0 * mperf_diff / tsc_diff;
                dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
                       mperf_cstates[id].name, mperf_diff, tsc_diff);
@@ -206,7 +206,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
 
        if (max_freq_mode == MAX_FREQ_TSC_REF) {
                /* Calculate max_freq from TSC count */
-               tsc_diff = tsc_at_measure_end - tsc_at_measure_start;
+               tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
                time_diff = timespec_diff_us(time_start, time_end);
                max_frequency = tsc_diff / time_diff;
        }
@@ -225,33 +225,27 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
 static int mperf_start(void)
 {
        int cpu;
-       unsigned long long dbg;
 
        clock_gettime(CLOCK_REALTIME, &time_start);
-       mperf_get_tsc(&tsc_at_measure_start);
 
-       for (cpu = 0; cpu < cpu_count; cpu++)
+       for (cpu = 0; cpu < cpu_count; cpu++) {
+               mperf_get_tsc(&tsc_at_measure_start[cpu]);
                mperf_init_stats(cpu);
+       }
 
-       mperf_get_tsc(&dbg);
-       dprint("TSC diff: %llu\n", dbg - tsc_at_measure_start);
        return 0;
 }
 
 static int mperf_stop(void)
 {
-       unsigned long long dbg;
        int cpu;
 
-       for (cpu = 0; cpu < cpu_count; cpu++)
+       for (cpu = 0; cpu < cpu_count; cpu++) {
                mperf_measure_stats(cpu);
+               mperf_get_tsc(&tsc_at_measure_end[cpu]);
+       }
 
-       mperf_get_tsc(&tsc_at_measure_end);
        clock_gettime(CLOCK_REALTIME, &time_end);
-
-       mperf_get_tsc(&dbg);
-       dprint("TSC diff: %llu\n", dbg - tsc_at_measure_end);
-
        return 0;
 }
 
@@ -353,7 +347,8 @@ struct cpuidle_monitor *mperf_register(void)
        aperf_previous_count = calloc(cpu_count, sizeof(unsigned long long));
        mperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
        aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
-
+       tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
+       tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
        mperf_monitor.name_len = strlen(mperf_monitor.name);
        return &mperf_monitor;
 }
@@ -364,6 +359,8 @@ void mperf_unregister(void)
        free(aperf_previous_count);
        free(mperf_current_count);
        free(aperf_current_count);
+       free(tsc_at_measure_start);
+       free(tsc_at_measure_end);
        free(is_valid);
 }
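
Moving the TSC snapshots into per-CPU arrays means each CPU's busy ratio is computed against the window in which that CPU was actually measured, rather than one global pair read before and after the whole loop. The resulting per-CPU math, with illustrative numbers:

    /* names as in the patch; values illustrative */
    unsigned long long mperf_diff = mperf_current_count[cpu] - mperf_previous_count[cpu];
    unsigned long long tsc_diff   = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
    double c0_percent = 100.0 * mperf_diff / tsc_diff;      /* e.g. 50e6 / 100e6 -> 50% busy */
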
 
index fba7bec..6f9347a 100644 (file)
@@ -6,6 +6,7 @@ ldflags-y += --wrap=acpi_pci_find_root
 ldflags-y += --wrap=nvdimm_bus_register
 ldflags-y += --wrap=devm_cxl_port_enumerate_dports
 ldflags-y += --wrap=devm_cxl_setup_hdm
+ldflags-y += --wrap=devm_cxl_enable_hdm
 ldflags-y += --wrap=devm_cxl_add_passthrough_decoder
 ldflags-y += --wrap=devm_cxl_enumerate_decoders
 ldflags-y += --wrap=cxl_await_media_ready
index ba572d0..34b4802 100644 (file)
@@ -1256,6 +1256,7 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
+       cxlds->media_ready = true;
        rc = cxl_dev_state_identify(cxlds);
        if (rc)
                return rc;
index de3933a..2844165 100644 (file)
@@ -149,6 +149,21 @@ struct cxl_hdm *__wrap_devm_cxl_setup_hdm(struct cxl_port *port,
 }
 EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_setup_hdm, CXL);
 
+int __wrap_devm_cxl_enable_hdm(struct cxl_port *port, struct cxl_hdm *cxlhdm)
+{
+       int index, rc;
+       struct cxl_mock_ops *ops = get_cxl_mock_ops(&index);
+
+       if (ops && ops->is_mock_port(port->uport))
+               rc = 0;
+       else
+               rc = devm_cxl_enable_hdm(port, cxlhdm);
+       put_cxl_mock_ops(index);
+
+       return rc;
+}
+EXPORT_SYMBOL_NS_GPL(__wrap_devm_cxl_enable_hdm, CXL);
+
 int __wrap_devm_cxl_add_passthrough_decoder(struct cxl_port *port)
 {
        int rc, index;
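
The new --wrap entry in the mock Makefile above is what routes callers into __wrap_devm_cxl_enable_hdm(): with -Wl,--wrap=sym the linker resolves references to sym against __wrap_sym, while __real_sym still reaches the original. A minimal generic example, with hypothetical names:

    /* real.c */
    int get_value(void)
    {
            return 1;
    }

    /* mock.c */
    int __real_get_value(void);     /* binds to real.c's definition */

    int __wrap_get_value(void)
    {
            return 42;              /* test double; may forward to __real_get_value() */
    }

    /* link the test with: cc main.o real.o mock.o -Wl,--wrap=get_value */
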
index 3e390fe..b7eef32 100644 (file)
@@ -381,7 +381,7 @@ __format:
                goto __close;
        }
        if (rrate != rate) {
-               snprintf(msg, sizeof(msg), "rate mismatch %ld != %ld", rate, rrate);
+               snprintf(msg, sizeof(msg), "rate mismatch %ld != %d", rate, rrate);
                goto __close;
        }
        rperiod_size = period_size;
@@ -447,24 +447,24 @@ __format:
                        frames = snd_pcm_writei(handle, samples, rate);
                        if (frames < 0) {
                                snprintf(msg, sizeof(msg),
-                                        "Write failed: expected %d, wrote %li", rate, frames);
+                                        "Write failed: expected %ld, wrote %li", rate, frames);
                                goto __close;
                        }
                        if (frames < rate) {
                                snprintf(msg, sizeof(msg),
-                                        "expected %d, wrote %li", rate, frames);
+                                        "expected %ld, wrote %li", rate, frames);
                                goto __close;
                        }
                } else {
                        frames = snd_pcm_readi(handle, samples, rate);
                        if (frames < 0) {
                                snprintf(msg, sizeof(msg),
-                                        "expected %d, wrote %li", rate, frames);
+                                        "expected %ld, wrote %li", rate, frames);
                                goto __close;
                        }
                        if (frames < rate) {
                                snprintf(msg, sizeof(msg),
-                                        "expected %d, wrote %li", rate, frames);
+                                        "expected %ld, wrote %li", rate, frames);
                                goto __close;
                        }
                }
index c49e540..28d2c77 100644 (file)
@@ -197,7 +197,7 @@ $(OUTPUT)/urandom_read: urandom_read.c urandom_read_aux.c $(OUTPUT)/liburandom_r
 
 $(OUTPUT)/sign-file: ../../../../scripts/sign-file.c
        $(call msg,SIGN-FILE,,$@)
-       $(Q)$(CC) $(shell $(HOSTPKG_CONFIG)--cflags libcrypto 2> /dev/null) \
+       $(Q)$(CC) $(shell $(HOSTPKG_CONFIG) --cflags libcrypto 2> /dev/null) \
                  $< -o $@ \
                  $(shell $(HOSTPKG_CONFIG) --libs libcrypto 2> /dev/null || echo -lcrypto)
 
diff --git a/tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c b/tools/testing/selftests/bpf/prog_tests/inner_array_lookup.c
new file mode 100644 (file)
index 0000000..9ab4cd1
--- /dev/null
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <test_progs.h>
+
+#include "inner_array_lookup.skel.h"
+
+void test_inner_array_lookup(void)
+{
+       int map1_fd, err;
+       int key = 3;
+       int val = 1;
+       struct inner_array_lookup *skel;
+
+       skel = inner_array_lookup__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "open_load_skeleton"))
+               return;
+
+       err = inner_array_lookup__attach(skel);
+       if (!ASSERT_OK(err, "skeleton_attach"))
+               goto cleanup;
+
+       map1_fd = bpf_map__fd(skel->maps.inner_map1);
+       bpf_map_update_elem(map1_fd, &key, &val, 0);
+
+       /* Probe should have set the element at index 3 to 2 */
+       bpf_map_lookup_elem(map1_fd, &key, &val);
+       ASSERT_EQ(val, 2, "value_is_2");
+
+cleanup:
+       inner_array_lookup__destroy(skel);
+}
index 0ce25a9..064cc5e 100644 (file)
@@ -2,6 +2,7 @@
 // Copyright (c) 2020 Cloudflare
 #include <error.h>
 #include <netinet/tcp.h>
+#include <sys/epoll.h>
 
 #include "test_progs.h"
 #include "test_skmsg_load_helpers.skel.h"
@@ -9,8 +10,12 @@
 #include "test_sockmap_invalid_update.skel.h"
 #include "test_sockmap_skb_verdict_attach.skel.h"
 #include "test_sockmap_progs_query.skel.h"
+#include "test_sockmap_pass_prog.skel.h"
+#include "test_sockmap_drop_prog.skel.h"
 #include "bpf_iter_sockmap.skel.h"
 
+#include "sockmap_helpers.h"
+
 #define TCP_REPAIR             19      /* TCP sock is under repair right now */
 
 #define TCP_REPAIR_ON          1
@@ -350,6 +355,126 @@ out:
        test_sockmap_progs_query__destroy(skel);
 }
 
+#define MAX_EVENTS 10
+static void test_sockmap_skb_verdict_shutdown(void)
+{
+       struct epoll_event ev, events[MAX_EVENTS];
+       int n, err, map, verdict, s, c1, p1;
+       struct test_sockmap_pass_prog *skel;
+       int epollfd;
+       int zero = 0;
+       char b;
+
+       skel = test_sockmap_pass_prog__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "open_and_load"))
+               return;
+
+       verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
+       map = bpf_map__fd(skel->maps.sock_map_rx);
+
+       err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+       if (!ASSERT_OK(err, "bpf_prog_attach"))
+               goto out;
+
+       s = socket_loopback(AF_INET, SOCK_STREAM);
+       if (s < 0)
+               goto out;
+       err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
+       if (err < 0)
+               goto out;
+
+       err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+       if (err < 0)
+               goto out_close;
+
+       shutdown(p1, SHUT_WR);
+
+       ev.events = EPOLLIN;
+       ev.data.fd = c1;
+
+       epollfd = epoll_create1(0);
+       if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
+               goto out_close;
+       err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
+       if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
+               goto out_close;
+       err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
+       if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
+               goto out_close;
+
+       n = recv(c1, &b, 1, SOCK_NONBLOCK);
+       ASSERT_EQ(n, 0, "recv_timeout(fin)");
+out_close:
+       close(c1);
+       close(p1);
+out:
+       test_sockmap_pass_prog__destroy(skel);
+}
+
+static void test_sockmap_skb_verdict_fionread(bool pass_prog)
+{
+       int expected, zero = 0, sent, recvd, avail;
+       int err, map, verdict, s, c0, c1, p0, p1;
+       struct test_sockmap_pass_prog *pass;
+       struct test_sockmap_drop_prog *drop;
+       char buf[256] = "0123456789";
+
+       if (pass_prog) {
+               pass = test_sockmap_pass_prog__open_and_load();
+               if (!ASSERT_OK_PTR(pass, "open_and_load"))
+                       return;
+               verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
+               map = bpf_map__fd(pass->maps.sock_map_rx);
+               expected = sizeof(buf);
+       } else {
+               drop = test_sockmap_drop_prog__open_and_load();
+               if (!ASSERT_OK_PTR(drop, "open_and_load"))
+                       return;
+               verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
+               map = bpf_map__fd(drop->maps.sock_map_rx);
+               /* On drop, data is consumed immediately and copied_seq is advanced */
+               expected = 0;
+       }
+
+       err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
+       if (!ASSERT_OK(err, "bpf_prog_attach"))
+               goto out;
+
+       s = socket_loopback(AF_INET, SOCK_STREAM);
+       if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
+               goto out;
+       err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
+       if (!ASSERT_OK(err, "create_socket_pairs(s)"))
+               goto out;
+
+       err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
+       if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
+               goto out_close;
+
+       sent = xsend(p1, &buf, sizeof(buf), 0);
+       ASSERT_EQ(sent, sizeof(buf), "xsend(p1)");
+       err = ioctl(c1, FIONREAD, &avail);
+       ASSERT_OK(err, "ioctl(FIONREAD) error");
+       ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
+       /* On DROP test there will be no data to read */
+       if (pass_prog) {
+               recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
+               ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c1)");
+       }
+
+out_close:
+       close(c0);
+       close(p0);
+       close(c1);
+       close(p1);
+out:
+       if (pass_prog)
+               test_sockmap_pass_prog__destroy(pass);
+       else
+               test_sockmap_drop_prog__destroy(drop);
+}
+
 void test_sockmap_basic(void)
 {
        if (test__start_subtest("sockmap create_update_free"))
@@ -384,4 +509,10 @@ void test_sockmap_basic(void)
                test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
        if (test__start_subtest("sockmap skb_verdict progs query"))
                test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
+       if (test__start_subtest("sockmap skb_verdict shutdown"))
+               test_sockmap_skb_verdict_shutdown();
+       if (test__start_subtest("sockmap skb_verdict fionread"))
+               test_sockmap_skb_verdict_fionread(true);
+       if (test__start_subtest("sockmap skb_verdict fionread on drop"))
+               test_sockmap_skb_verdict_fionread(false);
 }
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h b/tools/testing/selftests/bpf/prog_tests/sockmap_helpers.h
new file mode 100644 (file)
index 0000000..d126654
--- /dev/null
@@ -0,0 +1,390 @@
+#ifndef __SOCKMAP_HELPERS__
+#define __SOCKMAP_HELPERS__
+
+#include <linux/vm_sockets.h>
+
+#define IO_TIMEOUT_SEC 30
+#define MAX_STRERR_LEN 256
+#define MAX_TEST_NAME 80
+
+/* workaround for older vm_sockets.h */
+#ifndef VMADDR_CID_LOCAL
+#define VMADDR_CID_LOCAL 1
+#endif
+
+#define __always_unused        __attribute__((__unused__))
+
+#define _FAIL(errnum, fmt...)                                                  \
+       ({                                                                     \
+               error_at_line(0, (errnum), __func__, __LINE__, fmt);           \
+               CHECK_FAIL(true);                                              \
+       })
+#define FAIL(fmt...) _FAIL(0, fmt)
+#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
+#define FAIL_LIBBPF(err, msg)                                                  \
+       ({                                                                     \
+               char __buf[MAX_STRERR_LEN];                                    \
+               libbpf_strerror((err), __buf, sizeof(__buf));                  \
+               FAIL("%s: %s", (msg), __buf);                                  \
+       })
+
+/* Wrappers that fail the test on error and report it. */
+
+#define xaccept_nonblock(fd, addr, len)                                        \
+       ({                                                                     \
+               int __ret =                                                    \
+                       accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC);   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("accept");                                  \
+               __ret;                                                         \
+       })
+
+#define xbind(fd, addr, len)                                                   \
+       ({                                                                     \
+               int __ret = bind((fd), (addr), (len));                         \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("bind");                                    \
+               __ret;                                                         \
+       })
+
+#define xclose(fd)                                                             \
+       ({                                                                     \
+               int __ret = close((fd));                                       \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("close");                                   \
+               __ret;                                                         \
+       })
+
+#define xconnect(fd, addr, len)                                                \
+       ({                                                                     \
+               int __ret = connect((fd), (addr), (len));                      \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("connect");                                 \
+               __ret;                                                         \
+       })
+
+#define xgetsockname(fd, addr, len)                                            \
+       ({                                                                     \
+               int __ret = getsockname((fd), (addr), (len));                  \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("getsockname");                             \
+               __ret;                                                         \
+       })
+
+#define xgetsockopt(fd, level, name, val, len)                                 \
+       ({                                                                     \
+               int __ret = getsockopt((fd), (level), (name), (val), (len));   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("getsockopt(" #name ")");                   \
+               __ret;                                                         \
+       })
+
+#define xlisten(fd, backlog)                                                   \
+       ({                                                                     \
+               int __ret = listen((fd), (backlog));                           \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("listen");                                  \
+               __ret;                                                         \
+       })
+
+#define xsetsockopt(fd, level, name, val, len)                                 \
+       ({                                                                     \
+               int __ret = setsockopt((fd), (level), (name), (val), (len));   \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("setsockopt(" #name ")");                   \
+               __ret;                                                         \
+       })
+
+#define xsend(fd, buf, len, flags)                                             \
+       ({                                                                     \
+               ssize_t __ret = send((fd), (buf), (len), (flags));             \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("send");                                    \
+               __ret;                                                         \
+       })
+
+#define xrecv_nonblock(fd, buf, len, flags)                                    \
+       ({                                                                     \
+               ssize_t __ret = recv_timeout((fd), (buf), (len), (flags),      \
+                                            IO_TIMEOUT_SEC);                  \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("recv");                                    \
+               __ret;                                                         \
+       })
+
+#define xsocket(family, sotype, flags)                                         \
+       ({                                                                     \
+               int __ret = socket(family, sotype, flags);                     \
+               if (__ret == -1)                                               \
+                       FAIL_ERRNO("socket");                                  \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_delete_elem(fd, key)                                          \
+       ({                                                                     \
+               int __ret = bpf_map_delete_elem((fd), (key));                  \
+               if (__ret < 0)                                               \
+                       FAIL_ERRNO("map_delete");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_lookup_elem(fd, key, val)                                     \
+       ({                                                                     \
+               int __ret = bpf_map_lookup_elem((fd), (key), (val));           \
+               if (__ret < 0)                                               \
+                       FAIL_ERRNO("map_lookup");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_map_update_elem(fd, key, val, flags)                              \
+       ({                                                                     \
+               int __ret = bpf_map_update_elem((fd), (key), (val), (flags));  \
+               if (__ret < 0)                                               \
+                       FAIL_ERRNO("map_update");                              \
+               __ret;                                                         \
+       })
+
+#define xbpf_prog_attach(prog, target, type, flags)                            \
+       ({                                                                     \
+               int __ret =                                                    \
+                       bpf_prog_attach((prog), (target), (type), (flags));    \
+               if (__ret < 0)                                               \
+                       FAIL_ERRNO("prog_attach(" #type ")");                  \
+               __ret;                                                         \
+       })
+
+#define xbpf_prog_detach2(prog, target, type)                                  \
+       ({                                                                     \
+               int __ret = bpf_prog_detach2((prog), (target), (type));        \
+               if (__ret < 0)                                               \
+                       FAIL_ERRNO("prog_detach2(" #type ")");                 \
+               __ret;                                                         \
+       })
+
+#define xpthread_create(thread, attr, func, arg)                               \
+       ({                                                                     \
+               int __ret = pthread_create((thread), (attr), (func), (arg));   \
+               errno = __ret;                                                 \
+               if (__ret)                                                     \
+                       FAIL_ERRNO("pthread_create");                          \
+               __ret;                                                         \
+       })
+
+#define xpthread_join(thread, retval)                                          \
+       ({                                                                     \
+               int __ret = pthread_join((thread), (retval));                  \
+               errno = __ret;                                                 \
+               if (__ret)                                                     \
+                       FAIL_ERRNO("pthread_join");                            \
+               __ret;                                                         \
+       })
+
+static inline int poll_read(int fd, unsigned int timeout_sec)
+{
+       struct timeval timeout = { .tv_sec = timeout_sec };
+       fd_set rfds;
+       int r;
+
+       FD_ZERO(&rfds);
+       FD_SET(fd, &rfds);
+
+       r = select(fd + 1, &rfds, NULL, NULL, &timeout);
+       if (r == 0)
+               errno = ETIME;
+
+       return r == 1 ? 0 : -1;
+}
+
+static inline int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
+                                unsigned int timeout_sec)
+{
+       if (poll_read(fd, timeout_sec))
+               return -1;
+
+       return accept(fd, addr, len);
+}
+
+static inline int recv_timeout(int fd, void *buf, size_t len, int flags,
+                              unsigned int timeout_sec)
+{
+       if (poll_read(fd, timeout_sec))
+               return -1;
+
+       return recv(fd, buf, len, flags);
+}
+
+static inline void init_addr_loopback4(struct sockaddr_storage *ss,
+                                      socklen_t *len)
+{
+       struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
+
+       addr4->sin_family = AF_INET;
+       addr4->sin_port = 0;
+       addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+       *len = sizeof(*addr4);
+}
+
+static inline void init_addr_loopback6(struct sockaddr_storage *ss,
+                                      socklen_t *len)
+{
+       struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
+
+       addr6->sin6_family = AF_INET6;
+       addr6->sin6_port = 0;
+       addr6->sin6_addr = in6addr_loopback;
+       *len = sizeof(*addr6);
+}
+
+static inline void init_addr_loopback_vsock(struct sockaddr_storage *ss,
+                                           socklen_t *len)
+{
+       struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
+
+       addr->svm_family = AF_VSOCK;
+       addr->svm_port = VMADDR_PORT_ANY;
+       addr->svm_cid = VMADDR_CID_LOCAL;
+       *len = sizeof(*addr);
+}
+
+static inline void init_addr_loopback(int family, struct sockaddr_storage *ss,
+                                     socklen_t *len)
+{
+       switch (family) {
+       case AF_INET:
+               init_addr_loopback4(ss, len);
+               return;
+       case AF_INET6:
+               init_addr_loopback6(ss, len);
+               return;
+       case AF_VSOCK:
+               init_addr_loopback_vsock(ss, len);
+               return;
+       default:
+               FAIL("unsupported address family %d", family);
+       }
+}
+
+static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
+{
+       return (struct sockaddr *)ss;
+}
+
+static inline int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
+{
+       u64 value;
+       u32 key;
+       int err;
+
+       key = 0;
+       value = fd1;
+       err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+       if (err)
+               return err;
+
+       key = 1;
+       value = fd2;
+       return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
+}
+
+static inline int create_pair(int s, int family, int sotype, int *c, int *p)
+{
+       struct sockaddr_storage addr;
+       socklen_t len;
+       int err = 0;
+
+       len = sizeof(addr);
+       err = xgetsockname(s, sockaddr(&addr), &len);
+       if (err)
+               return err;
+
+       *c = xsocket(family, sotype, 0);
+       if (*c < 0)
+               return errno;
+       err = xconnect(*c, sockaddr(&addr), len);
+       if (err) {
+               err = errno;
+               goto close_cli0;
+       }
+
+       *p = xaccept_nonblock(s, NULL, NULL);
+       if (*p < 0) {
+               err = errno;
+               goto close_cli0;
+       }
+       return err;
+close_cli0:
+       close(*c);
+       return err;
+}
+
+static inline int create_socket_pairs(int s, int family, int sotype,
+                                     int *c0, int *c1, int *p0, int *p1)
+{
+       int err;
+
+       err = create_pair(s, family, sotype, c0, p0);
+       if (err)
+               return err;
+
+       err = create_pair(s, family, sotype, c1, p1);
+       if (err) {
+               close(*c0);
+               close(*p0);
+       }
+       return err;
+}
+
+static inline int enable_reuseport(int s, int progfd)
+{
+       int err, one = 1;
+
+       err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
+       if (err)
+               return -1;
+       err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
+                         sizeof(progfd));
+       if (err)
+               return -1;
+
+       return 0;
+}
+
+static inline int socket_loopback_reuseport(int family, int sotype, int progfd)
+{
+       struct sockaddr_storage addr;
+       socklen_t len;
+       int err, s;
+
+       init_addr_loopback(family, &addr, &len);
+
+       s = xsocket(family, sotype, 0);
+       if (s == -1)
+               return -1;
+
+       if (progfd >= 0)
+               enable_reuseport(s, progfd);
+
+       err = xbind(s, sockaddr(&addr), len);
+       if (err)
+               goto close;
+
+       if (sotype & SOCK_DGRAM)
+               return s;
+
+       err = xlisten(s, SOMAXCONN);
+       if (err)
+               goto close;
+
+       return s;
+close:
+       xclose(s);
+       return -1;
+}
+
+static inline int socket_loopback(int family, int sotype)
+{
+       return socket_loopback_reuseport(family, sotype, -1);
+}
+
+#endif // __SOCKMAP_HELPERS__
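
The x* wrappers above lean on GNU statement expressions: a ({ ... }) block evaluates to its last expression, which lets a macro both report a failure and still yield the call's return value. A self-contained sketch with a hypothetical wrapper:

    #include <stdio.h>
    #include <unistd.h>

    #define xdup(fd)                                \
            ({                                      \
                    int __ret = dup(fd);            \
                    if (__ret == -1)                \
                            perror("dup");          \
                    __ret;                          \
            })

    int main(void)
    {
            int copy = xdup(1);     /* usable exactly like a function call */
            return copy == -1;
    }
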
index 141c1e5..b4f6f3a 100644 (file)
 #include <unistd.h>
 #include <linux/vm_sockets.h>
 
-/* workaround for older vm_sockets.h */
-#ifndef VMADDR_CID_LOCAL
-#define VMADDR_CID_LOCAL 1
-#endif
-
 #include <bpf/bpf.h>
 #include <bpf/libbpf.h>
 
 #include "test_progs.h"
 #include "test_sockmap_listen.skel.h"
 
-#define IO_TIMEOUT_SEC 30
-#define MAX_STRERR_LEN 256
-#define MAX_TEST_NAME 80
-
-#define __always_unused        __attribute__((__unused__))
-
-#define _FAIL(errnum, fmt...)                                                  \
-       ({                                                                     \
-               error_at_line(0, (errnum), __func__, __LINE__, fmt);           \
-               CHECK_FAIL(true);                                              \
-       })
-#define FAIL(fmt...) _FAIL(0, fmt)
-#define FAIL_ERRNO(fmt...) _FAIL(errno, fmt)
-#define FAIL_LIBBPF(err, msg)                                                  \
-       ({                                                                     \
-               char __buf[MAX_STRERR_LEN];                                    \
-               libbpf_strerror((err), __buf, sizeof(__buf));                  \
-               FAIL("%s: %s", (msg), __buf);                                  \
-       })
-
-/* Wrappers that fail the test on error and report it. */
-
-#define xaccept_nonblock(fd, addr, len)                                        \
-       ({                                                                     \
-               int __ret =                                                    \
-                       accept_timeout((fd), (addr), (len), IO_TIMEOUT_SEC);   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("accept");                                  \
-               __ret;                                                         \
-       })
-
-#define xbind(fd, addr, len)                                                   \
-       ({                                                                     \
-               int __ret = bind((fd), (addr), (len));                         \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("bind");                                    \
-               __ret;                                                         \
-       })
-
-#define xclose(fd)                                                             \
-       ({                                                                     \
-               int __ret = close((fd));                                       \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("close");                                   \
-               __ret;                                                         \
-       })
-
-#define xconnect(fd, addr, len)                                                \
-       ({                                                                     \
-               int __ret = connect((fd), (addr), (len));                      \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("connect");                                 \
-               __ret;                                                         \
-       })
-
-#define xgetsockname(fd, addr, len)                                            \
-       ({                                                                     \
-               int __ret = getsockname((fd), (addr), (len));                  \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("getsockname");                             \
-               __ret;                                                         \
-       })
-
-#define xgetsockopt(fd, level, name, val, len)                                 \
-       ({                                                                     \
-               int __ret = getsockopt((fd), (level), (name), (val), (len));   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("getsockopt(" #name ")");                   \
-               __ret;                                                         \
-       })
-
-#define xlisten(fd, backlog)                                                   \
-       ({                                                                     \
-               int __ret = listen((fd), (backlog));                           \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("listen");                                  \
-               __ret;                                                         \
-       })
-
-#define xsetsockopt(fd, level, name, val, len)                                 \
-       ({                                                                     \
-               int __ret = setsockopt((fd), (level), (name), (val), (len));   \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("setsockopt(" #name ")");                   \
-               __ret;                                                         \
-       })
-
-#define xsend(fd, buf, len, flags)                                             \
-       ({                                                                     \
-               ssize_t __ret = send((fd), (buf), (len), (flags));             \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("send");                                    \
-               __ret;                                                         \
-       })
-
-#define xrecv_nonblock(fd, buf, len, flags)                                    \
-       ({                                                                     \
-               ssize_t __ret = recv_timeout((fd), (buf), (len), (flags),      \
-                                            IO_TIMEOUT_SEC);                  \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("recv");                                    \
-               __ret;                                                         \
-       })
-
-#define xsocket(family, sotype, flags)                                         \
-       ({                                                                     \
-               int __ret = socket(family, sotype, flags);                     \
-               if (__ret == -1)                                               \
-                       FAIL_ERRNO("socket");                                  \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_delete_elem(fd, key)                                          \
-       ({                                                                     \
-               int __ret = bpf_map_delete_elem((fd), (key));                  \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_delete");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_lookup_elem(fd, key, val)                                     \
-       ({                                                                     \
-               int __ret = bpf_map_lookup_elem((fd), (key), (val));           \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_lookup");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_map_update_elem(fd, key, val, flags)                              \
-       ({                                                                     \
-               int __ret = bpf_map_update_elem((fd), (key), (val), (flags));  \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("map_update");                              \
-               __ret;                                                         \
-       })
-
-#define xbpf_prog_attach(prog, target, type, flags)                            \
-       ({                                                                     \
-               int __ret =                                                    \
-                       bpf_prog_attach((prog), (target), (type), (flags));    \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("prog_attach(" #type ")");                  \
-               __ret;                                                         \
-       })
-
-#define xbpf_prog_detach2(prog, target, type)                                  \
-       ({                                                                     \
-               int __ret = bpf_prog_detach2((prog), (target), (type));        \
-               if (__ret < 0)                                               \
-                       FAIL_ERRNO("prog_detach2(" #type ")");                 \
-               __ret;                                                         \
-       })
-
-#define xpthread_create(thread, attr, func, arg)                               \
-       ({                                                                     \
-               int __ret = pthread_create((thread), (attr), (func), (arg));   \
-               errno = __ret;                                                 \
-               if (__ret)                                                     \
-                       FAIL_ERRNO("pthread_create");                          \
-               __ret;                                                         \
-       })
-
-#define xpthread_join(thread, retval)                                          \
-       ({                                                                     \
-               int __ret = pthread_join((thread), (retval));                  \
-               errno = __ret;                                                 \
-               if (__ret)                                                     \
-                       FAIL_ERRNO("pthread_join");                            \
-               __ret;                                                         \
-       })
-
-static int poll_read(int fd, unsigned int timeout_sec)
-{
-       struct timeval timeout = { .tv_sec = timeout_sec };
-       fd_set rfds;
-       int r;
-
-       FD_ZERO(&rfds);
-       FD_SET(fd, &rfds);
-
-       r = select(fd + 1, &rfds, NULL, NULL, &timeout);
-       if (r == 0)
-               errno = ETIME;
-
-       return r == 1 ? 0 : -1;
-}
-
-static int accept_timeout(int fd, struct sockaddr *addr, socklen_t *len,
-                         unsigned int timeout_sec)
-{
-       if (poll_read(fd, timeout_sec))
-               return -1;
-
-       return accept(fd, addr, len);
-}
-
-static int recv_timeout(int fd, void *buf, size_t len, int flags,
-                       unsigned int timeout_sec)
-{
-       if (poll_read(fd, timeout_sec))
-               return -1;
-
-       return recv(fd, buf, len, flags);
-}
-
-static void init_addr_loopback4(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_in *addr4 = memset(ss, 0, sizeof(*ss));
-
-       addr4->sin_family = AF_INET;
-       addr4->sin_port = 0;
-       addr4->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       *len = sizeof(*addr4);
-}
-
-static void init_addr_loopback6(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_in6 *addr6 = memset(ss, 0, sizeof(*ss));
-
-       addr6->sin6_family = AF_INET6;
-       addr6->sin6_port = 0;
-       addr6->sin6_addr = in6addr_loopback;
-       *len = sizeof(*addr6);
-}
-
-static void init_addr_loopback_vsock(struct sockaddr_storage *ss, socklen_t *len)
-{
-       struct sockaddr_vm *addr = memset(ss, 0, sizeof(*ss));
-
-       addr->svm_family = AF_VSOCK;
-       addr->svm_port = VMADDR_PORT_ANY;
-       addr->svm_cid = VMADDR_CID_LOCAL;
-       *len = sizeof(*addr);
-}
-
-static void init_addr_loopback(int family, struct sockaddr_storage *ss,
-                              socklen_t *len)
-{
-       switch (family) {
-       case AF_INET:
-               init_addr_loopback4(ss, len);
-               return;
-       case AF_INET6:
-               init_addr_loopback6(ss, len);
-               return;
-       case AF_VSOCK:
-               init_addr_loopback_vsock(ss, len);
-               return;
-       default:
-               FAIL("unsupported address family %d", family);
-       }
-}
-
-static inline struct sockaddr *sockaddr(struct sockaddr_storage *ss)
-{
-       return (struct sockaddr *)ss;
-}
-
-static int enable_reuseport(int s, int progfd)
-{
-       int err, one = 1;
-
-       err = xsetsockopt(s, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
-       if (err)
-               return -1;
-       err = xsetsockopt(s, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF, &progfd,
-                         sizeof(progfd));
-       if (err)
-               return -1;
-
-       return 0;
-}
-
-static int socket_loopback_reuseport(int family, int sotype, int progfd)
-{
-       struct sockaddr_storage addr;
-       socklen_t len;
-       int err, s;
-
-       init_addr_loopback(family, &addr, &len);
-
-       s = xsocket(family, sotype, 0);
-       if (s == -1)
-               return -1;
-
-       if (progfd >= 0)
-               enable_reuseport(s, progfd);
-
-       err = xbind(s, sockaddr(&addr), len);
-       if (err)
-               goto close;
-
-       if (sotype & SOCK_DGRAM)
-               return s;
-
-       err = xlisten(s, SOMAXCONN);
-       if (err)
-               goto close;
-
-       return s;
-close:
-       xclose(s);
-       return -1;
-}
-
-static int socket_loopback(int family, int sotype)
-{
-       return socket_loopback_reuseport(family, sotype, -1);
-}
+#include "sockmap_helpers.h"
 
 static void test_insert_invalid(struct test_sockmap_listen *skel __always_unused,
                                int family, int sotype, int mapfd)
@@ -984,31 +671,12 @@ static const char *redir_mode_str(enum redir_mode mode)
        }
 }
 
-static int add_to_sockmap(int sock_mapfd, int fd1, int fd2)
-{
-       u64 value;
-       u32 key;
-       int err;
-
-       key = 0;
-       value = fd1;
-       err = xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
-       if (err)
-               return err;
-
-       key = 1;
-       value = fd2;
-       return xbpf_map_update_elem(sock_mapfd, &key, &value, BPF_NOEXIST);
-}
-
 static void redir_to_connected(int family, int sotype, int sock_mapfd,
                               int verd_mapfd, enum redir_mode mode)
 {
        const char *log_prefix = redir_mode_str(mode);
-       struct sockaddr_storage addr;
        int s, c0, c1, p0, p1;
        unsigned int pass;
-       socklen_t len;
        int err, n;
        u32 key;
        char b;
@@ -1019,36 +687,13 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (s < 0)
                return;
 
-       len = sizeof(addr);
-       err = xgetsockname(s, sockaddr(&addr), &len);
+       err = create_socket_pairs(s, family, sotype, &c0, &c1, &p0, &p1);
        if (err)
                goto close_srv;
 
-       c0 = xsocket(family, sotype, 0);
-       if (c0 < 0)
-               goto close_srv;
-       err = xconnect(c0, sockaddr(&addr), len);
-       if (err)
-               goto close_cli0;
-
-       p0 = xaccept_nonblock(s, NULL, NULL);
-       if (p0 < 0)
-               goto close_cli0;
-
-       c1 = xsocket(family, sotype, 0);
-       if (c1 < 0)
-               goto close_peer0;
-       err = xconnect(c1, sockaddr(&addr), len);
-       if (err)
-               goto close_cli1;
-
-       p1 = xaccept_nonblock(s, NULL, NULL);
-       if (p1 < 0)
-               goto close_cli1;
-
        err = add_to_sockmap(sock_mapfd, p0, p1);
        if (err)
-               goto close_peer1;
+               goto close;
 
        n = write(mode == REDIR_INGRESS ? c1 : p1, "a", 1);
        if (n < 0)
@@ -1056,12 +701,12 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (n == 0)
                FAIL("%s: incomplete write", log_prefix);
        if (n < 1)
-               goto close_peer1;
+               goto close;
 
        key = SK_PASS;
        err = xbpf_map_lookup_elem(verd_mapfd, &key, &pass);
        if (err)
-               goto close_peer1;
+               goto close;
        if (pass != 1)
                FAIL("%s: want pass count 1, have %d", log_prefix, pass);
        n = recv_timeout(c0, &b, 1, 0, IO_TIMEOUT_SEC);
@@ -1070,13 +715,10 @@ static void redir_to_connected(int family, int sotype, int sock_mapfd,
        if (n == 0)
                FAIL("%s: incomplete recv", log_prefix);
 
-close_peer1:
+close:
        xclose(p1);
-close_cli1:
        xclose(c1);
-close_peer0:
        xclose(p0);
-close_cli0:
        xclose(c0);
 close_srv:
        xclose(s);
index 4512dd8..05d0e07 100644 (file)
@@ -209,7 +209,7 @@ static int getsetsockopt(void)
                        err, errno);
                goto err;
        }
-       ASSERT_EQ(optlen, 4, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
+       ASSERT_EQ(optlen, 8, "Unexpected NETLINK_LIST_MEMBERSHIPS value");
 
        free(big_buf);
        close(fd);
diff --git a/tools/testing/selftests/bpf/progs/inner_array_lookup.c b/tools/testing/selftests/bpf/progs/inner_array_lookup.c
new file mode 100644 (file)
index 0000000..c2c8f2f
--- /dev/null
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct inner_map {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 5);
+       __type(key, int);
+       __type(value, int);
+} inner_map1 SEC(".maps");
+
+struct outer_map {
+       __uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
+       __uint(max_entries, 3);
+       __type(key, int);
+       __array(values, struct inner_map);
+} outer_map1 SEC(".maps") = {
+       .values = {
+               [2] = &inner_map1,
+       },
+};
+
+SEC("raw_tp/sys_enter")
+int handle__sys_enter(void *ctx)
+{
+       int outer_key = 2, inner_key = 3;
+       int *val;
+       void *map;
+
+       map = bpf_map_lookup_elem(&outer_map1, &outer_key);
+       if (!map)
+               return 1;
+
+       val = bpf_map_lookup_elem(map, &inner_key);
+       if (!val)
+               return 1;
+
+       if (*val == 1)
+               *val = 2;
+
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
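
For orientation, a minimal user-space sketch (not part of the patch; the object filename, the seeded value, and the wait are assumptions) of how this BTF-defined map-in-map program could be loaded with libbpf, its inner array seeded, and the raw tracepoint attached so the handler flips inner_map1[3] from 1 to 2:

  #include <stdio.h>
  #include <unistd.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
          int map_fd, key = 3, val = 1, ret = 1;
          struct bpf_program *prog;
          struct bpf_object *obj;
          struct bpf_link *link = NULL;

          obj = bpf_object__open_file("inner_array_lookup.bpf.o", NULL);
          if (!obj)
                  return 1;
          if (bpf_object__load(obj))
                  goto out;

          /* inner_map1 already sits in outer_map1 at key 2 thanks to the
           * static .values initializer, so updating it directly is enough. */
          map_fd = bpf_object__find_map_fd_by_name(obj, "inner_map1");
          if (map_fd < 0 || bpf_map_update_elem(map_fd, &key, &val, BPF_ANY))
                  goto out;

          prog = bpf_object__find_program_by_name(obj, "handle__sys_enter");
          link = prog ? bpf_program__attach(prog) : NULL;
          if (!link)
                  goto out;

          sleep(1);       /* let a few sys_enter events run the handler */

          if (!bpf_map_lookup_elem(map_fd, &key, &val)) {
                  printf("inner_map1[3] = %d\n", val);    /* expect 2 */
                  ret = 0;
          }
  out:
          bpf_link__destroy(link);
          bpf_object__close(obj);
          return ret;
  }
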
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_drop_prog.c
new file mode 100644 (file)
index 0000000..2931480
--- /dev/null
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+       return SK_DROP;
+}
+
+char _license[] SEC("license") = "GPL";
index baf9ebc..99d2ea9 100644 (file)
@@ -191,7 +191,7 @@ SEC("sockops")
 int bpf_sockmap(struct bpf_sock_ops *skops)
 {
        __u32 lport, rport;
-       int op, err, ret;
+       int op, ret;
 
        op = (int) skops->op;
 
@@ -203,10 +203,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                if (lport == 10000) {
                        ret = 1;
 #ifdef SOCKMAP
-                       err = bpf_sock_map_update(skops, &sock_map, &ret,
+                       bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
 #else
-                       err = bpf_sock_hash_update(skops, &sock_map, &ret,
+                       bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
 #endif
                }
@@ -218,10 +218,10 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                if (bpf_ntohl(rport) == 10001) {
                        ret = 10;
 #ifdef SOCKMAP
-                       err = bpf_sock_map_update(skops, &sock_map, &ret,
+                       bpf_sock_map_update(skops, &sock_map, &ret,
                                                  BPF_NOEXIST);
 #else
-                       err = bpf_sock_hash_update(skops, &sock_map, &ret,
+                       bpf_sock_hash_update(skops, &sock_map, &ret,
                                                   BPF_NOEXIST);
 #endif
                }
@@ -230,8 +230,6 @@ int bpf_sockmap(struct bpf_sock_ops *skops)
                break;
        }
 
-       __sink(err);
-
        return 0;
 }
 
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c b/tools/testing/selftests/bpf/progs/test_sockmap_pass_prog.c
new file mode 100644 (file)
index 0000000..1d86a71
--- /dev/null
@@ -0,0 +1,32 @@
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_rx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_tx SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_SOCKMAP);
+       __uint(max_entries, 20);
+       __type(key, int);
+       __type(value, int);
+} sock_map_msg SEC(".maps");
+
+SEC("sk_skb")
+int prog_skb_verdict(struct __sk_buff *skb)
+{
+       return SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
index d6e106f..a1e955d 100644 (file)
@@ -1,7 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0
 all:
 
-TEST_PROGS := ftracetest
+TEST_PROGS_EXTENDED := ftracetest
+TEST_PROGS := ftracetest-ktap
 TEST_FILES := test.d settings
 EXTRA_CLEAN := $(OUTPUT)/logs/*
 
index c3311c8..2506621 100755 (executable)
@@ -13,6 +13,7 @@ echo "Usage: ftracetest [options] [testcase(s)] [testcase-directory(s)]"
 echo " Options:"
 echo "         -h|--help  Show help message"
 echo "         -k|--keep  Keep passed test logs"
+echo "         -K|--ktap  Output in KTAP format"
 echo "         -v|--verbose Increase verbosity of test messages"
 echo "         -vv        Alias of -v -v (Show all results in stdout)"
 echo "         -vvv       Alias of -v -v -v (Show all commands immediately)"
@@ -85,6 +86,10 @@ parse_opts() { # opts
       KEEP_LOG=1
       shift 1
     ;;
+    --ktap|-K)
+      KTAP=1
+      shift 1
+    ;;
     --verbose|-v|-vv|-vvv)
       if [ $VERBOSE -eq -1 ]; then
        usage "--console can not use with --verbose"
@@ -178,6 +183,7 @@ TEST_DIR=$TOP_DIR/test.d
 TEST_CASES=`find_testcases $TEST_DIR`
 LOG_DIR=$TOP_DIR/logs/`date +%Y%m%d-%H%M%S`/
 KEEP_LOG=0
+KTAP=0
 DEBUG=0
 VERBOSE=0
 UNSUPPORTED_RESULT=0
@@ -229,7 +235,7 @@ prlog() { # messages
     newline=
     shift
   fi
-  printf "$*$newline"
+  [ "$KTAP" != "1" ] && printf "$*$newline"
   [ "$LOG_FILE" ] && printf "$*$newline" | strip_esc >> $LOG_FILE
 }
 catlog() { #file
@@ -260,11 +266,11 @@ TOTAL_RESULT=0
 
 INSTANCE=
 CASENO=0
+CASENAME=
 
 testcase() { # testfile
   CASENO=$((CASENO+1))
-  desc=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
-  prlog -n "[$CASENO]$INSTANCE$desc"
+  CASENAME=`grep "^#[ \t]*description:" $1 | cut -f2- -d:`
 }
 
 checkreq() { # testfile
@@ -277,40 +283,68 @@ test_on_instance() { # testfile
   grep -q "^#[ \t]*flags:.*instance" $1
 }
 
+ktaptest() { # result comment
+  if [ "$KTAP" != "1" ]; then
+    return
+  fi
+
+  local result=
+  if [ "$1" = "1" ]; then
+    result="ok"
+  else
+    result="not ok"
+  fi
+  shift
+
+  local comment=$*
+  if [ "$comment" != "" ]; then
+    comment="# $comment"
+  fi
+
+  echo $CASENO $result $INSTANCE$CASENAME $comment
+}
+
 eval_result() { # sigval
   case $1 in
     $PASS)
       prlog "  [${color_green}PASS${color_reset}]"
+      ktaptest 1
       PASSED_CASES="$PASSED_CASES $CASENO"
       return 0
     ;;
     $FAIL)
       prlog "  [${color_red}FAIL${color_reset}]"
+      ktaptest 0
       FAILED_CASES="$FAILED_CASES $CASENO"
       return 1 # this is a bug.
     ;;
     $UNRESOLVED)
       prlog "  [${color_blue}UNRESOLVED${color_reset}]"
+      ktaptest 0 UNRESOLVED
       UNRESOLVED_CASES="$UNRESOLVED_CASES $CASENO"
       return $UNRESOLVED_RESULT # depends on use case
     ;;
     $UNTESTED)
       prlog "  [${color_blue}UNTESTED${color_reset}]"
+      ktaptest 1 SKIP
       UNTESTED_CASES="$UNTESTED_CASES $CASENO"
       return 0
     ;;
     $UNSUPPORTED)
       prlog "  [${color_blue}UNSUPPORTED${color_reset}]"
+      ktaptest 1 SKIP
       UNSUPPORTED_CASES="$UNSUPPORTED_CASES $CASENO"
       return $UNSUPPORTED_RESULT # depends on use case
     ;;
     $XFAIL)
       prlog "  [${color_green}XFAIL${color_reset}]"
+      ktaptest 1 XFAIL
       XFAILED_CASES="$XFAILED_CASES $CASENO"
       return 0
     ;;
     *)
       prlog "  [${color_blue}UNDEFINED${color_reset}]"
+      ktaptest 0 error
       UNDEFINED_CASES="$UNDEFINED_CASES $CASENO"
       return 1 # this must be a test bug
     ;;
@@ -371,6 +405,7 @@ __run_test() { # testfile
 run_test() { # testfile
   local testname=`basename $1`
   testcase $1
+  prlog -n "[$CASENO]$INSTANCE$CASENAME"
   if [ ! -z "$LOG_FILE" ] ; then
     local testlog=`mktemp $LOG_DIR/${CASENO}-${testname}-log.XXXXXX`
   else
@@ -405,6 +440,17 @@ run_test() { # testfile
 # load in the helper functions
 . $TEST_DIR/functions
 
+if [ "$KTAP" = "1" ]; then
+  echo "TAP version 13"
+
+  casecount=`echo $TEST_CASES | wc -w`
+  for t in $TEST_CASES; do
+    test_on_instance $t || continue
+    casecount=$((casecount+1))
+  done
+  echo "1..${casecount}"
+fi
+
 # Main loop
 for t in $TEST_CASES; do
   run_test $t
@@ -439,6 +485,17 @@ prlog "# of unsupported: " `echo $UNSUPPORTED_CASES | wc -w`
 prlog "# of xfailed: " `echo $XFAILED_CASES | wc -w`
 prlog "# of undefined(test bug): " `echo $UNDEFINED_CASES | wc -w`
 
+if [ "$KTAP" = "1" ]; then
+  echo -n "# Totals:"
+  echo -n " pass:"`echo $PASSED_CASES | wc -w`
+  echo -n " faii:"`echo $FAILED_CASES | wc -w`
+  echo -n " xfail:"`echo $XFAILED_CASES | wc -w`
+  echo -n " xpass:0"
+  echo -n " skip:"`echo $UNTESTED_CASES $UNSUPPORTED_CASES | wc -w`
+  echo -n " error:"`echo $UNRESOLVED_CASES $UNDEFINED_CASES | wc -w`
+  echo
+fi
+
 cleanup
 
 # if no error, return 0
diff --git a/tools/testing/selftests/ftrace/ftracetest-ktap b/tools/testing/selftests/ftrace/ftracetest-ktap
new file mode 100755 (executable)
index 0000000..b328467
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# ftracetest-ktap: Wrapper to integrate ftracetest with the kselftest runner
+#
+# Copyright (C) Arm Ltd., 2023
+
+./ftracetest -K
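
For reference, a run through the new -K path above produces a KTAP-style stream shaped like the following; the transcript is illustrative only (case names and counts are hypothetical), but the line layout follows the ktaptest and Totals code above:

  $ ./ftracetest-ktap
  TAP version 13
  1..3
  1 ok Basic test for tracers
  2 ok Basic trace clock test
  3 not ok event tracing - enable/disable with event level files # UNRESOLVED
  # Totals: pass:2 fail:0 xfail:0 xpass:0 skip:0 error:1
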
index e2ff3bf..2de7c61 100644 (file)
@@ -9,18 +9,33 @@ fail() { #msg
     exit_fail
 }
 
-echo "Test event filter function name"
+sample_events() {
+    echo > trace
+    echo 1 > events/kmem/kmem_cache_free/enable
+    echo 1 > tracing_on
+    ls > /dev/null
+    echo 0 > tracing_on
+    echo 0 > events/kmem/kmem_cache_free/enable
+}
+
 echo 0 > tracing_on
 echo 0 > events/enable
+
+echo "Get the most frequently calling function"
+sample_events
+
+target_func=`cut -d: -f3 trace | sed 's/call_site=\([^+]*\)+0x.*/\1/' | sort | uniq -c | sort | tail -n 1 | sed 's/^[ 0-9]*//'`
+if [ -z "$target_func" ]; then
+    exit_fail
+fi
 echo > trace
-echo 'call_site.function == exit_mmap' > events/kmem/kmem_cache_free/filter
-echo 1 > events/kmem/kmem_cache_free/enable
-echo 1 > tracing_on
-ls > /dev/null
-echo 0 > events/kmem/kmem_cache_free/enable
 
-hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l`
-misscnt=`grep kmem_cache_free trace| grep -v exit_mmap | wc -l`
+echo "Test event filter function name"
+echo "call_site.function == $target_func" > events/kmem/kmem_cache_free/filter
+sample_events
+
+hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
 
 if [ $hitcnt -eq 0 ]; then
        exit_fail
@@ -30,20 +45,14 @@ if [ $misscnt -gt 0 ]; then
        exit_fail
 fi
 
-address=`grep ' exit_mmap$' /proc/kallsyms | cut -d' ' -f1`
+address=`grep " ${target_func}\$" /proc/kallsyms | cut -d' ' -f1`
 
 echo "Test event filter function address"
-echo 0 > tracing_on
-echo 0 > events/enable
-echo > trace
 echo "call_site.function == 0x$address" > events/kmem/kmem_cache_free/filter
-echo 1 > events/kmem/kmem_cache_free/enable
-echo 1 > tracing_on
-sleep 1
-echo 0 > events/kmem/kmem_cache_free/enable
+sample_events
 
-hitcnt=`grep kmem_cache_free trace| grep exit_mmap | wc -l`
-misscnt=`grep kmem_cache_free trace| grep -v exit_mmap | wc -l`
+hitcnt=`grep kmem_cache_free trace| grep $target_func | wc -l`
+misscnt=`grep kmem_cache_free trace| grep -v $target_func | wc -l`
 
 if [ $hitcnt -eq 0 ]; then
        exit_fail
diff --git a/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc b/tools/testing/selftests/ftrace/test.d/trigger/inter-event/trigger-synthetic-event-stack-legacy.tc
new file mode 100644 (file)
index 0000000..d0cd91a
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# description: event trigger - test inter-event histogram trigger trace action with dynamic string param (legacy stack)
+# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[] stack' >> synthetic_events":README
+
+fail() { #msg
+    echo $1
+    exit_fail
+}
+
+echo "Test create synthetic event with stack"
+
+# Test the old stacktrace keyword (for backward compatibility)
+echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
+echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace  if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
+echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
+echo 1 > events/synthetic/wake_lat/enable
+sleep 1
+
+if ! grep -q "=>.*sched" trace; then
+    fail "Failed to create synthetic event with stack"
+fi
+
+exit 0
index 755dbe9..8f1cc9a 100644 (file)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # description: event trigger - test inter-event histogram trigger trace action with dynamic string param
-# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[]' >> synthetic_events":README
+# requires: set_event synthetic_events events/sched/sched_process_exec/hist "can be any field, or the special string 'common_stacktrace'":README
 
 fail() { #msg
     echo $1
@@ -10,9 +10,8 @@ fail() { #msg
 
 echo "Test create synthetic event with stack"
 
-
 echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
-echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace  if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
+echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace  if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
 echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
 echo 1 > events/synthetic/wake_lat/enable
 sleep 1
index 9f539d4..fa2ce2b 100755 (executable)
@@ -389,6 +389,9 @@ create_chip chip
 create_bank chip bank
 set_num_lines chip bank 8
 enable_chip chip
+DEVNAME=`configfs_dev_name chip`
+CHIPNAME=`configfs_chip_name chip bank`
+SYSFS_PATH="/sys/devices/platform/$DEVNAME/$CHIPNAME/sim_gpio0/value"
 $BASE_DIR/gpio-mockup-cdev -b pull-up /dev/`configfs_chip_name chip bank` 0
 test `cat $SYSFS_PATH` = "1" || fail "bias setting does not work"
 remove_chip chip
index 7a5ff64..4761b76 100644 (file)
@@ -116,6 +116,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/sev_migrate_tests
 TEST_GEN_PROGS_x86_64 += x86_64/amx_test
 TEST_GEN_PROGS_x86_64 += x86_64/max_vcpuid_cap_test
 TEST_GEN_PROGS_x86_64 += x86_64/triple_fault_event_test
+TEST_GEN_PROGS_x86_64 += x86_64/recalc_apic_map_test
 TEST_GEN_PROGS_x86_64 += access_tracking_perf_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c b/tools/testing/selftests/kvm/x86_64/recalc_apic_map_test.c
new file mode 100644 (file)
index 0000000..4c416eb
--- /dev/null
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test edge cases and race conditions in kvm_recalculate_apic_map().
+ */
+
+#include <sys/ioctl.h>
+#include <pthread.h>
+#include <time.h>
+
+#include "processor.h"
+#include "test_util.h"
+#include "kvm_util.h"
+#include "apic.h"
+
+#define TIMEOUT                5       /* seconds */
+
+#define LAPIC_DISABLED 0
+#define LAPIC_X2APIC   (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)
+#define MAX_XAPIC_ID   0xff
+
+static void *race(void *arg)
+{
+       struct kvm_lapic_state lapic = {};
+       struct kvm_vcpu *vcpu = arg;
+
+       while (1) {
+               /* Trigger kvm_recalculate_apic_map(). */
+               vcpu_ioctl(vcpu, KVM_SET_LAPIC, &lapic);
+               pthread_testcancel();
+       }
+
+       return NULL;
+}
+
+int main(void)
+{
+       struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+       struct kvm_vcpu *vcpuN;
+       struct kvm_vm *vm;
+       pthread_t thread;
+       time_t t;
+       int i;
+
+       kvm_static_assert(KVM_MAX_VCPUS > MAX_XAPIC_ID);
+
+       /*
+        * Create the max number of vCPUs supported by selftests so that KVM
+        * has a decent amount of work to do when recalculating the map, i.e. to
+        * make the problematic window large enough to hit.
+        */
+       vm = vm_create_with_vcpus(KVM_MAX_VCPUS, NULL, vcpus);
+
+       /*
+        * Enable x2APIC on all vCPUs so that KVM doesn't bail from the recalc
+        * due to vCPUs having aliased xAPIC IDs (truncated to 8 bits).
+        */
+       for (i = 0; i < KVM_MAX_VCPUS; i++)
+               vcpu_set_msr(vcpus[i], MSR_IA32_APICBASE, LAPIC_X2APIC);
+
+       ASSERT_EQ(pthread_create(&thread, NULL, race, vcpus[0]), 0);
+
+       vcpuN = vcpus[KVM_MAX_VCPUS - 1];
+       for (t = time(NULL) + TIMEOUT; time(NULL) < t;) {
+               vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_X2APIC);
+               vcpu_set_msr(vcpuN, MSR_IA32_APICBASE, LAPIC_DISABLED);
+       }
+
+       ASSERT_EQ(pthread_cancel(thread), 0);
+       ASSERT_EQ(pthread_join(thread, NULL), 0);
+
+       kvm_vm_free(vm);
+
+       return 0;
+}
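
The new test is built like any other KVM selftest; a typical way to build and run it standalone (paths relative to the kernel source tree, shown for illustration) would be:

  make -C tools/testing/selftests TARGETS=kvm
  ./tools/testing/selftests/kvm/x86_64/recalc_apic_map_test
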
index 80f06aa..f27a733 100644 (file)
@@ -8,8 +8,10 @@ diag_uid
 fin_ack_lat
 gro
 hwtstamp_config
+io_uring_zerocopy_tx
 ioam6_parser
 ip_defrag
+ip_local_port_range
 ipsec
 ipv6_flowlabel
 ipv6_flowlabel_mgr
@@ -26,6 +28,7 @@ reuseport_bpf_cpu
 reuseport_bpf_numa
 reuseport_dualstack
 rxtimestamp
+sctp_hello
 sk_bind_sendto_listen
 sk_connect_zero_addr
 socket
index a47b26a..0f5e88c 100755 (executable)
@@ -2283,7 +2283,7 @@ EOF
 ################################################################################
 # main
 
-while getopts :t:pP46hv:w: o
+while getopts :t:pP46hvw: o
 do
        case $o in
                t) TESTS=$OPTARG;;
index 7da8ec8..35d89df 100755 (executable)
@@ -68,7 +68,7 @@ setup()
 cleanup()
 {
        $IP link del dev dummy0 &> /dev/null
-       ip netns del ns1
+       ip netns del ns1 &> /dev/null
        ip netns del ns2 &> /dev/null
 }
 
index 43a7236..7b936a9 100644 (file)
@@ -9,7 +9,7 @@ TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
 
 TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq
 
-TEST_FILES := settings
+TEST_FILES := mptcp_lib.sh settings
 
 EXTRA_CLEAN := *.pcap
 
index ef628b1..4eacdb1 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 sec=$(date +%s)
 rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
 ns="ns1-$rndh"
@@ -31,6 +33,8 @@ cleanup()
        ip netns del $ns
 }
 
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
index a43d3e2..c1f7bac 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 time_start=$(date +%s)
 
 optstring="S:R:d:e:l:r:h4cm:f:tC"
@@ -141,6 +143,8 @@ cleanup()
        done
 }
 
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
index 26310c1..29f0c99 100755 (executable)
@@ -10,6 +10,8 @@
 # because it's invoked by variable name, see how the "tests" array is used
 #shellcheck disable=SC2317
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 ret=0
 sin=""
 sinfail=""
@@ -17,6 +19,7 @@ sout=""
 cin=""
 cinfail=""
 cinsent=""
+tmpfile=""
 cout=""
 capout=""
 ns1=""
@@ -136,6 +139,8 @@ cleanup_partial()
 
 check_tools()
 {
+       mptcp_lib_check_mptcp
+
        if ! ip -Version &> /dev/null; then
                echo "SKIP: Could not run test without ip tool"
                exit $ksft_skip
@@ -175,6 +180,7 @@ cleanup()
 {
        rm -f "$cin" "$cout" "$sinfail"
        rm -f "$sin" "$sout" "$cinsent" "$cinfail"
+       rm -f "$tmpfile"
        rm -rf $evts_ns1 $evts_ns2
        cleanup_partial
 }
@@ -383,9 +389,16 @@ check_transfer()
                        fail_test
                        return 1
                fi
-               bytes="--bytes=${bytes}"
+
+               # note: BusyBox's "cmp" command doesn't support --bytes
+               tmpfile=$(mktemp)
+               head --bytes="$bytes" "$in" > "$tmpfile"
+               mv "$tmpfile" "$in"
+               head --bytes="$bytes" "$out" > "$tmpfile"
+               mv "$tmpfile" "$out"
+               tmpfile=""
        fi
-       cmp -l "$in" "$out" ${bytes} | while read -r i a b; do
+       cmp -l "$in" "$out" | while read -r i a b; do
                local sum=$((0${a} + 0${b}))
                if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then
                        echo "[ FAIL ] $what does not match (in, out):"
@@ -849,7 +862,15 @@ do_transfer()
                                     sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
                                ip netns exec ${listener_ns} ./pm_nl_ctl ann $addr token $tk id $id
                                sleep 1
+                               sp=$(grep "type:10" "$evts_ns1" |
+                                    sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+                               da=$(grep "type:10" "$evts_ns1" |
+                                    sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
+                               dp=$(grep "type:10" "$evts_ns1" |
+                                    sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
                                ip netns exec ${listener_ns} ./pm_nl_ctl rem token $tk id $id
+                               ip netns exec ${listener_ns} ./pm_nl_ctl dsf lip "::ffff:$addr" \
+                                                       lport $sp rip $da rport $dp token $tk
                        fi
 
                        counter=$((counter + 1))
@@ -915,6 +936,7 @@ do_transfer()
                                sleep 1
                                sp=$(grep "type:10" "$evts_ns2" |
                                     sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
+                               ip netns exec ${connector_ns} ./pm_nl_ctl rem token $tk id $id
                                ip netns exec ${connector_ns} ./pm_nl_ctl dsf lip $addr lport $sp \
                                                                        rip $da rport $dp token $tk
                        fi
@@ -3129,7 +3151,7 @@ userspace_tests()
                pm_nl_set_limits $ns1 0 1
                run_tests $ns1 $ns2 10.0.1.1 0 0 userspace_1 slow
                chk_join_nr 1 1 1
-               chk_rm_nr 0 1
+               chk_rm_nr 1 1
                kill_events_pids
        fi
 }
diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh
new file mode 100644 (file)
index 0000000..3286536
--- /dev/null
@@ -0,0 +1,40 @@
+#! /bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+readonly KSFT_FAIL=1
+readonly KSFT_SKIP=4
+
+# SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES env var can be set when validating all
+# features using the latest version of the kernel and the selftests to make sure
+# a test is not being skipped by mistake.
+mptcp_lib_expect_all_features() {
+       [ "${SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES:-}" = "1" ]
+}
+
+# $1: msg
+mptcp_lib_fail_if_expected_feature() {
+       if mptcp_lib_expect_all_features; then
+               echo "ERROR: missing feature: ${*}"
+               exit ${KSFT_FAIL}
+       fi
+
+       return 1
+}
+
+# $1: file
+mptcp_lib_has_file() {
+       local f="${1}"
+
+       if [ -f "${f}" ]; then
+               return 0
+       fi
+
+       mptcp_lib_fail_if_expected_feature "${f} file not found"
+}
+
+mptcp_lib_check_mptcp() {
+       if ! mptcp_lib_has_file "/proc/sys/net/mptcp/enabled"; then
+               echo "SKIP: MPTCP support is not available"
+               exit ${KSFT_SKIP}
+       fi
+}
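
A hypothetical invocation (the test script name is an assumption) showing the strict mode described in the comment above: with the variable set, a missing feature such as an absent /proc/sys/net/mptcp/enabled fails the run instead of skipping it.

  SELFTESTS_MPTCP_LIB_EXPECT_ALL_FEATURES=1 ./mptcp_join.sh
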
index 1b70c0a..ff5adbb 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 ret=0
 sin=""
 sout=""
@@ -84,6 +86,8 @@ cleanup()
        rm -f "$sin" "$sout"
 }
 
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
index 89839d1..32f7533 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 ksft_skip=4
 ret=0
 
@@ -34,6 +36,8 @@ cleanup()
        ip netns del $ns1
 }
 
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
index 9f22f7e..36a3c9d 100755 (executable)
@@ -1,6 +1,8 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
 sec=$(date +%s)
 rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
 ns1="ns1-$rndh"
@@ -34,6 +36,8 @@ cleanup()
        done
 }
 
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Could not run test without ip tool"
index b1eb7bc..8092399 100755 (executable)
@@ -1,6 +1,10 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+. "$(dirname "${0}")/mptcp_lib.sh"
+
+mptcp_lib_check_mptcp
+
 ip -Version > /dev/null 2>&1
 if [ $? -ne 0 ];then
        echo "SKIP: Cannot not run test without ip tool"
index 1003119..f962823 100755 (executable)
@@ -232,10 +232,14 @@ setup_rt_networking()
        local nsname=rt-${rt}
 
        ip netns add ${nsname}
+
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.all.accept_dad=0
+       ip netns exec ${nsname} sysctl -wq net.ipv6.conf.default.accept_dad=0
+
        ip link set veth-rt-${rt} netns ${nsname}
        ip -netns ${nsname} link set veth-rt-${rt} name veth0
 
-       ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0
+       ip -netns ${nsname} addr add ${IPv6_RT_NETWORK}::${rt}/64 dev veth0 nodad
        ip -netns ${nsname} link set veth0 up
        ip -netns ${nsname} link set lo up
 
@@ -254,6 +258,12 @@ setup_hs()
 
        # set the networking for the host
        ip netns add ${hsname}
+
+       # disable the rp_filter otherwise the kernel gets confused about how
+       # to route decap ipv4 packets.
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
+       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
        ip -netns ${hsname} link add veth0 type veth peer name ${rtveth}
        ip -netns ${hsname} link set ${rtveth} netns ${rtname}
        ip -netns ${hsname} addr add ${IPv4_HS_NETWORK}.${hs}/24 dev veth0
@@ -272,11 +282,6 @@ setup_hs()
 
        ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.proxy_arp=1
 
-       # disable the rp_filter otherwise the kernel gets confused about how
-       # to route decap ipv4 packets.
-       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.all.rp_filter=0
-       ip netns exec ${rtname} sysctl -wq net.ipv4.conf.${rtveth}.rp_filter=0
-
        ip netns exec ${rtname} sh -c "echo 1 > /proc/sys/net/vrf/strict_mode"
 }
 
index 75af864..50aab6b 100644 (file)
@@ -17,6 +17,7 @@ ENCL_CFLAGS := -Wall -Werror -static -nostdlib -nostartfiles -fPIC \
               -fno-stack-protector -mrdrnd $(INCLUDES)
 
 TEST_CUSTOM_PROGS := $(OUTPUT)/test_sgx
+TEST_FILES := $(OUTPUT)/test_encl.elf
 
 ifeq ($(CAN_BUILD_X86_64), 1)
 all: $(TEST_CUSTOM_PROGS) $(OUTPUT)/test_encl.elf
index cb5c13e..479802a 100644 (file)
@@ -3962,18 +3962,19 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
-       r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
-       BUG_ON(r == -EBUSY);
+       r = xa_reserve(&kvm->vcpu_array, vcpu->vcpu_idx, GFP_KERNEL_ACCOUNT);
        if (r)
                goto unlock_vcpu_destroy;
 
        /* Now it's all set up, let userspace reach it */
        kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
-       if (r < 0) {
-               xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
-               kvm_put_kvm_no_destroy(kvm);
-               goto unlock_vcpu_destroy;
+       if (r < 0)
+               goto kvm_put_xa_release;
+
+       if (KVM_BUG_ON(!!xa_store(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, 0), kvm)) {
+               r = -EINVAL;
+               goto kvm_put_xa_release;
        }
 
        /*
@@ -3988,6 +3989,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        kvm_create_vcpu_debugfs(vcpu);
        return r;
 
+kvm_put_xa_release:
+       kvm_put_kvm_no_destroy(kvm);
+       xa_release(&kvm->vcpu_array, vcpu->vcpu_idx);
 unlock_vcpu_destroy:
        mutex_unlock(&kvm->lock);
        kvm_dirty_ring_free(&vcpu->dirty_ring);
@@ -5184,7 +5188,20 @@ static void hardware_disable_all(void)
 static int hardware_enable_all(void)
 {
        atomic_t failed = ATOMIC_INIT(0);
-       int r = 0;
+       int r;
+
+       /*
+        * Do not enable hardware virtualization if the system is going down.
+        * If userspace initiated a forced reboot, e.g. reboot -f, then it's
+        * possible for an in-flight KVM_CREATE_VM to trigger hardware enabling
+        * after kvm_reboot() is called.  Note, this relies on system_state
+        * being set _before_ kvm_reboot(), which is why KVM uses a syscore ops
+        * hook instead of registering a dedicated reboot notifier (the latter
+        * runs before system_state is updated).
+        */
+       if (system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF ||
+           system_state == SYSTEM_RESTART)
+               return -EBUSY;
 
        /*
         * When onlining a CPU, cpu_online_mask is set before kvm_online_cpu()
@@ -5197,6 +5214,8 @@ static int hardware_enable_all(void)
        cpus_read_lock();
        mutex_lock(&kvm_lock);
 
+       r = 0;
+
        kvm_usage_count++;
        if (kvm_usage_count == 1) {
                on_each_cpu(hardware_enable_nolock, &failed, 1);
@@ -5213,26 +5232,24 @@ static int hardware_enable_all(void)
        return r;
 }
 
-static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
-                     void *v)
+static void kvm_shutdown(void)
 {
        /*
-        * Some (well, at least mine) BIOSes hang on reboot if
-        * in vmx root mode.
-        *
-        * And Intel TXT required VMX off for all cpu when system shutdown.
+        * Disable hardware virtualization and set kvm_rebooting to indicate
+        * that KVM has asynchronously disabled hardware virtualization, i.e.
+        * that relevant errors and exceptions aren't entirely unexpected.
+        * Some flavors of hardware virtualization need to be disabled before
+        * transferring control to firmware (to perform shutdown/reboot), e.g.
+        * on x86, virtualization can block INIT interrupts, which are used by
+        * firmware to pull APs back under firmware control.  Note, this path
+        * is used for both shutdown and reboot scenarios, i.e. neither name is
+        * 100% comprehensive.
         */
        pr_info("kvm: exiting hardware virtualization\n");
        kvm_rebooting = true;
        on_each_cpu(hardware_disable_nolock, NULL, 1);
-       return NOTIFY_OK;
 }
 
-static struct notifier_block kvm_reboot_notifier = {
-       .notifier_call = kvm_reboot,
-       .priority = 0,
-};
-
 static int kvm_suspend(void)
 {
        /*
@@ -5263,6 +5280,7 @@ static void kvm_resume(void)
 static struct syscore_ops kvm_syscore_ops = {
        .suspend = kvm_suspend,
        .resume = kvm_resume,
+       .shutdown = kvm_shutdown,
 };
 #else /* CONFIG_KVM_GENERIC_HARDWARE_ENABLING */
 static int hardware_enable_all(void)
@@ -5967,7 +5985,6 @@ int kvm_init(unsigned vcpu_size, unsigned vcpu_align, struct module *module)
        if (r)
                return r;
 
-       register_reboot_notifier(&kvm_reboot_notifier);
        register_syscore_ops(&kvm_syscore_ops);
 #endif
 
@@ -6039,7 +6056,6 @@ err_cpu_kick_mask:
 err_vcpu_cache:
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
-       unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
        return r;
@@ -6065,7 +6081,6 @@ void kvm_exit(void)
        kvm_async_pf_deinit();
 #ifdef CONFIG_KVM_GENERIC_HARDWARE_ENABLING
        unregister_syscore_ops(&kvm_syscore_ops);
-       unregister_reboot_notifier(&kvm_reboot_notifier);
        cpuhp_remove_state_nocalls(CPUHP_AP_KVM_ONLINE);
 #endif
        kvm_irqfd_exit();